/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0

static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_BLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START),
	[TRANS_STATE_COMMIT_START]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
	[TRANS_STATE_COMPLETED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
};

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
		while (!list_empty(&transaction->pending_chunks)) {
			struct extent_map *em;

			em = list_first_entry(&transaction->pending_chunks,
					      struct extent_map, list);
			list_del_init(&em->list);
			free_extent_map(em);
		}
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static void clear_btree_io_tree(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);
		if (need_resched()) {
			spin_unlock(&tree->lock);
			cond_resched();
			spin_lock(&tree->lock);
		}
	}
	spin_unlock(&tree->lock);
}

static noinline void switch_commit_roots(struct btrfs_transaction *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->objectid))
			btrfs_unpin_free_ino(root);
		clear_btree_io_tree(&root->dirty_log_pages);
	}
	up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.
		 * Make sure to redo the checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.href_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->pending_chunks);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->pending_ordered);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction. This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.
		 * We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c. The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP. When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock. smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_BLOCKED &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!trans->aborted);
}

/* wait for commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
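 * (callers that need the transaction fully on disk can wait for it with
 * btrfs_wait_for_commit())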
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   cur_trans->aborted);
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	if (!root->fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	int ret;

	/* Send isn't supposed to start transactions. */
	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->nodesize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += root->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref. We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = 0;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	h->allocating_chunk = false;
	h->reloc_reserved = false;
	h->sync = false;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);
	INIT_LIST_HEAD(&h->ordered);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
	    may_wait_transaction(root, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}
	h->qgroup_reserved = qgroup_reserved;

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: if this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk.
 * If you want to make sure there is
 * no inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 * btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH, 0);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		btrfs_wait_for_commit(root, 0);

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > root->fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (root->fs_info->global_block_rsv.space_info->full &&
	    btrfs_check_space_for_delayed_refs(trans, root))
		return 1;

	return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED ||
	    cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	unsigned long cur = trans->delayed_ref_updates;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;
	int must_run_delayed_refs = 0;

	if (trans->use_count > 1) {
		trans->use_count--;
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (!list_empty(&trans->ordered)) {
		spin_lock(&info->trans_lock);
		list_splice(&trans->ordered, &cur_trans->pending_ordered);
		spin_unlock(&info->trans_lock);
	}

	trans->delayed_ref_updates = 0;
	if (!trans->sync) {
		must_run_delayed_refs =
			btrfs_should_throttle_delayed_refs(trans, root);
		cur = max_t(unsigned long, cur, 32);

		/*
		 * don't make the caller wait if they are from a NOLOCK
		 * or ATTACH transaction, it will deadlock with commit
		 */
		if (must_run_delayed_refs == 1 &&
		    (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
			must_run_delayed_refs = 2;
	}

	if (trans->qgroup_reserved) {
		/*
		 * the same root has to be passed here between start_transaction
		 * and end_transaction. Subvolume quota depends on this.
		 */
		btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root) &&
	    ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
		spin_lock(&info->trans_lock);
		if (cur_trans->state == TRANS_STATE_RUNNING)
			cur_trans->state = TRANS_STATE_BLOCKED;
		spin_unlock(&info->trans_lock);
	}

	if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
		if (throttle)
			return btrfs_commit_transaction(trans, root);
		else
			wake_up_process(info->transaction_kthread);
	}

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	if (must_run_delayed_refs) {
		btrfs_async_run_delayed_refs(root, cur,
					     must_run_delayed_refs == 1);
	}
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state, GFP_NOFS);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to btrfs_wait_marked_extents() would not know that writeback
		 * for this range started and therefore wouldn't wait for it to
		 * finish - we don't want to commit a superblock that points to
		 * btree nodes/leaves for which writeback hasn't finished yet
		 * (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through clear_btree_io_tree()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit. We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	bool errors = false;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through clear_btree_io_tree()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT,
				       0, 0, &cached_state, GFP_NOFS);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		if ((mark & EXTENT_DIRTY) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
				       &btree_ino->runtime_flags))
			errors = true;

		if ((mark & EXTENT_NEW) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	} else {
		if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	}

	if (errors && !werr)
		werr = -EIO;

	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root)
{
	int ret;

	ret = btrfs_write_and_wait_marked_extents(root,
					&trans->transaction->dirty_pages,
					EXTENT_DIRTY);
	clear_btree_io_tree(&trans->transaction->dirty_pages);

	return ret;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans, root->fs_info);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);
	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted. This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (list_empty(&root->root_list))
		list_add_tail(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * update the root item of every fs/subvolume root that was changed in this
 * transaction (every root tagged with BTRFS_ROOT_TRANS_TAG in the radix tree)
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
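			/*
			 * Retake the radix lock we dropped above before
			 * checking for errors and continuing the gang lookup.
			 */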
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			pr_debug("BTRFS: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit. This does the actual creation.
 *
 * Note:
 * If an error that could affect the commit of the current transaction
 * happens, we should return that error number. If the error only affects
 * the creation of this pending snapshot, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		pending->error = -ENOMEM;
		return 0;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto no_free_objectid;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;

	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * We need to flush delayed refs in order to make sure all of our quota
	 * operations have been done before we call btrfs_qgroup_inherit.
	 */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_qgroup_inherit(trans, fs_info,
				   root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We checked the name at the beginning, so a collision here is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
					  new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, root, ret);
			goto fail;
		}
	}
fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		if (ret)
			break;
	}
	return ret;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (root->fs_info->update_uuid_tree_gen)
		super->uuid_tree_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
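	/*
	 * Woken by the wake_up(&fs_info->transaction_blocked_wait) that
	 * btrfs_commit_transaction() does right after setting COMMIT_START.
	 */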
	wait_event(root->fs_info->transaction_blocked_wait,
		   trans->state >= TRANS_STATE_COMMIT_START ||
		   trans->aborted);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->state >= TRANS_STATE_UNBLOCKED ||
		   trans->aborted);
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		rwsem_acquire_read(
		     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		     0, 1, _THIS_IP_);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		rwsem_release(
			&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
			1, _THIS_IP_);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}


static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	DEFINE_WAIT(wait);

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);

	/*
	 * If the transaction is removed from the list, it means this
	 * transaction has been committed successfully, so it is impossible
	 * to call the cleanup function.
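	 * Hence the transaction we are cleaning up here must still be on
	 * fs_info->trans_list, which the BUG_ON below asserts.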
1678 */ 1679 BUG_ON(list_empty(&cur_trans->list)); 1680 1681 list_del_init(&cur_trans->list); 1682 if (cur_trans == root->fs_info->running_transaction) { 1683 cur_trans->state = TRANS_STATE_COMMIT_DOING; 1684 spin_unlock(&root->fs_info->trans_lock); 1685 wait_event(cur_trans->writer_wait, 1686 atomic_read(&cur_trans->num_writers) == 1); 1687 1688 spin_lock(&root->fs_info->trans_lock); 1689 } 1690 spin_unlock(&root->fs_info->trans_lock); 1691 1692 btrfs_cleanup_one_transaction(trans->transaction, root); 1693 1694 spin_lock(&root->fs_info->trans_lock); 1695 if (cur_trans == root->fs_info->running_transaction) 1696 root->fs_info->running_transaction = NULL; 1697 spin_unlock(&root->fs_info->trans_lock); 1698 1699 if (trans->type & __TRANS_FREEZABLE) 1700 sb_end_intwrite(root->fs_info->sb); 1701 btrfs_put_transaction(cur_trans); 1702 btrfs_put_transaction(cur_trans); 1703 1704 trace_btrfs_transaction_commit(root); 1705 1706 if (current->journal_info == trans) 1707 current->journal_info = NULL; 1708 btrfs_scrub_cancel(root->fs_info); 1709 1710 kmem_cache_free(btrfs_trans_handle_cachep, trans); 1711 } 1712 1713 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) 1714 { 1715 if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT)) 1716 return btrfs_start_delalloc_roots(fs_info, 1, -1); 1717 return 0; 1718 } 1719 1720 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) 1721 { 1722 if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT)) 1723 btrfs_wait_ordered_roots(fs_info, -1); 1724 } 1725 1726 static inline void 1727 btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans, 1728 struct btrfs_fs_info *fs_info) 1729 { 1730 struct btrfs_ordered_extent *ordered; 1731 1732 spin_lock(&fs_info->trans_lock); 1733 while (!list_empty(&cur_trans->pending_ordered)) { 1734 ordered = list_first_entry(&cur_trans->pending_ordered, 1735 struct btrfs_ordered_extent, 1736 trans_list); 1737 list_del_init(&ordered->trans_list); 1738 spin_unlock(&fs_info->trans_lock); 1739 1740 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE, 1741 &ordered->flags)); 1742 btrfs_put_ordered_extent(ordered); 1743 spin_lock(&fs_info->trans_lock); 1744 } 1745 spin_unlock(&fs_info->trans_lock); 1746 } 1747 1748 int btrfs_commit_transaction(struct btrfs_trans_handle *trans, 1749 struct btrfs_root *root) 1750 { 1751 struct btrfs_transaction *cur_trans = trans->transaction; 1752 struct btrfs_transaction *prev_trans = NULL; 1753 struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode); 1754 int ret; 1755 1756 /* Stop the commit early if ->aborted is set */ 1757 if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { 1758 ret = cur_trans->aborted; 1759 btrfs_end_transaction(trans, root); 1760 return ret; 1761 } 1762 1763 /* make a pass through all the delayed refs we have so far 1764 * any runnings procs may add more while we are here 1765 */ 1766 ret = btrfs_run_delayed_refs(trans, root, 0); 1767 if (ret) { 1768 btrfs_end_transaction(trans, root); 1769 return ret; 1770 } 1771 1772 btrfs_trans_release_metadata(trans, root); 1773 trans->block_rsv = NULL; 1774 if (trans->qgroup_reserved) { 1775 btrfs_qgroup_free(root, trans->qgroup_reserved); 1776 trans->qgroup_reserved = 0; 1777 } 1778 1779 cur_trans = trans->transaction; 1780 1781 /* 1782 * set the flushing flag so procs in this transaction have to 1783 * start sending their work down. 
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	spin_lock(&root->fs_info->trans_lock);
	list_splice(&trans->ordered, &cur_trans->pending_ordered);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&root->fs_info->trans_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			btrfs_put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(root->fs_info);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* some pending items might have been added after the previous flush. */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(root->fs_info);

	btrfs_wait_pending_ordered(cur_trans, root->fs_info);

	btrfs_scrub_pause(root);
	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction. We could have started a join before setting
	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inodes
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with.  Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees that are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	/* commit_fs_roots() and commit_cowonly_roots() below are responsible
	 * for getting the various roots consistent with each other.  Every
	 * pointer in the tree of tree roots has to point to the most up to
	 * date root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the transaction is done, we can apply the pending changes
	 * before the next transaction.
	 */
	btrfs_apply_pending_changes(root->fs_info);

	/* commit_fs_roots gets rid of all the tree log roots, so it is now
	 * safe to free the root of the tree of log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}
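	/*
	 * Essentially all tree updates for this transaction are done at
	 * this point; what is left is to publish the result: record the
	 * new tree and chunk roots, switch the commit roots, take the
	 * superblock copy for this commit and write everything out.
	 */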
	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	list_add_tail(&root->fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	list_add_tail(&root->fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(cur_trans, root->fs_info);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	btrfs_set_super_log_root(root->fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	btrfs_update_commit_device_size(root->fs_info);
	btrfs_update_commit_device_bytes_used(root, cur_trans);

	clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
	clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);

	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);
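	/*
	 * With the super on disk, the old copies of the blocks modified in
	 * this transaction are no longer needed; roughly speaking,
	 * btrfs_finish_extent_commit() below unpins them and returns the
	 * space to the free space caches.
	 */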
	btrfs_finish_extent_commit(trans, root);

	root->fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

scrub_continue:
	btrfs_scrub_continue(root);
cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}
	btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * return < 0 if error
 * 0 if there are no more dead_roots at the time of call
 * 1 if there are more to be processed, call me again
 *
 * A return value of 1 means there are certainly more snapshots to delete, but
 * if a new one appears during processing, we may return 0 anyway.  We don't
 * mind, because btrfs_commit_super will poke the cleaner thread and it will
 * process it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
			BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, NULL, 1, 0);

	return (ret < 0) ? 0 : 1;
}

void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
{
	unsigned long prev;
	unsigned long bit;

	prev = cmpxchg(&fs_info->pending_changes, 0, 0);
	if (!prev)
		return;

	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_COMMIT;
	if (prev & bit)
		btrfs_debug(fs_info, "pending commit done");
	prev &= ~bit;

	if (prev)
		btrfs_warn(fs_info,
			"unknown pending changes left 0x%lx, ignoring", prev);
}
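/*
 * Illustrative note (not a complete recipe): code that wants a mount
 * option to change only at a transaction boundary sets the matching bit
 * in fs_info->pending_changes, e.g.
 *
 *	set_bit(BTRFS_PENDING_SET_INODE_MAP_CACHE, &fs_info->pending_changes);
 *
 * and btrfs_apply_pending_changes() above picks the bit up during the
 * next commit, at a point where the trees of the current transaction are
 * already settled.  Real callers may go through helper wrappers rather
 * than raw set_bit().
 */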