/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"

#define BTRFS_ROOT_TRANS_TAG 0

void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

static inline int can_join_transaction(struct btrfs_transaction *trans,
				       int type)
{
	return !(trans->in_commit &&
		 type != TRANS_JOIN &&
		 type != TRANS_JOIN_NOLOCK);
}
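/*
 * Return values of join_transaction() below:
 *   0        joined the running transaction, or started a fresh one
 *   -EROFS   the fs has hit an error and takes no new transactions
 *   -EBUSY   joining is blocked right now; start_transaction() retries
 *   -ENOENT  TRANS_ATTACH was asked for but nothing is running
 *   -ENOMEM  allocating a new transaction failed
 *   <0       the errno stored in ->aborted, if the running transaction
 *            has been aborted
 */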
/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	if (fs_info->trans_no_join) {
		/*
		 * If we are JOIN_NOLOCK we're already committing a current
		 * transaction, we just need a handle to deal with something
		 * when committing the transaction, such as inode cache and
		 * space cache. It is a special case.
		 */
		if (type != TRANS_JOIN_NOLOCK) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (!can_join_transaction(cur_trans, type)) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);
	atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
	atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
	init_waitqueue_head(&cur_trans->delayed_refs.wait);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->ordered_operations);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}
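/*
 * A freshly allocated transaction starts with use_count == 2: one
 * reference for the joining handle and one that keeps the transaction
 * alive until the commit (or cleanup) path drops it via put_transaction().
 */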
/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}
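/*
 * start_transaction() is the common helper behind all of the
 * btrfs_*_transaction() variants below.  @num_items is the worst-case
 * number of items the caller will modify, used to size the metadata
 * reservation; @type picks the join semantics and @flush how hard we may
 * flush space to satisfy that reservation.  Returns a handle or an
 * ERR_PTR().
 */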
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;
	u64 qgroup_reserved = 0;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->leafsize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type < TRANS_JOIN_NOLOCK)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = 0;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	h->allocating_chunk = false;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}
	h->qgroup_reserved = qgroup_reserved;

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
	return ERR_PTR(ret);
}
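/*
 * Illustrative caller pattern for the wrappers below (a sketch, not
 * lifted from a real call site):
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	...modify at most one tree item...
 *	return btrfs_end_transaction(trans, root);
 */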
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that an inactive transaction
 * is still in memory, not fully on disk.  If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH, 0);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		btrfs_wait_for_commit(root, 0);

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		ret = -EINVAL;
		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		/* The specified transaction doesn't exist */
		if (!cur_trans)
			goto out;
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}
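/*
 * __btrfs_end_transaction() drops one use of the handle.  When the last
 * use goes away it releases the metadata reservation, runs a batch of
 * delayed refs, and, if the transaction has become blocked, either
 * commits it directly (throttle != 0) or wakes the transaction kthread.
 */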
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	/*
	 * do the qgroup accounting as early as possible
	 */
	err = btrfs_delayed_refs_qgroup_accounting(trans, info);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	/*
	 * the same root has to be passed to start_transaction and
	 * end_transaction.  Subvolume quota depends on this.
	 */
	WARN_ON(trans->root != root);

	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	while (count < 1) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		err = -EIO;
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}
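/*
 * The write phase below converts the caller's mark to EXTENT_NEED_WAIT
 * while queueing writeback, and the wait phase clears EXTENT_NEED_WAIT
 * as it waits, so neither phase rescans ranges the other has finished.
 */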
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				   mark, &cached_state, GFP_NOFS);
		cached_state = NULL;
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	blk_finish_plug(&plug);
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				 0, 0, &cached_state, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}
/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}
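/*
 * Note that update_cowonly_root() skips switching the extent root's
 * commit root; commit_cowonly_roots() below does that last, under
 * extent_commit_sem, once the dirty list has been fully drained.
 */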
/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious.  Any of the
 * failures will cause the file system to go offline.  We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	WARN_ON(ret);
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	WARN_ON(ret);

	ret = btrfs_run_qgroups(trans, root->fs_info);
	BUG_ON(ret);

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * write all the dirty fs/file tree roots for this transaction to disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			printk(KERN_DEBUG "btrfs: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error occurs that may affect the commit of the current
 * transaction, return the error number.  If the error only affects the
 * creation of the pending snapshot, store it in pending->error and
 * return 0.
 */
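/*
 * Note: the handle's block_rsv is swapped to pending->block_rsv for the
 * duration of this function, so everything allocated here is charged to
 * the reservation made when the snapshot was queued; the original rsv is
 * restored before returning.
 */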
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		pending->error = -ENOMEM;
		return 0;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto no_free_objectid;
	}

	pending->error = btrfs_qgroup_inherit(trans, fs_info,
					      root->root_key.objectid,
					      objectid, pending->inherit);
	if (pending->error)
		goto no_free_objectid;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;

	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */
	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
	new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);
	memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
	memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
	btrfs_set_root_stransid(new_root_item, 0);
	btrfs_set_root_rtransid(new_root_item, 0);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We have checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		if (ret)
			break;
	}
	return ret;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}
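/*
 * Both waiters above pair with btrfs_commit_transaction(), which wakes
 * transaction_blocked_wait right after setting in_commit, and wakes
 * transaction_wait once the new roots are written and blocked is cleared.
 */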
/*
 * commit transactions asynchronously.  once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
		rwsem_acquire_read(
		     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		     0, 1, _THIS_IP_);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
1426 */ 1427 if (trans->type < TRANS_JOIN_NOLOCK) 1428 rwsem_release( 1429 &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1], 1430 1, _THIS_IP_); 1431 1432 schedule_work(&ac->work); 1433 1434 /* wait for transaction to start and unblock */ 1435 if (wait_for_unblock) 1436 wait_current_trans_commit_start_and_unblock(root, cur_trans); 1437 else 1438 wait_current_trans_commit_start(root, cur_trans); 1439 1440 if (current->journal_info == trans) 1441 current->journal_info = NULL; 1442 1443 put_transaction(cur_trans); 1444 return 0; 1445 } 1446 1447 1448 static void cleanup_transaction(struct btrfs_trans_handle *trans, 1449 struct btrfs_root *root, int err) 1450 { 1451 struct btrfs_transaction *cur_trans = trans->transaction; 1452 DEFINE_WAIT(wait); 1453 1454 WARN_ON(trans->use_count > 1); 1455 1456 btrfs_abort_transaction(trans, root, err); 1457 1458 spin_lock(&root->fs_info->trans_lock); 1459 1460 if (list_empty(&cur_trans->list)) { 1461 spin_unlock(&root->fs_info->trans_lock); 1462 btrfs_end_transaction(trans, root); 1463 return; 1464 } 1465 1466 list_del_init(&cur_trans->list); 1467 if (cur_trans == root->fs_info->running_transaction) { 1468 root->fs_info->trans_no_join = 1; 1469 spin_unlock(&root->fs_info->trans_lock); 1470 wait_event(cur_trans->writer_wait, 1471 atomic_read(&cur_trans->num_writers) == 1); 1472 1473 spin_lock(&root->fs_info->trans_lock); 1474 root->fs_info->running_transaction = NULL; 1475 } 1476 spin_unlock(&root->fs_info->trans_lock); 1477 1478 btrfs_cleanup_one_transaction(trans->transaction, root); 1479 1480 put_transaction(cur_trans); 1481 put_transaction(cur_trans); 1482 1483 trace_btrfs_transaction_commit(root); 1484 1485 btrfs_scrub_continue(root); 1486 1487 if (current->journal_info == trans) 1488 current->journal_info = NULL; 1489 1490 kmem_cache_free(btrfs_trans_handle_cachep, trans); 1491 } 1492 1493 static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans, 1494 struct btrfs_root *root) 1495 { 1496 int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT); 1497 int snap_pending = 0; 1498 int ret; 1499 1500 if (!flush_on_commit) { 1501 spin_lock(&root->fs_info->trans_lock); 1502 if (!list_empty(&trans->transaction->pending_snapshots)) 1503 snap_pending = 1; 1504 spin_unlock(&root->fs_info->trans_lock); 1505 } 1506 1507 if (flush_on_commit || snap_pending) { 1508 ret = btrfs_start_delalloc_inodes(root, 1); 1509 if (ret) 1510 return ret; 1511 btrfs_wait_ordered_extents(root, 1); 1512 } 1513 1514 ret = btrfs_run_delayed_items(trans, root); 1515 if (ret) 1516 return ret; 1517 1518 /* 1519 * running the delayed items may have added new refs. account 1520 * them now so that they hinder processing of more delayed refs 1521 * as little as possible. 1522 */ 1523 btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info); 1524 1525 /* 1526 * rename don't use btrfs_join_transaction, so, once we 1527 * set the transaction to blocked above, we aren't going 1528 * to get any new ordered operations. 
We can safely run 1529 * it here and no for sure that nothing new will be added 1530 * to the list 1531 */ 1532 ret = btrfs_run_ordered_operations(trans, root, 1); 1533 1534 return ret; 1535 } 1536 1537 /* 1538 * btrfs_transaction state sequence: 1539 * in_commit = 0, blocked = 0 (initial) 1540 * in_commit = 1, blocked = 1 1541 * blocked = 0 1542 * commit_done = 1 1543 */ 1544 int btrfs_commit_transaction(struct btrfs_trans_handle *trans, 1545 struct btrfs_root *root) 1546 { 1547 unsigned long joined = 0; 1548 struct btrfs_transaction *cur_trans = trans->transaction; 1549 struct btrfs_transaction *prev_trans = NULL; 1550 DEFINE_WAIT(wait); 1551 int ret; 1552 int should_grow = 0; 1553 unsigned long now = get_seconds(); 1554 1555 ret = btrfs_run_ordered_operations(trans, root, 0); 1556 if (ret) { 1557 btrfs_abort_transaction(trans, root, ret); 1558 btrfs_end_transaction(trans, root); 1559 return ret; 1560 } 1561 1562 /* Stop the commit early if ->aborted is set */ 1563 if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { 1564 ret = cur_trans->aborted; 1565 btrfs_end_transaction(trans, root); 1566 return ret; 1567 } 1568 1569 /* make a pass through all the delayed refs we have so far 1570 * any runnings procs may add more while we are here 1571 */ 1572 ret = btrfs_run_delayed_refs(trans, root, 0); 1573 if (ret) { 1574 btrfs_end_transaction(trans, root); 1575 return ret; 1576 } 1577 1578 btrfs_trans_release_metadata(trans, root); 1579 trans->block_rsv = NULL; 1580 if (trans->qgroup_reserved) { 1581 btrfs_qgroup_free(root, trans->qgroup_reserved); 1582 trans->qgroup_reserved = 0; 1583 } 1584 1585 cur_trans = trans->transaction; 1586 1587 /* 1588 * set the flushing flag so procs in this transaction have to 1589 * start sending their work down. 1590 */ 1591 cur_trans->delayed_refs.flushing = 1; 1592 1593 if (!list_empty(&trans->new_bgs)) 1594 btrfs_create_pending_block_groups(trans, root); 1595 1596 ret = btrfs_run_delayed_refs(trans, root, 0); 1597 if (ret) { 1598 btrfs_end_transaction(trans, root); 1599 return ret; 1600 } 1601 1602 spin_lock(&cur_trans->commit_lock); 1603 if (cur_trans->in_commit) { 1604 spin_unlock(&cur_trans->commit_lock); 1605 atomic_inc(&cur_trans->use_count); 1606 ret = btrfs_end_transaction(trans, root); 1607 1608 wait_for_commit(root, cur_trans); 1609 1610 put_transaction(cur_trans); 1611 1612 return ret; 1613 } 1614 1615 trans->transaction->in_commit = 1; 1616 trans->transaction->blocked = 1; 1617 spin_unlock(&cur_trans->commit_lock); 1618 wake_up(&root->fs_info->transaction_blocked_wait); 1619 1620 spin_lock(&root->fs_info->trans_lock); 1621 if (cur_trans->list.prev != &root->fs_info->trans_list) { 1622 prev_trans = list_entry(cur_trans->list.prev, 1623 struct btrfs_transaction, list); 1624 if (!prev_trans->commit_done) { 1625 atomic_inc(&prev_trans->use_count); 1626 spin_unlock(&root->fs_info->trans_lock); 1627 1628 wait_for_commit(root, prev_trans); 1629 1630 put_transaction(prev_trans); 1631 } else { 1632 spin_unlock(&root->fs_info->trans_lock); 1633 } 1634 } else { 1635 spin_unlock(&root->fs_info->trans_lock); 1636 } 1637 1638 if (!btrfs_test_opt(root, SSD) && 1639 (now < cur_trans->start_time || now - cur_trans->start_time < 1)) 1640 should_grow = 1; 1641 1642 do { 1643 joined = cur_trans->num_joined; 1644 1645 WARN_ON(cur_trans != trans->transaction); 1646 1647 ret = btrfs_flush_all_pending_stuffs(trans, root); 1648 if (ret) 1649 goto cleanup_transaction; 1650 1651 prepare_to_wait(&cur_trans->writer_wait, &wait, 1652 TASK_UNINTERRUPTIBLE); 1653 1654 if 
		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = btrfs_flush_all_pending_stuffs(trans, root);
	if (ret)
		goto cleanup_transaction;

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto cleanup_transaction;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with.  Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* commit_fs_roots and commit_cowonly_roots are responsible for
	 * getting the various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction.");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}
	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
	// WARN_ON(1);
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		int ret;

		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
		BUG_ON(ret < 0);
	}
	return 0;
}