/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"

/* radix tree tag for roots that joined the currently running transaction */
#define BTRFS_ROOT_TRANS_TAG 0

/*
 * Drop one reference on @transaction and free it when the last reference
 * goes away.  By that point the transaction must already be unlinked from
 * fs_info->trans_list and must have no delayed refs left.
 */
static void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

/*
 * Make the current root node the new commit root, dropping the reference
 * held on the old commit root.
 */
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * A transaction that is already committing (in_commit set) only admits
 * TRANS_JOIN and TRANS_JOIN_NOLOCK joiners; every other type must wait
 * for a fresh transaction.
 */
static inline int can_join_transaction(struct btrfs_transaction *trans,
				       int type)
{
	return !(trans->in_commit &&
		 type != TRANS_JOIN &&
		 type != TRANS_JOIN_NOLOCK);
}

/*
 * either allocate a new transaction or hop into the existing one
 *
 * Returns 0 on success, or a negative errno (-EROFS when the fs is in an
 * error state, -EBUSY when joining is not currently allowed, -ENOENT for
 * TRANS_ATTACH with no running transaction, -ENOMEM on allocation failure,
 * or the aborted transaction's error code).
 */
static noinline int join_transaction(struct btrfs_root *root, int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	if (fs_info->trans_no_join) {
		/*
		 * If we are JOIN_NOLOCK we're already committing a current
		 * transaction, we just need a handle to deal with something
		 * when committing the transaction, such as inode cache and
		 * space cache. It is a special case.
		 */
		if (type != TRANS_JOIN_NOLOCK) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (!can_join_transaction(cur_trans, type)) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		/* one ref for the handle, and count ourselves as a writer */
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/* allocate outside trans_lock; we may race and have to retry below */
	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);
	atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
	atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
	init_waitqueue_head(&cur_trans->delayed_refs.wait);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->ordered_operations);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	/* publish the new transaction while still holding trans_lock */
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.
 * This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
			   (unsigned long)root->root_key.objectid,
			   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}


/*
 * Public wrapper for record_root_in_trans(); takes the reloc mutex only
 * when this root has not yet been recorded in the current transaction.
 * Pairs its smp_rmb() with the smp_wmb()s in record_root_in_trans().
 */
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		/* hold a ref so the transaction can't vanish while we sleep */
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

/*
 * Decide whether a starter of @type should wait for the currently
 * blocked transaction before joining.  Never wait during log replay.
 */
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

/*
 * Common worker for all the btrfs_*_transaction() entry points.
 *
 * Reserves metadata space for @num_items items (flushing according to
 * @flush), joins or creates a transaction of the given @type, and returns
 * a handle, or an ERR_PTR on failure.  A nested start on the same task
 * reuses current->journal_info and bumps its use_count instead.
 */
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;
	u64 qgroup_reserved = 0;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		/* nested start: only JOIN variants may re-enter */
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->leafsize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type < TRANS_JOIN_NOLOCK)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = 0;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	h->allocating_chunk = false;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		/* the transaction went blocked under us; commit and retry */
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}
	h->qgroup_reserved = qgroup_reserved;

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
	return ERR_PTR(ret);
}

/* Start a TRANS_START transaction, flushing space as needed. */
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

/* As btrfs_start_transaction(), but with limited flushing on reservation. */
struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

/* Join the running transaction without reserving any metadata space. */
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

/* Join without taking sb_start_intwrite(); used from the commit path. */
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

/* Start a userspace (ioctl-controlled) transaction. */
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function return -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in the memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH, 0);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		/* nothing running; wait out any not-yet-finished commit */
		btrfs_wait_for_commit(root, 0);

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

/*
 * Wait until the transaction with the given @transid has committed, or,
 * when @transid is 0, until the newest committing transaction finishes.
 * Returns 0 on success or if there is nothing to wait for, -EINVAL when
 * @transid names a transaction that never existed.
 */
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		ret = -EINVAL;
		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				/* already past it; nothing to wait for */
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		/* The specified transaction doesn't exist */
		if (!cur_trans)
			goto out;
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	put_transaction(cur_trans);
out:
	return ret;
}

/* Throttle new work by waiting out a blocked transaction, unless an
 * ioctl-controlled transaction is open. */
void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

/*
 * Returns 1 when the global block reserve is getting low enough that the
 * running transaction should be ended, 0 otherwise.
 */
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

/*
 * Ask whether the caller should wrap up its transaction handle: returns 1
 * when the transaction is blocked or flushing delayed refs, a negative
 * error from running delayed refs, or should_end_transaction()'s verdict.
 */
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}

/*
 * Common worker for the btrfs_end_transaction*() variants.
 *
 * Releases the handle's metadata/qgroup reservations, runs a batch of
 * delayed refs, drops the writer count and transaction reference, and —
 * when @throttle is set and the transaction became blocked — commits it
 * on the caller's behalf.  Returns 0, or -EIO when the transaction was
 * aborted or the fs is in an error state, or a qgroup accounting error.
 */
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;

	if (--trans->use_count) {
		/* nested handle: just restore the saved reserve and return */
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	/*
	 * do the qgroup accounting as early as possible
	 */
	err = btrfs_delayed_refs_qgroup_accounting(trans, info);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (trans->qgroup_reserved) {
		/*
		 * the same root has to be passed here between start_transaction
		 * and end_transaction. Subvolume quota depends on this.
		 */
		btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	/* run at most one batch of delayed refs before letting go */
	while (count < 1) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		err = -EIO;
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

/* End a transaction handle without throttling. */
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

/* End a transaction handle, throttling (may commit / run delayed iputs). */
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/* End a delayed-metadata transaction handle; same as the throttled path. */
int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.
This is used to make sure all of 728 * those extents are sent to disk but does not wait on them 729 */ 730 int btrfs_write_marked_extents(struct btrfs_root *root, 731 struct extent_io_tree *dirty_pages, int mark) 732 { 733 int err = 0; 734 int werr = 0; 735 struct address_space *mapping = root->fs_info->btree_inode->i_mapping; 736 struct extent_state *cached_state = NULL; 737 u64 start = 0; 738 u64 end; 739 struct blk_plug plug; 740 741 blk_start_plug(&plug); 742 while (!find_first_extent_bit(dirty_pages, start, &start, &end, 743 mark, &cached_state)) { 744 convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, 745 mark, &cached_state, GFP_NOFS); 746 cached_state = NULL; 747 err = filemap_fdatawrite_range(mapping, start, end); 748 if (err) 749 werr = err; 750 cond_resched(); 751 start = end + 1; 752 } 753 if (err) 754 werr = err; 755 blk_finish_plug(&plug); 756 return werr; 757 } 758 759 /* 760 * when btree blocks are allocated, they have some corresponding bits set for 761 * them in one of two extent_io trees. This is used to make sure all of 762 * those extents are on disk for transaction or log commit. 
We wait 763 * on all the pages and clear them from the dirty pages state tree 764 */ 765 int btrfs_wait_marked_extents(struct btrfs_root *root, 766 struct extent_io_tree *dirty_pages, int mark) 767 { 768 int err = 0; 769 int werr = 0; 770 struct address_space *mapping = root->fs_info->btree_inode->i_mapping; 771 struct extent_state *cached_state = NULL; 772 u64 start = 0; 773 u64 end; 774 775 while (!find_first_extent_bit(dirty_pages, start, &start, &end, 776 EXTENT_NEED_WAIT, &cached_state)) { 777 clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, 778 0, 0, &cached_state, GFP_NOFS); 779 err = filemap_fdatawait_range(mapping, start, end); 780 if (err) 781 werr = err; 782 cond_resched(); 783 start = end + 1; 784 } 785 if (err) 786 werr = err; 787 return werr; 788 } 789 790 /* 791 * when btree blocks are allocated, they have some corresponding bits set for 792 * them in one of two extent_io trees. This is used to make sure all of 793 * those extents are on disk for transaction or log commit 794 */ 795 int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, 796 struct extent_io_tree *dirty_pages, int mark) 797 { 798 int ret; 799 int ret2; 800 801 ret = btrfs_write_marked_extents(root, dirty_pages, mark); 802 ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark); 803 804 if (ret) 805 return ret; 806 if (ret2) 807 return ret2; 808 return 0; 809 } 810 811 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, 812 struct btrfs_root *root) 813 { 814 if (!trans || !trans->transaction) { 815 struct inode *btree_inode; 816 btree_inode = root->fs_info->btree_inode; 817 return filemap_write_and_wait(btree_inode->i_mapping); 818 } 819 return btrfs_write_and_wait_marked_extents(root, 820 &trans->transaction->dirty_pages, 821 EXTENT_DIRTY); 822 } 823 824 /* 825 * this is used to update the root pointer in the tree of tree roots. 
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		/* stop once the root item is stable across an update pass */
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	/* the extent root's commit root is switched later, under the sem */
	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious.  Any of the
 * failures will cause the file system to go offline.  We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	/* COW the tree root itself so it is part of this transaction */
	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	WARN_ON(ret);
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	WARN_ON(ret);

	ret = btrfs_run_qgroups(trans, root->fs_info);
	BUG_ON(ret);

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.
 * This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add_tail(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * Commit every fs/subvolume root that joined this transaction (the ones
 * tagged with BTRFS_ROOT_TRANS_TAG in the radix tree): flush their logs,
 * reloc roots, orphans and inode caches, switch their commit roots and
 * write the updated root items into the tree of tree roots.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			/* the per-root work below can sleep; drop the lock */
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	/* only one defrag at a time per root */
	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		/* -EAGAIN means there is more to do; anything else stops us */
		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			printk(KERN_DEBUG "btrfs: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If the error which may affect the commitment of the current transaction
 * happens, we should return the error number. If the error which just affect
 * the creation of the pending snapshots, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	/*
	 * Per the contract above: failures that only affect this one
	 * snapshot are stored in pending->error and we return 0; failures
	 * that endanger the whole commit abort the transaction and are
	 * returned via ret.
	 */
	path = btrfs_alloc_path();
	if (!path) {
		pending->error = -ENOMEM;
		return 0;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	/* objectid for the new subvolume root */
	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto no_free_objectid;
	}

	pending->error = btrfs_qgroup_inherit(trans, fs_info,
					      root->root_key.objectid,
					      objectid, pending->inherit);
	if (pending->error)
		goto no_free_objectid;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	/* switch to the snapshot's own reservation for the work below */
	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;

	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		/* name collision only kills this snapshot, not the commit */
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	/* the new root item starts as a copy of the source root's item */
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		/* writable snapshots do not inherit received/send metadata */
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
	new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	/* COW the source root node and copy it as the new root's node */
	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We have checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
fail:
	pending->error = ret;
dir_item_existed:
	/* restore the transaction's original reservation */
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	/* stop at the first error that endangers the commit itself */
	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		if (ret)
			break;
	}
	return ret;
}

/*
 * Copy the chunk root and tree root pointers (bytenr, generation, level)
 * into the in-memory copy of the super block, ready to be written out at
 * the end of the commit.
 */
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

/* returns nonzero if the currently running transaction is mid-commit */
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

/* returns nonzero if the currently running transaction is blocked */
int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;	/* handle the worker will commit */
	struct btrfs_root *root;
	struct work_struct work;
};

/* work callback: commits ac->newtrans on behalf of the original caller */
static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
		rwsem_acquire_read(
		     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		     0, 1, _THIS_IP_);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

/*
 * Kick off a commit of the current transaction without waiting for it to
 * finish.  A fresh join handle is committed from a workqueue; the caller's
 * handle is ended here.  If wait_for_unblock is set we wait until the commit
 * has unblocked new joiners, otherwise only until the commit has started.
 */
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (trans->type < TRANS_JOIN_NOLOCK)
		rwsem_release(
			&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
			1, _THIS_IP_);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	/* drop the reference taken above */
	put_transaction(cur_trans);
	return 0;
}


/*
 * Tear down a transaction after an abort: abort the handle, detach the
 * transaction from fs_info, wait out other writers if it is the running
 * transaction, and free everything.
 */
static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	DEFINE_WAIT(wait);

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);

	/* already cleaned up (not on the trans_list) — just end the handle */
	if (list_empty(&cur_trans->list)) {
		spin_unlock(&root->fs_info->trans_lock);
		btrfs_end_transaction(trans, root);
		return;
	}

	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		/* block new joiners, then wait until we're the only writer */
		root->fs_info->trans_no_join = 1;
		spin_unlock(&root->fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&root->fs_info->trans_lock);
		root->fs_info->running_transaction = NULL;
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	/*
	 * Two puts: apparently one for the list reference removed above and
	 * one for the handle's reference (mirrors the normal commit path).
	 */
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	/* allow new transactions to start again */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * Flush work that must hit disk before the commit proceeds: delalloc data
 * (when flushoncommit is set or snapshots are pending), delayed items,
 * qgroup accounting for any refs those added, and ordered operations.
 */
static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root)
{
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
	int snap_pending = 0;
	int ret;

	if (!flush_on_commit) {
		spin_lock(&root->fs_info->trans_lock);
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (flush_on_commit || snap_pending) {
		ret = btrfs_start_delalloc_inodes(root, 1);
		if (ret)
			return ret;
		btrfs_wait_ordered_extents(root, 1);
	}

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		return ret;

	/*
	 * running the delayed items may have added new refs. account
	 * them now so that they hinder processing of more delayed refs
	 * as little as possible.
	 */
	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

	/*
	 * rename don't use btrfs_join_transaction, so, once we
	 * set the transaction to blocked above, we aren't going
	 * to get any new ordered operations.  We can safely run
	 * it here and know for sure that nothing new will be added
	 * to the list
	 */
	ret = btrfs_run_ordered_operations(trans, root, 1);

	return ret;
}

/*
 * btrfs_transaction state sequence:
 * in_commit = 0, blocked = 0  (initial)
 * in_commit = 1, blocked = 1
 * blocked = 0
 * commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();

	ret = btrfs_run_ordered_operations(trans, root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* Stop the commit early if ->aborted is set */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* make a pass through all the delayed refs we have so far
	 * any runnings procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		/*
		 * Someone else is already committing this transaction;
		 * take a reference, end our handle and just wait for
		 * their commit to finish.
		 */
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return ret;
	}

	/* we own the commit: in_commit = 1, blocked = 1 (see sequence above) */
	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	/* wait for the previous transaction on the list to finish committing */
	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	/* on non-SSD, let a very young transaction grow before committing */
	if (!btrfs_test_opt(root, SSD) &&
	    (now < cur_trans->start_time || now - cur_trans->start_time < 1))
		should_grow = 1;

	/* flush repeatedly until no other writers remain (and, if growing,
	 * no new joiners arrived during the pass) */
	do {
		joined = cur_trans->num_joined;

		WARN_ON(cur_trans != trans->transaction);

		ret = btrfs_flush_all_pending_stuffs(trans, root);
		if (ret)
			goto cleanup_transaction;

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = btrfs_flush_all_pending_stuffs(trans, root);
	if (ret)
		goto cleanup_transaction;

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto cleanup_transaction;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the tree which are snapshoted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree. So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	/* snapshot the super for writing; super_copy may keep changing */
	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	/* unblock joiners: blocked = 0 (see sequence above) */
	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	/* one put for the list reference, one for our handle's reference */
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}
	btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * return < 0 if error
 * 0 if there are no more dead_roots at the time of call
 * 1 there are more to be processed, call me again
 *
 * The return value indicates there are certainly more snapshots to delete, but
 * if there comes a new one during processing, it may return 0. We don't mind,
 * because btrfs_commit_super will poke cleaner thread and it will process it a
 * few seconds later.
1882 */ 1883 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root) 1884 { 1885 int ret; 1886 struct btrfs_fs_info *fs_info = root->fs_info; 1887 1888 if (fs_info->sb->s_flags & MS_RDONLY) { 1889 pr_debug("btrfs: cleaner called for RO fs!\n"); 1890 return 0; 1891 } 1892 1893 spin_lock(&fs_info->trans_lock); 1894 if (list_empty(&fs_info->dead_roots)) { 1895 spin_unlock(&fs_info->trans_lock); 1896 return 0; 1897 } 1898 root = list_first_entry(&fs_info->dead_roots, 1899 struct btrfs_root, root_list); 1900 list_del(&root->root_list); 1901 spin_unlock(&fs_info->trans_lock); 1902 1903 pr_debug("btrfs: cleaner removing %llu\n", 1904 (unsigned long long)root->objectid); 1905 1906 btrfs_kill_all_delayed_nodes(root); 1907 1908 if (btrfs_header_backref_rev(root->node) < 1909 BTRFS_MIXED_BACKREF_REV) 1910 ret = btrfs_drop_snapshot(root, NULL, 0, 0); 1911 else 1912 ret = btrfs_drop_snapshot(root, NULL, 1, 0); 1913 /* 1914 * If we encounter a transaction abort during snapshot cleaning, we 1915 * don't want to crash here 1916 */ 1917 BUG_ON(ret < 0 && ret != -EAGAIN && ret != -EROFS); 1918 return 1; 1919 } 1920