/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"

#define BTRFS_ROOT_TRANS_TAG 0

void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

static inline int can_join_transaction(struct btrfs_transaction *trans,
				       int type)
{
	return !(trans->in_commit &&
		 type != TRANS_JOIN &&
		 type != TRANS_JOIN_NOLOCK);
}
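
/*
 * Return codes for join_transaction():
 *   0        joined the running transaction or created a fresh one
 *   -EROFS   the filesystem has hit an error and is read-only
 *   -EBUSY   a commit is in progress and this handle type may not join
 *   -ENOENT  TRANS_ATTACH was requested but no transaction is running
 *   -ENOMEM  allocating a new transaction failed
 */
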
/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	if (fs_info->trans_no_join) {
		/*
		 * If we are JOIN_NOLOCK we're already committing a current
		 * transaction, we just need a handle to deal with something
		 * when committing the transaction, such as inode cache and
		 * space cache. It is a special case.
		 */
		if (type != TRANS_JOIN_NOLOCK) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (!can_join_transaction(cur_trans, type)) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = fs_info->running_transaction;
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->ordered_operations);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}
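
/*
 * Reference counting recap: a freshly created transaction starts with
 * use_count == 2.  One reference belongs to the handle that created it
 * and is dropped in __btrfs_end_transaction(); the other keeps the
 * transaction alive until the commit (or cleanup_transaction()) drops it.
 */
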
/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}
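
/*
 * The smp_rmb() in btrfs_record_root_in_trans() pairs with the smp_wmb()
 * calls in record_root_in_trans(): a reader that sees in_trans_setup == 0
 * is guaranteed to also see the last_trans update and the initialized
 * reloc root that were written before it.
 */
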
/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}
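
/*
 * Note on handle types: the "type < TRANS_JOIN_NOLOCK" comparisons in
 * start_transaction() and the end/commit paths rely on the TRANS_*
 * definitions in transaction.h placing TRANS_JOIN_NOLOCK and TRANS_ATTACH
 * after the other types, so that only those two skip sb_start_intwrite().
 */
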
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;
	u64 qgroup_reserved = 0;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->leafsize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type < TRANS_JOIN_NOLOCK)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = 0;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	h->allocating_chunk = false;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}
	h->qgroup_reserved = qgroup_reserved;

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}
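
/*
 * Illustrative usage of the handle API above (a sketch, not a caller in
 * this file): reserve space for the items you will touch, do the tree
 * operations, then end the handle.
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = btrfs_update_inode(trans, root, inode);
 *	btrfs_end_transaction(trans, root);
 */
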
/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		ret = -EINVAL;
		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		/* The specified transaction doesn't exist */
		if (!cur_trans)
			goto out;
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}
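
/*
 * In __btrfs_end_transaction() below, "throttle" selects the heavier exit
 * path: it may turn the end into a full commit once the transaction has
 * been blocked, and it runs the delayed iputs afterwards.  "lock" is zero
 * only for TRANS_JOIN_NOLOCK handles, which must not set the blocked state
 * or wake the transaction kthread.
 */
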
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	/*
	 * do the qgroup accounting as early as possible
	 */
	err = btrfs_delayed_refs_qgroup_accounting(trans, info);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	/*
	 * the same root has to be passed to start_transaction and
	 * end_transaction. Subvolume quota depends on this.
	 */
	WARN_ON(trans->root != root);

	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	while (count < 2) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		err = -EIO;
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				   mark, &cached_state, GFP_NOFS);
		cached_state = NULL;
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
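
/*
 * Note that btrfs_write_marked_extents() converts "mark" into
 * EXTENT_NEED_WAIT, and btrfs_wait_marked_extents() below consumes
 * EXTENT_NEED_WAIT, so the two are meant to run as a pair over a given
 * set of dirty pages, the way btrfs_write_and_wait_marked_extents()
 * calls them.
 */
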
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				 0, 0, &cached_state, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}
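
/*
 * The extent root is deliberately not switched above: commit_cowonly_roots()
 * below switches it under fs_info->extent_commit_sem only after every other
 * cowonly root has been updated, since any of those updates can still
 * allocate from, and therefore modify, the extent tree.
 */
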
/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	WARN_ON(ret);
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	WARN_ON(ret);

	ret = btrfs_run_qgroups(trans, root->fs_info);
	BUG_ON(ret);

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * update all the fs roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}
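
/*
 * The gang lookup above pairs with record_root_in_trans(): a root shows up
 * here exactly when BTRFS_ROOT_TRANS_TAG was set on it for this transaction,
 * and the tag is cleared before the root is processed so a modification in
 * a later transaction can re-tag it.
 */
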
/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			printk(KERN_DEBUG "btrfs: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	root->defrag_running = 0;
	return ret;
}
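
/*
 * The xchg() above makes btrfs_defrag_root() safe against concurrent
 * callers: only the caller that flips defrag_running from 0 to 1 runs the
 * loop, everyone else returns 0 right away.  Each -EAGAIN pass gets its
 * own transaction, so the defrag never pins a single transaction open.
 */
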
/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		ret = pending->error = -ENOMEM;
		goto path_alloc_fail;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto no_free_objectid;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
					  to_reserve,
					  BTRFS_RESERVE_NO_FLUSH);
		if (ret) {
			pending->error = ret;
			goto no_free_objectid;
		}
	}

	ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		pending->error = ret;
		goto no_free_objectid;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto fail;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
	new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);
	memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
	memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
	btrfs_set_root_stransid(new_root_item, 0);
	btrfs_set_root_rtransid(new_root_item, 0);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
fail:
	dput(parent);
	trans->block_rsv = rsv;
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
path_alloc_fail:
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;

	list_for_each_entry(pending, head, list)
		create_pending_snapshot(trans, fs_info, pending);
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}
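
/*
 * update_super_roots() only touches the in-memory super_copy; the commit
 * path memcpy()s that into super_for_commit, and write_ctree_super() is
 * what actually puts it on disk.
 */
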
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
		rwsem_acquire_read(
		     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		     0, 1, _THIS_IP_);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (trans->type < TRANS_JOIN_NOLOCK)
		rwsem_release(
			&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
			1, _THIS_IP_);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}


static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		root->fs_info->running_transaction = NULL;
		root->fs_info->trans_no_join = 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
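
/*
 * cleanup_transaction() (like the success path in btrfs_commit_transaction())
 * calls put_transaction() twice on purpose: one put drops the handle's
 * reference and the other drops the long-lived reference taken when the
 * transaction was created with use_count == 2 in join_transaction().
 */
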
static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root)
{
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
	int snap_pending = 0;
	int ret;

	if (!flush_on_commit) {
		spin_lock(&root->fs_info->trans_lock);
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (flush_on_commit || snap_pending) {
		ret = btrfs_start_delalloc_inodes(root, 1);
		if (ret)
			return ret;
		btrfs_wait_ordered_extents(root, 1);
	}

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		return ret;

	/*
	 * running the delayed items may have added new refs. account
	 * them now so that they hinder processing of more delayed refs
	 * as little as possible.
	 */
	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

	/*
	 * rename doesn't use btrfs_join_transaction, so, once we
	 * set the transaction to blocked above, we aren't going
	 * to get any new ordered operations.  We can safely run
	 * it here and know for sure that nothing new will be added
	 * to the list
	 */
	ret = btrfs_run_ordered_operations(trans, root, 1);

	return ret;
}

/*
 * btrfs_transaction state sequence:
 * in_commit = 0, blocked = 0  (initial)
 * in_commit = 1, blocked = 1
 * blocked = 0
 * commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();

	ret = btrfs_run_ordered_operations(trans, root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* Stop the commit early if ->aborted is set */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return ret;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (!btrfs_test_opt(root, SSD) &&
	    (now < cur_trans->start_time || now - cur_trans->start_time < 1))
		should_grow = 1;

	do {
		joined = cur_trans->num_joined;

		WARN_ON(cur_trans != trans->transaction);

		ret = btrfs_flush_all_pending_stuffs(trans, root);
		if (ret)
			goto cleanup_transaction;

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = btrfs_flush_all_pending_stuffs(trans, root);
	if (ret)
		goto cleanup_transaction;

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto cleanup_transaction;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with.  Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* commit_cowonly_roots() is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree. So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction.");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
//	WARN_ON(1);
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		int ret;

		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
		BUG_ON(ret < 0);
	}
	return 0;
}