/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"

#define BTRFS_ROOT_TRANS_TAG 0

static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&root->fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans) {
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

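	/*
	 * The allocation below may sleep, so it is done without trans_lock
	 * held; once we retake the lock we must recheck for a transaction
	 * that raced in while we were allocating.
	 */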
	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;
	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->running_transaction) {
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = root->fs_info->running_transaction;
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    root->fs_info->btree_inode->i_mapping);
	root->fs_info->generation++;
	cur_trans->transid = root->fs_info->generation;
	root->fs_info->running_transaction = cur_trans;
	spin_unlock(&root->fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits.
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take
		 * the lock.  smp_wmb() makes sure that all the writes above
		 * are done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}

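/*
 * Fast-path note: the smp_rmb() below pairs with the smp_wmb()s in
 * record_root_in_trans().  A reader that sees in_trans_setup == 0 is
 * guaranteed to also see the completed reloc root setup behind the
 * root->last_trans update, so it can safely skip the reloc_mutex.
 */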
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

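/*
 * Typical caller pattern (sketch):
 *
 *	trans = btrfs_start_transaction(root, num_items);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify the trees ...
 *	btrfs_end_transaction(trans, root);
 *
 * num_items is the number of items the caller expects to insert or
 * modify; it sizes the metadata reservation made below.
 */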
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(NULL, root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}

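/*
 * Throttle callers that are generating new work: unless a userspace
 * ioctl transaction is open, wait for any blocked (committing)
 * transaction to unblock before returning.
 */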
void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;
	ret = btrfs_block_rsv_check(trans, root,
				    &root->fs_info->global_block_rsv, 0, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates)
		btrfs_run_delayed_refs(trans, root, updates);

	return should_end_transaction(trans, root);
}

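/*
 * throttle != 0 allows expensive work on the way out (a full commit if
 * the transaction is blocked, plus running the delayed iputs);
 * lock == 0 is for the _nolock callers, which skip the blocked/commit
 * handling entirely.
 */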
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle,
				   int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;

			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	btrfs_trans_release_metadata(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0, 1);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0, 0);
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	if (err)
		werr = err;
	return werr;
}

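/*
 * The write and wait halves are deliberately separate so that callers
 * (the tree-log code, for example) can submit writeback for one set of
 * marked extents and only later wait on it; see
 * btrfs_write_and_wait_marked_extents() below for the combined form.
 */
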
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
	return ret || ret2;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					&trans->transaction->dirty_pages,
					EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This adds the
 * given root to the list of dead roots that need to be deleted.
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

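/*
 * Note: roots on the dead_roots list are reaped later by
 * btrfs_clean_old_snapshots() at the bottom of this file.
 */
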
/*
 * write out all the dirty fs tree roots: update each root's item in the
 * tree of tree roots and switch over to the new commit roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 */
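/*
 * Roughly: reserve space, add the directory entry in the snapshot's
 * parent, flush the delayed items, COW the source root's node with
 * btrfs_copy_root(), then insert the new root item and the root
 * back/forward references.
 */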
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	rsv = trans->block_rsv;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
	btrfs_orphan_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
					  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode, &key,
				BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);
	dput(parent);

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));

	btrfs_reloc_post_snapshot(trans, pending);
	btrfs_orphan_post_snapshot(trans, pending);
fail:
	kfree(new_root_item);
	trans->block_rsv = rsv;
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = &root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (super->cache_generation != 0 || btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

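/*
 * Of the two helpers above, the first returns as soon as the commit has
 * started (in_commit set); the second also waits until the commit stops
 * blocking new joins (blocked cleared) or finishes outright.
 */
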
/*
 * commit transactions asynchronously.  once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}

/*
 * btrfs_transaction state sequence:
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
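/*
 * Each state change has a matching waitqueue: transaction_blocked_wait
 * waiters wake when in_commit is set, transaction_wait waiters when
 * blocked clears, and commit_wait waiters when commit_done is set.
 */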
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	btrfs_trans_release_metadata(trans, root);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		ret = btrfs_run_delayed_items(trans, root);
		BUG_ON(ret);

		/*
		 * rename doesn't use btrfs_join_transaction, so once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * them here and know for sure that nothing new will be
		 * added to the list.
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * trans_no_join, so make sure to wait for num_writers to equal 1
	 * again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* commit_fs_roots() and commit_cowonly_roots() are responsible for
	 * getting the various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

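	/*
	 * the commit is done; drop the transaction from the global list.
	 * The two put_transaction() calls below release this handle's
	 * reference and the long-lived one taken when the transaction was
	 * created (use_count starts at 2 in join_transaction).
	 */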
	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for
 * deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, NULL, 0);
		else
			btrfs_drop_snapshot(root, NULL, 1);
	}
	return 0;
}