/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0

static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_BLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START),
	[TRANS_STATE_COMMIT_START]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
	[TRANS_STATE_COMPLETED]		= (__TRANS_USERSPACE |
					   __TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK),
};

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
		if (transaction->delayed_refs.pending_csums)
			printk(KERN_ERR "pending csums is %llu\n",
			       transaction->delayed_refs.pending_csums);
		while (!list_empty(&transaction->pending_chunks)) {
			struct extent_map *em;

			em = list_first_entry(&transaction->pending_chunks,
					      struct extent_map, list);
			list_del_init(&em->list);
			free_extent_map(em);
		}
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static void clear_btree_io_tree(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once clear_btree_io_tree is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

static noinline void switch_commit_roots(struct btrfs_transaction *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->objectid))
			btrfs_unpin_free_ino(root);
		clear_btree_io_tree(&root->dirty_log_pages);
	}

	/* We can free old roots now. */
	spin_lock(&trans->dropped_roots_lock);
	while (!list_empty(&trans->dropped_roots)) {
		root = list_first_entry(&trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&trans->dropped_roots_lock);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&trans->dropped_roots_lock);
	}
	spin_unlock(&trans->dropped_roots_lock);
	up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	init_waitqueue_head(&cur_trans->pending_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	atomic_set(&cur_trans->pending_ordered, 0);
	cur_trans->flags = 0;
	cur_trans->start_time = get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->pending_chunks);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	cur_trans->num_dirty_bgs = 0;
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

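/*
 * A minimal sketch of the handle lifecycle built on the helpers in this
 * file (illustrative only, not code from this file):
 *
 *	trans = btrfs_start_transaction(root, num_items);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify the trees through the handle ...
 *	ret = btrfs_end_transaction(trans, root);
 */
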
/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take
		 * the lock.
		 * smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&root->fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&root->fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&root->fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_BLOCKED &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   cur_trans->aborted);
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	if (!root->fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	int ret;

	/* Send isn't supposed to start transactions. */
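	/*
	 * (Note: the send code marks its task by stuffing
	 * BTRFS_SEND_TRANS_STUB into current->journal_info, which is
	 * what the assertion below catches.)
	 */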
	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		qgroup_reserved = num_items * root->nodesize;
		ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved);
		if (ret)
			return ERR_PTR(ret);

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += root->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->root = root;
	h->use_count = 1;

	h->type = type;
	h->can_flush_pending_bgs = true;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
	    may_wait_transaction(root, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	btrfs_qgroup_free_meta(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items,
					int min_factor)
{
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	int ret;

	trans = btrfs_start_transaction(root, num_items);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans))
		return trans;

	num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
	ret = btrfs_cond_migrate_bytes(root->fs_info,
				       &root->fs_info->trans_block_rsv,
				       num_bytes,
				       min_factor);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ERR_PTR(ret);
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	trans->bytes_reserved = num_bytes;

	return trans;
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN,
				 BTRFS_RESERVE_NO_FLUSH);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE,
				 BTRFS_RESERVE_NO_FLUSH);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.
 * But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *	btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		btrfs_wait_for_commit(root, 0);

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > root->fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (root->fs_info->global_block_rsv.space_info->full &&
	    btrfs_check_space_for_delayed_refs(trans, root))
		return 1;

	return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_BLOCKED ||
	    cur_trans->delayed_refs.flushing)
		return 1;

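	/*
	 * Flush the delayed ref updates accumulated on this handle first;
	 * should_end_transaction() bases its answer on the global block
	 * reserve, which running those refs may change.
	 */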
	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates * 2);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	unsigned long cur = trans->delayed_ref_updates;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;
	int must_run_delayed_refs = 0;

	if (trans->use_count > 1) {
		trans->use_count--;
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	trans->delayed_ref_updates = 0;
	if (!trans->sync) {
		must_run_delayed_refs =
			btrfs_should_throttle_delayed_refs(trans, root);
		cur = max_t(unsigned long, cur, 32);

		/*
		 * don't make the caller wait if they are from a NOLOCK
		 * or ATTACH transaction, it will deadlock with commit
		 */
		if (must_run_delayed_refs == 1 &&
		    (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
			must_run_delayed_refs = 2;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	btrfs_trans_release_chunk_metadata(trans);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root) &&
	    ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
		spin_lock(&info->trans_lock);
		if (cur_trans->state == TRANS_STATE_RUNNING)
			cur_trans->state = TRANS_STATE_BLOCKED;
		spin_unlock(&info->trans_lock);
	}

	if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
		if (throttle)
			return btrfs_commit_transaction(trans, root);
		else
			wake_up_process(info->transaction_kthread);
	}

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
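	/*
	 * (The barrier below pairs with the implicit barrier in the
	 * waiter's prepare_to_wait(), so the waitqueue_active() check
	 * can't be reordered before the num_writers update.)
	 */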
	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	if (must_run_delayed_refs) {
		btrfs_async_run_delayed_refs(root, cur,
					     must_run_delayed_refs == 1);
	}
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state, GFP_NOFS);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to btrfs_wait_marked_extents() would not know that writeback
		 * for this range started and therefore wouldn't wait for it to
		 * finish - we don't want to commit a superblock that points to
		 * btree nodes/leaves for which writeback hasn't finished yet
		 * (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through clear_btree_io_tree()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return werr;
}

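/*
 * The write pass above and the wait pass below are deliberately separate
 * functions: btrfs_write_and_wait_marked_extents() wraps only the write
 * pass in a block plug so the buffered btree writes can be merged into
 * larger requests, and the tree-log code runs the two passes at different
 * points of a log commit.
 */
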
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	bool errors = false;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through clear_btree_io_tree()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT,
				       0, 0, &cached_state, GFP_NOFS);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		if ((mark & EXTENT_DIRTY) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
				       &btree_ino->runtime_flags))
			errors = true;

		if ((mark & EXTENT_NEW) &&
		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	} else {
		if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
				       &btree_ino->runtime_flags))
			errors = true;
	}

	if (errors && !werr)
		werr = -EIO;

	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root)
{
	int ret;

	ret = btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
	clear_btree_io_tree(&trans->transaction->dirty_pages);

	return ret;
}

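/*
 * Concrete instance of the feedback loop handled below: updating the
 * extent root's item in the tree of tree roots COWs a tree-root block,
 * which allocates an extent, which moves the extent root again, so
 * update_cowonly_root() must loop until the root stops moving.
 */
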
/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans, root->fs_info);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans, root);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;
again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		if (ret)
			return ret;
	}

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);
	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

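/*
 * Note on the "again:" loop above: writing dirty block groups can dirty
 * cow-only roots again (the extent root in particular), and updating those
 * roots can re-dirty block groups, so commit_cowonly_roots() alternates
 * between the two lists until both stay empty.
 */
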
/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (list_empty(&root->root_list))
		list_add_tail(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * update all the fs tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
			btrfs_qgroup_free_meta_all(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			pr_debug("BTRFS: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

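/*
 * btrfs_defrag_root() above deliberately starts and ends a transaction
 * per pass and rechecks for cancellation in between, so a long-running
 * defrag never pins a single transaction open.
 */
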
/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error occurs that may affect the commit of the current transaction,
 * we should return the error number. If an error only affects the creation
 * of the pending snapshots, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		pending->error = -ENOMEM;
		return 0;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	/*
	 * Have qgroup skip the new snapshot's qgroupid, as it is accounted
	 * for by the later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;

	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
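	/*
	 * A name collision is reported through pending->error (-EEXIST)
	 * and does not abort the transaction; only later failures, which
	 * could leave the trees half-updated, call
	 * btrfs_abort_transaction().
	 */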
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We have checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
					  new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, root, ret);
			goto fail;
		}
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * account qgroup counters before qgroup_inherit()
	 */
	ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
	if (ret)
		goto fail;
	ret = btrfs_qgroup_account_extents(trans, fs_info);
	if (ret)
		goto fail;
	ret = btrfs_qgroup_inherit(trans, fs_info,
				   root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		if (ret)
			break;
	}
	return ret;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (root->fs_info->update_uuid_tree_gen)
		super->uuid_tree_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait,
		   trans->state >= TRANS_STATE_COMMIT_START ||
		   trans->aborted);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->state >= TRANS_STATE_UNBLOCKED ||
		   trans->aborted);
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->root->fs_info->sb, SB_FREEZE_FS);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
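	/*
	 * This release pairs with the __sb_writers_acquired() call in
	 * do_async_commit() above; the freeze protection itself stays
	 * held across the handoff to the worker.
	 */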
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(root->fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}

static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	DEFINE_WAIT(wait);

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);

	/*
	 * If the transaction is removed from the list, it means this
	 * transaction has been committed successfully, so it is impossible
	 * to call the cleanup function.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&root->fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&root->fs_info->trans_lock);
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans == root->fs_info->running_transaction)
		root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(root->fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		return btrfs_start_delalloc_roots(fs_info, 1, -1);
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		btrfs_wait_ordered_roots(fs_info, -1);
}

static inline void
btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans)
{
	wait_event(cur_trans->pending_wait,
		   atomic_read(&cur_trans->pending_ordered) == 0);
}

int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	int ret;

	/* Stop the commit early if ->aborted is set */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
		int run_it = 0;

		/* this mutex is also taken before trying to set
		 * block groups readonly.  We need to make sure
		 * that nobody has set a block group readonly
		 * after extents from that block group have been
		 * allocated for cache files.  btrfs_set_block_group_ro
		 * will wait for the transaction to commit if it
		 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
		 *
		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
		 * only one process starts all the block group IO.  It wouldn't
		 * hurt to have more than one go through, but there's no
		 * real advantage to it either.
		 */
		mutex_lock(&root->fs_info->ro_block_group_mutex);
		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
				      &cur_trans->flags))
			run_it = 1;
		mutex_unlock(&root->fs_info->ro_block_group_mutex);

		if (run_it)
			ret = btrfs_start_dirty_block_groups(trans, root);
	}
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&root->fs_info->trans_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		if (unlikely(cur_trans->aborted))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);
			ret = prev_trans->aborted;

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto cleanup_transaction;
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(root->fs_info);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* some pending work might be added after the previous flush. */
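	/*
	 * At this point extwriter_counter_read() has dropped to zero and
	 * the transaction state blocks new external writers from joining,
	 * so one more delayed-items pass picks up anything queued before
	 * the last writer left.
	 */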
	/*
	 * Some pending work might be added after the previous flush, so run
	 * the delayed items once more to pick it up.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(root->fs_info);

	btrfs_wait_pending_ordered(cur_trans);

	btrfs_scrub_pause(root);
	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * COMMIT_DOING so make sure to wait until num_writers == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inodes
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with.  Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/* Record old roots for later qgroup accounting */
	ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	/* commit_cowonly_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);
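	/*
	 * Illustrative note (editor's addition): from here the commit holds
	 * reloc_mutex and then tree_log_mutex, in that order.  The error
	 * paths below must unlock both in reverse order before jumping to
	 * scrub_continue, because the cleanup labels do not drop them.
	 */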
	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the transaction is done, we can apply the pending changes
	 * before the next transaction.
	 */
	btrfs_apply_pending_changes(root->fs_info);

	/* commit_fs_roots gets rid of all the tree log roots; it is now
	 * safe to free the root of the tree of log roots.
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	/*
	 * Since the fs roots are all committed, we can get quite accurate
	 * new_roots, so let's do the quota accounting.
	 */
	ret = btrfs_qgroup_account_extents(trans, root->fs_info);
	if (ret < 0) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	list_add_tail(&root->fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	list_add_tail(&root->fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(cur_trans, root->fs_info);

	assert_qgroups_uptodate(trans);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(root);

	btrfs_set_super_log_root(root->fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	btrfs_update_commit_device_size(root->fs_info);
	btrfs_update_commit_device_bytes_used(root, cur_trans);

	clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
	clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);

	btrfs_trans_release_chunk_metadata(trans);

	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_std_error(root->fs_info, ret,
			    "Error while writing out transaction");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}
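	/*
	 * Illustrative note (editor's addition): the ordering above is what
	 * makes the commit crash-safe.  btrfs_write_and_wait_transaction()
	 * gets every dirty tree block onto disk first; only then does
	 * write_ctree_super() point the superblock at the new roots.  A
	 * crash between the two steps leaves the old superblock, and thus
	 * the old consistent tree, intact.
	 */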
	/*
	 * The super is written; we can now safely allow the tree-loggers
	 * to go about their business.
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
		btrfs_clear_space_info_full(root->fs_info);

	root->fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread &&
	    current != root->fs_info->cleaner_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

scrub_continue:
	btrfs_scrub_continue(root);
cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * Return < 0 on error;
 * 0 if there are no more dead_roots at the time of the call;
 * 1 if there are more to be processed, call me again.
 *
 * A return value of 1 means there are certainly more snapshots to delete,
 * but if a new one arrives while we are processing, we may return 0.  We
 * don't mind, because btrfs_commit_super() will poke the cleaner thread
 * and it will process the new dead root a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
			BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, NULL, 1, 0);

	return (ret < 0) ? 0 : 1;
}
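/*
 * Illustrative sketch (editor's addition): a caller such as the cleaner
 * thread is assumed to drive btrfs_clean_one_deleted_snapshot() in a loop
 * along these lines, stopping on 0 ("nothing left for now") or an error:
 *
 *	int ret;
 *
 *	do {
 *		ret = btrfs_clean_one_deleted_snapshot(root);
 *	} while (ret == 1);
 *
 * with rescheduling and shutdown checks between iterations in the real
 * caller.
 */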
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
{
	unsigned long prev;
	unsigned long bit;

	prev = xchg(&fs_info->pending_changes, 0);
	if (!prev)
		return;

	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_COMMIT;
	if (prev & bit)
		btrfs_debug(fs_info, "pending commit done");
	prev &= ~bit;

	if (prev)
		btrfs_warn(fs_info,
			"unknown pending changes left 0x%lx, ignoring", prev);
}
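/*
 * Illustrative sketch (editor's addition): the producer side of the
 * pending_changes word is assumed to set bits atomically, e.g. via a
 * hypothetical helper like
 *
 *	static void example_queue_pending_change(struct btrfs_fs_info *fs_info,
 *						 unsigned long bit)
 *	{
 *		set_bit(bit, &fs_info->pending_changes);
 *	}
 *
 * The xchg() in btrfs_apply_pending_changes() reads and clears the whole
 * word in one atomic step, so any change queued after the exchange is
 * simply left for the next commit to apply.
 */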