/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
66 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for 67 * ENOSPC accounting 68 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update 69 * bytes_may_use as the ENOSPC accounting is done elsewhere 70 */ 71 enum { 72 RESERVE_FREE = 0, 73 RESERVE_ALLOC = 1, 74 RESERVE_ALLOC_NO_ACCOUNT = 2, 75 }; 76 77 static int update_block_group(struct btrfs_root *root, 78 u64 bytenr, u64 num_bytes, int alloc); 79 static int __btrfs_free_extent(struct btrfs_trans_handle *trans, 80 struct btrfs_root *root, 81 u64 bytenr, u64 num_bytes, u64 parent, 82 u64 root_objectid, u64 owner_objectid, 83 u64 owner_offset, int refs_to_drop, 84 struct btrfs_delayed_extent_op *extra_op); 85 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, 86 struct extent_buffer *leaf, 87 struct btrfs_extent_item *ei); 88 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 89 struct btrfs_root *root, 90 u64 parent, u64 root_objectid, 91 u64 flags, u64 owner, u64 offset, 92 struct btrfs_key *ins, int ref_mod); 93 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, 94 struct btrfs_root *root, 95 u64 parent, u64 root_objectid, 96 u64 flags, struct btrfs_disk_key *key, 97 int level, struct btrfs_key *ins); 98 static int do_chunk_alloc(struct btrfs_trans_handle *trans, 99 struct btrfs_root *extent_root, u64 flags, 100 int force); 101 static int find_next_key(struct btrfs_path *path, int level, 102 struct btrfs_key *key); 103 static void dump_space_info(struct btrfs_space_info *info, u64 bytes, 104 int dump_block_groups); 105 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, 106 u64 num_bytes, int reserve); 107 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, 108 u64 num_bytes); 109 int btrfs_pin_extent(struct btrfs_root *root, 110 u64 bytenr, u64 num_bytes, int reserved); 111 112 static noinline int 113 block_group_cache_done(struct btrfs_block_group_cache *cache) 114 { 115 smp_mb(); 116 return cache->cached == BTRFS_CACHE_FINISHED || 117 cache->cached == BTRFS_CACHE_ERROR; 118 } 119 120 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) 121 { 122 return (cache->flags & bits) == bits; 123 } 124 125 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) 126 { 127 atomic_inc(&cache->count); 128 } 129 130 void btrfs_put_block_group(struct btrfs_block_group_cache *cache) 131 { 132 if (atomic_dec_and_test(&cache->count)) { 133 WARN_ON(cache->pinned > 0); 134 WARN_ON(cache->reserved > 0); 135 kfree(cache->free_space_ctl); 136 kfree(cache); 137 } 138 } 139 140 /* 141 * this adds the block group to the fs_info rb tree for the block group 142 * cache 143 */ 144 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, 145 struct btrfs_block_group_cache *block_group) 146 { 147 struct rb_node **p; 148 struct rb_node *parent = NULL; 149 struct btrfs_block_group_cache *cache; 150 151 spin_lock(&info->block_group_cache_lock); 152 p = &info->block_group_cache_tree.rb_node; 153 154 while (*p) { 155 parent = *p; 156 cache = rb_entry(parent, struct btrfs_block_group_cache, 157 cache_node); 158 if (block_group->key.objectid < cache->key.objectid) { 159 p = &(*p)->rb_left; 160 } else if (block_group->key.objectid > cache->key.objectid) { 161 p = &(*p)->rb_right; 162 } else { 163 spin_unlock(&info->block_group_cache_lock); 164 return -EEXIST; 165 } 166 } 167 168 rb_link_node(&block_group->cache_node, parent, p); 169 rb_insert_color(&block_group->cache_node, 170 
&info->block_group_cache_tree); 171 172 if (info->first_logical_byte > block_group->key.objectid) 173 info->first_logical_byte = block_group->key.objectid; 174 175 spin_unlock(&info->block_group_cache_lock); 176 177 return 0; 178 } 179 180 /* 181 * This will return the block group at or after bytenr if contains is 0, else 182 * it will return the block group that contains the bytenr 183 */ 184 static struct btrfs_block_group_cache * 185 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr, 186 int contains) 187 { 188 struct btrfs_block_group_cache *cache, *ret = NULL; 189 struct rb_node *n; 190 u64 end, start; 191 192 spin_lock(&info->block_group_cache_lock); 193 n = info->block_group_cache_tree.rb_node; 194 195 while (n) { 196 cache = rb_entry(n, struct btrfs_block_group_cache, 197 cache_node); 198 end = cache->key.objectid + cache->key.offset - 1; 199 start = cache->key.objectid; 200 201 if (bytenr < start) { 202 if (!contains && (!ret || start < ret->key.objectid)) 203 ret = cache; 204 n = n->rb_left; 205 } else if (bytenr > start) { 206 if (contains && bytenr <= end) { 207 ret = cache; 208 break; 209 } 210 n = n->rb_right; 211 } else { 212 ret = cache; 213 break; 214 } 215 } 216 if (ret) { 217 btrfs_get_block_group(ret); 218 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid) 219 info->first_logical_byte = ret->key.objectid; 220 } 221 spin_unlock(&info->block_group_cache_lock); 222 223 return ret; 224 } 225 226 static int add_excluded_extent(struct btrfs_root *root, 227 u64 start, u64 num_bytes) 228 { 229 u64 end = start + num_bytes - 1; 230 set_extent_bits(&root->fs_info->freed_extents[0], 231 start, end, EXTENT_UPTODATE, GFP_NOFS); 232 set_extent_bits(&root->fs_info->freed_extents[1], 233 start, end, EXTENT_UPTODATE, GFP_NOFS); 234 return 0; 235 } 236 237 static void free_excluded_extents(struct btrfs_root *root, 238 struct btrfs_block_group_cache *cache) 239 { 240 u64 start, end; 241 242 start = cache->key.objectid; 243 end = start + cache->key.offset - 1; 244 245 clear_extent_bits(&root->fs_info->freed_extents[0], 246 start, end, EXTENT_UPTODATE, GFP_NOFS); 247 clear_extent_bits(&root->fs_info->freed_extents[1], 248 start, end, EXTENT_UPTODATE, GFP_NOFS); 249 } 250 251 static int exclude_super_stripes(struct btrfs_root *root, 252 struct btrfs_block_group_cache *cache) 253 { 254 u64 bytenr; 255 u64 *logical; 256 int stripe_len; 257 int i, nr, ret; 258 259 if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) { 260 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid; 261 cache->bytes_super += stripe_len; 262 ret = add_excluded_extent(root, cache->key.objectid, 263 stripe_len); 264 if (ret) 265 return ret; 266 } 267 268 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 269 bytenr = btrfs_sb_offset(i); 270 ret = btrfs_rmap_block(&root->fs_info->mapping_tree, 271 cache->key.objectid, bytenr, 272 0, &logical, &nr, &stripe_len); 273 if (ret) 274 return ret; 275 276 while (nr--) { 277 u64 start, len; 278 279 if (logical[nr] > cache->key.objectid + 280 cache->key.offset) 281 continue; 282 283 if (logical[nr] + stripe_len <= cache->key.objectid) 284 continue; 285 286 start = logical[nr]; 287 if (start < cache->key.objectid) { 288 start = cache->key.objectid; 289 len = (logical[nr] + stripe_len) - start; 290 } else { 291 len = min_t(u64, stripe_len, 292 cache->key.objectid + 293 cache->key.offset - start); 294 } 295 296 cache->bytes_super += len; 297 ret = add_excluded_extent(root, start, len); 298 if (ret) { 299 kfree(logical); 300 return ret; 301 } 302 } 303 
304 kfree(logical); 305 } 306 return 0; 307 } 308 309 static struct btrfs_caching_control * 310 get_caching_control(struct btrfs_block_group_cache *cache) 311 { 312 struct btrfs_caching_control *ctl; 313 314 spin_lock(&cache->lock); 315 if (cache->cached != BTRFS_CACHE_STARTED) { 316 spin_unlock(&cache->lock); 317 return NULL; 318 } 319 320 /* We're loading it the fast way, so we don't have a caching_ctl. */ 321 if (!cache->caching_ctl) { 322 spin_unlock(&cache->lock); 323 return NULL; 324 } 325 326 ctl = cache->caching_ctl; 327 atomic_inc(&ctl->count); 328 spin_unlock(&cache->lock); 329 return ctl; 330 } 331 332 static void put_caching_control(struct btrfs_caching_control *ctl) 333 { 334 if (atomic_dec_and_test(&ctl->count)) 335 kfree(ctl); 336 } 337 338 /* 339 * this is only called by cache_block_group, since we could have freed extents 340 * we need to check the pinned_extents for any extents that can't be used yet 341 * since their free space will be released as soon as the transaction commits. 342 */ 343 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, 344 struct btrfs_fs_info *info, u64 start, u64 end) 345 { 346 u64 extent_start, extent_end, size, total_added = 0; 347 int ret; 348 349 while (start < end) { 350 ret = find_first_extent_bit(info->pinned_extents, start, 351 &extent_start, &extent_end, 352 EXTENT_DIRTY | EXTENT_UPTODATE, 353 NULL); 354 if (ret) 355 break; 356 357 if (extent_start <= start) { 358 start = extent_end + 1; 359 } else if (extent_start > start && extent_start < end) { 360 size = extent_start - start; 361 total_added += size; 362 ret = btrfs_add_free_space(block_group, start, 363 size); 364 BUG_ON(ret); /* -ENOMEM or logic error */ 365 start = extent_end + 1; 366 } else { 367 break; 368 } 369 } 370 371 if (start < end) { 372 size = end - start; 373 total_added += size; 374 ret = btrfs_add_free_space(block_group, start, size); 375 BUG_ON(ret); /* -ENOMEM or logic error */ 376 } 377 378 return total_added; 379 } 380 381 static noinline void caching_thread(struct btrfs_work *work) 382 { 383 struct btrfs_block_group_cache *block_group; 384 struct btrfs_fs_info *fs_info; 385 struct btrfs_caching_control *caching_ctl; 386 struct btrfs_root *extent_root; 387 struct btrfs_path *path; 388 struct extent_buffer *leaf; 389 struct btrfs_key key; 390 u64 total_found = 0; 391 u64 last = 0; 392 u32 nritems; 393 int ret = -ENOMEM; 394 395 caching_ctl = container_of(work, struct btrfs_caching_control, work); 396 block_group = caching_ctl->block_group; 397 fs_info = block_group->fs_info; 398 extent_root = fs_info->extent_root; 399 400 path = btrfs_alloc_path(); 401 if (!path) 402 goto out; 403 404 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); 405 406 /* 407 * We don't want to deadlock with somebody trying to allocate a new 408 * extent for the extent root while also trying to search the extent 409 * root to add free space. 
So we skip locking and search the commit 410 * root, since its read-only 411 */ 412 path->skip_locking = 1; 413 path->search_commit_root = 1; 414 path->reada = 1; 415 416 key.objectid = last; 417 key.offset = 0; 418 key.type = BTRFS_EXTENT_ITEM_KEY; 419 again: 420 mutex_lock(&caching_ctl->mutex); 421 /* need to make sure the commit_root doesn't disappear */ 422 down_read(&fs_info->extent_commit_sem); 423 424 next: 425 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 426 if (ret < 0) 427 goto err; 428 429 leaf = path->nodes[0]; 430 nritems = btrfs_header_nritems(leaf); 431 432 while (1) { 433 if (btrfs_fs_closing(fs_info) > 1) { 434 last = (u64)-1; 435 break; 436 } 437 438 if (path->slots[0] < nritems) { 439 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 440 } else { 441 ret = find_next_key(path, 0, &key); 442 if (ret) 443 break; 444 445 if (need_resched() || 446 rwsem_is_contended(&fs_info->extent_commit_sem)) { 447 caching_ctl->progress = last; 448 btrfs_release_path(path); 449 up_read(&fs_info->extent_commit_sem); 450 mutex_unlock(&caching_ctl->mutex); 451 cond_resched(); 452 goto again; 453 } 454 455 ret = btrfs_next_leaf(extent_root, path); 456 if (ret < 0) 457 goto err; 458 if (ret) 459 break; 460 leaf = path->nodes[0]; 461 nritems = btrfs_header_nritems(leaf); 462 continue; 463 } 464 465 if (key.objectid < last) { 466 key.objectid = last; 467 key.offset = 0; 468 key.type = BTRFS_EXTENT_ITEM_KEY; 469 470 caching_ctl->progress = last; 471 btrfs_release_path(path); 472 goto next; 473 } 474 475 if (key.objectid < block_group->key.objectid) { 476 path->slots[0]++; 477 continue; 478 } 479 480 if (key.objectid >= block_group->key.objectid + 481 block_group->key.offset) 482 break; 483 484 if (key.type == BTRFS_EXTENT_ITEM_KEY || 485 key.type == BTRFS_METADATA_ITEM_KEY) { 486 total_found += add_new_free_space(block_group, 487 fs_info, last, 488 key.objectid); 489 if (key.type == BTRFS_METADATA_ITEM_KEY) 490 last = key.objectid + 491 fs_info->tree_root->leafsize; 492 else 493 last = key.objectid + key.offset; 494 495 if (total_found > (1024 * 1024 * 2)) { 496 total_found = 0; 497 wake_up(&caching_ctl->wait); 498 } 499 } 500 path->slots[0]++; 501 } 502 ret = 0; 503 504 total_found += add_new_free_space(block_group, fs_info, last, 505 block_group->key.objectid + 506 block_group->key.offset); 507 caching_ctl->progress = (u64)-1; 508 509 spin_lock(&block_group->lock); 510 block_group->caching_ctl = NULL; 511 block_group->cached = BTRFS_CACHE_FINISHED; 512 spin_unlock(&block_group->lock); 513 514 err: 515 btrfs_free_path(path); 516 up_read(&fs_info->extent_commit_sem); 517 518 free_excluded_extents(extent_root, block_group); 519 520 mutex_unlock(&caching_ctl->mutex); 521 out: 522 if (ret) { 523 spin_lock(&block_group->lock); 524 block_group->caching_ctl = NULL; 525 block_group->cached = BTRFS_CACHE_ERROR; 526 spin_unlock(&block_group->lock); 527 } 528 wake_up(&caching_ctl->wait); 529 530 put_caching_control(caching_ctl); 531 btrfs_put_block_group(block_group); 532 } 533 534 static int cache_block_group(struct btrfs_block_group_cache *cache, 535 int load_cache_only) 536 { 537 DEFINE_WAIT(wait); 538 struct btrfs_fs_info *fs_info = cache->fs_info; 539 struct btrfs_caching_control *caching_ctl; 540 int ret = 0; 541 542 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); 543 if (!caching_ctl) 544 return -ENOMEM; 545 546 INIT_LIST_HEAD(&caching_ctl->list); 547 mutex_init(&caching_ctl->mutex); 548 init_waitqueue_head(&caching_ctl->wait); 549 caching_ctl->block_group = cache; 550 
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen when one thread
	 * starts to load the space cache info and then some other thread
	 * starts a transaction commit which tries to do an allocation while
	 * the first thread is still loading the space cache info.  The
	 * previous loop should have kept us from choosing this block group,
	 * but if we have moved to the state where we will wait on caching
	 * block groups, we need to first check whether we are doing a fast
	 * load here so we can wait for it to finish; otherwise we could end
	 * up allocating from a block group whose cache gets evicted for one
	 * reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wake up any waiters.
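		 *
		 * (Overall the cached state moves BTRFS_CACHE_NO ->
		 * BTRFS_CACHE_FAST while the free space cache is being
		 * loaded, then either straight to BTRFS_CACHE_FINISHED on a
		 * successful load, back to BTRFS_CACHE_NO when only a cache
		 * load was requested, or to BTRFS_CACHE_STARTED once the slow
		 * caching_thread is queued, which finally ends in
		 * BTRFS_CACHE_FINISHED or BTRFS_CACHE_ERROR.)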
619 */ 620 spin_lock(&cache->lock); 621 if (load_cache_only) { 622 cache->caching_ctl = NULL; 623 cache->cached = BTRFS_CACHE_NO; 624 } else { 625 cache->cached = BTRFS_CACHE_STARTED; 626 } 627 spin_unlock(&cache->lock); 628 wake_up(&caching_ctl->wait); 629 } 630 631 if (load_cache_only) { 632 put_caching_control(caching_ctl); 633 return 0; 634 } 635 636 down_write(&fs_info->extent_commit_sem); 637 atomic_inc(&caching_ctl->count); 638 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); 639 up_write(&fs_info->extent_commit_sem); 640 641 btrfs_get_block_group(cache); 642 643 btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work); 644 645 return ret; 646 } 647 648 /* 649 * return the block group that starts at or after bytenr 650 */ 651 static struct btrfs_block_group_cache * 652 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr) 653 { 654 struct btrfs_block_group_cache *cache; 655 656 cache = block_group_cache_tree_search(info, bytenr, 0); 657 658 return cache; 659 } 660 661 /* 662 * return the block group that contains the given bytenr 663 */ 664 struct btrfs_block_group_cache *btrfs_lookup_block_group( 665 struct btrfs_fs_info *info, 666 u64 bytenr) 667 { 668 struct btrfs_block_group_cache *cache; 669 670 cache = block_group_cache_tree_search(info, bytenr, 1); 671 672 return cache; 673 } 674 675 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info, 676 u64 flags) 677 { 678 struct list_head *head = &info->space_info; 679 struct btrfs_space_info *found; 680 681 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK; 682 683 rcu_read_lock(); 684 list_for_each_entry_rcu(found, head, list) { 685 if (found->flags & flags) { 686 rcu_read_unlock(); 687 return found; 688 } 689 } 690 rcu_read_unlock(); 691 return NULL; 692 } 693 694 /* 695 * after adding space to the filesystem, we need to clear the full flags 696 * on all the space infos. 697 */ 698 void btrfs_clear_space_info_full(struct btrfs_fs_info *info) 699 { 700 struct list_head *head = &info->space_info; 701 struct btrfs_space_info *found; 702 703 rcu_read_lock(); 704 list_for_each_entry_rcu(found, head, list) 705 found->full = 0; 706 rcu_read_unlock(); 707 } 708 709 /* simple helper to search for an existing extent at a given offset */ 710 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) 711 { 712 int ret; 713 struct btrfs_key key; 714 struct btrfs_path *path; 715 716 path = btrfs_alloc_path(); 717 if (!path) 718 return -ENOMEM; 719 720 key.objectid = start; 721 key.offset = len; 722 key.type = BTRFS_EXTENT_ITEM_KEY; 723 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path, 724 0, 0); 725 if (ret > 0) { 726 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 727 if (key.objectid == start && 728 key.type == BTRFS_METADATA_ITEM_KEY) 729 ret = 0; 730 } 731 btrfs_free_path(path); 732 return ret; 733 } 734 735 /* 736 * helper function to lookup reference count and flags of a tree block. 737 * 738 * the head node for delayed ref is used to store the sum of all the 739 * reference count modifications queued up in the rbtree. the head 740 * node may also store the extent flags to set. This way you can check 741 * to see what the reference count and extent flags would be if all of 742 * the delayed refs are not processed. 
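 *
 * For example, if the extent item on disk currently records 2 references
 * and the delayed ref head has a pending ref_mod of +1, the count
 * reported here is 3, i.e. what the extent tree will contain once the
 * delayed refs are actually run.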
743 */ 744 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 745 struct btrfs_root *root, u64 bytenr, 746 u64 offset, int metadata, u64 *refs, u64 *flags) 747 { 748 struct btrfs_delayed_ref_head *head; 749 struct btrfs_delayed_ref_root *delayed_refs; 750 struct btrfs_path *path; 751 struct btrfs_extent_item *ei; 752 struct extent_buffer *leaf; 753 struct btrfs_key key; 754 u32 item_size; 755 u64 num_refs; 756 u64 extent_flags; 757 int ret; 758 759 /* 760 * If we don't have skinny metadata, don't bother doing anything 761 * different 762 */ 763 if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) { 764 offset = root->leafsize; 765 metadata = 0; 766 } 767 768 path = btrfs_alloc_path(); 769 if (!path) 770 return -ENOMEM; 771 772 if (!trans) { 773 path->skip_locking = 1; 774 path->search_commit_root = 1; 775 } 776 777 search_again: 778 key.objectid = bytenr; 779 key.offset = offset; 780 if (metadata) 781 key.type = BTRFS_METADATA_ITEM_KEY; 782 else 783 key.type = BTRFS_EXTENT_ITEM_KEY; 784 785 again: 786 ret = btrfs_search_slot(trans, root->fs_info->extent_root, 787 &key, path, 0, 0); 788 if (ret < 0) 789 goto out_free; 790 791 if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) { 792 if (path->slots[0]) { 793 path->slots[0]--; 794 btrfs_item_key_to_cpu(path->nodes[0], &key, 795 path->slots[0]); 796 if (key.objectid == bytenr && 797 key.type == BTRFS_EXTENT_ITEM_KEY && 798 key.offset == root->leafsize) 799 ret = 0; 800 } 801 if (ret) { 802 key.objectid = bytenr; 803 key.type = BTRFS_EXTENT_ITEM_KEY; 804 key.offset = root->leafsize; 805 btrfs_release_path(path); 806 goto again; 807 } 808 } 809 810 if (ret == 0) { 811 leaf = path->nodes[0]; 812 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 813 if (item_size >= sizeof(*ei)) { 814 ei = btrfs_item_ptr(leaf, path->slots[0], 815 struct btrfs_extent_item); 816 num_refs = btrfs_extent_refs(leaf, ei); 817 extent_flags = btrfs_extent_flags(leaf, ei); 818 } else { 819 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 820 struct btrfs_extent_item_v0 *ei0; 821 BUG_ON(item_size != sizeof(*ei0)); 822 ei0 = btrfs_item_ptr(leaf, path->slots[0], 823 struct btrfs_extent_item_v0); 824 num_refs = btrfs_extent_refs_v0(leaf, ei0); 825 /* FIXME: this isn't correct for data */ 826 extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF; 827 #else 828 BUG(); 829 #endif 830 } 831 BUG_ON(num_refs == 0); 832 } else { 833 num_refs = 0; 834 extent_flags = 0; 835 ret = 0; 836 } 837 838 if (!trans) 839 goto out; 840 841 delayed_refs = &trans->transaction->delayed_refs; 842 spin_lock(&delayed_refs->lock); 843 head = btrfs_find_delayed_ref_head(trans, bytenr); 844 if (head) { 845 if (!mutex_trylock(&head->mutex)) { 846 atomic_inc(&head->node.refs); 847 spin_unlock(&delayed_refs->lock); 848 849 btrfs_release_path(path); 850 851 /* 852 * Mutex was contended, block until it's released and try 853 * again 854 */ 855 mutex_lock(&head->mutex); 856 mutex_unlock(&head->mutex); 857 btrfs_put_delayed_ref(&head->node); 858 goto search_again; 859 } 860 spin_lock(&head->lock); 861 if (head->extent_op && head->extent_op->update_flags) 862 extent_flags |= head->extent_op->flags_to_set; 863 else 864 BUG_ON(num_refs == 0); 865 866 num_refs += head->node.ref_mod; 867 spin_unlock(&head->lock); 868 mutex_unlock(&head->mutex); 869 } 870 spin_unlock(&delayed_refs->lock); 871 out: 872 WARN_ON(num_refs == 0); 873 if (refs) 874 *refs = num_refs; 875 if (flags) 876 *flags = extent_flags; 877 out_free: 878 btrfs_free_path(path); 879 return ret; 880 } 881 882 /* 883 * Back reference 
rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back ref.  Full back refs are actually generic and can
 * be used in all the cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead: every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for the
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do
 * the back ref conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for the
 * pointers in the block.  Remove these full back refs and add implicit
 * back refs for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * the pointers in the block.  Add full back refs for every pointer in
 * the block and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer
 * in the new block and increase the lower level extents' reference
 * counts.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
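 *
 * For example (addresses chosen purely for illustration), a data extent
 * starting at byte 12582912 that is referenced implicitly by subvolume
 * root 5, inode 257, file offset 0 is tracked by a back ref keyed
 * (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0)), while a shared
 * (full) reference held by the leaf at byte 30408704 uses
 * (12582912, BTRFS_SHARED_DATA_REF_KEY, 30408704).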
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back ref is used and
 * the fields are filled in as:
 *
 * (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back ref and check the following fields:
 *
 * (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit and the full back refs for tree blocks consist only
 * of a key.  The key offset for the implicit back refs is the objectid
 * of the block's owner tree.  The key offset for the full back refs is
 * the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf =
path->nodes[0]; 1050 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1051 btrfs_set_extent_refs(leaf, item, refs); 1052 /* FIXME: get real generation */ 1053 btrfs_set_extent_generation(leaf, item, 0); 1054 if (owner < BTRFS_FIRST_FREE_OBJECTID) { 1055 btrfs_set_extent_flags(leaf, item, 1056 BTRFS_EXTENT_FLAG_TREE_BLOCK | 1057 BTRFS_BLOCK_FLAG_FULL_BACKREF); 1058 bi = (struct btrfs_tree_block_info *)(item + 1); 1059 /* FIXME: get first key of the block */ 1060 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi)); 1061 btrfs_set_tree_block_level(leaf, bi, (int)owner); 1062 } else { 1063 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA); 1064 } 1065 btrfs_mark_buffer_dirty(leaf); 1066 return 0; 1067 } 1068 #endif 1069 1070 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset) 1071 { 1072 u32 high_crc = ~(u32)0; 1073 u32 low_crc = ~(u32)0; 1074 __le64 lenum; 1075 1076 lenum = cpu_to_le64(root_objectid); 1077 high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum)); 1078 lenum = cpu_to_le64(owner); 1079 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); 1080 lenum = cpu_to_le64(offset); 1081 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); 1082 1083 return ((u64)high_crc << 31) ^ (u64)low_crc; 1084 } 1085 1086 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf, 1087 struct btrfs_extent_data_ref *ref) 1088 { 1089 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref), 1090 btrfs_extent_data_ref_objectid(leaf, ref), 1091 btrfs_extent_data_ref_offset(leaf, ref)); 1092 } 1093 1094 static int match_extent_data_ref(struct extent_buffer *leaf, 1095 struct btrfs_extent_data_ref *ref, 1096 u64 root_objectid, u64 owner, u64 offset) 1097 { 1098 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid || 1099 btrfs_extent_data_ref_objectid(leaf, ref) != owner || 1100 btrfs_extent_data_ref_offset(leaf, ref) != offset) 1101 return 0; 1102 return 1; 1103 } 1104 1105 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans, 1106 struct btrfs_root *root, 1107 struct btrfs_path *path, 1108 u64 bytenr, u64 parent, 1109 u64 root_objectid, 1110 u64 owner, u64 offset) 1111 { 1112 struct btrfs_key key; 1113 struct btrfs_extent_data_ref *ref; 1114 struct extent_buffer *leaf; 1115 u32 nritems; 1116 int ret; 1117 int recow; 1118 int err = -ENOENT; 1119 1120 key.objectid = bytenr; 1121 if (parent) { 1122 key.type = BTRFS_SHARED_DATA_REF_KEY; 1123 key.offset = parent; 1124 } else { 1125 key.type = BTRFS_EXTENT_DATA_REF_KEY; 1126 key.offset = hash_extent_data_ref(root_objectid, 1127 owner, offset); 1128 } 1129 again: 1130 recow = 0; 1131 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1132 if (ret < 0) { 1133 err = ret; 1134 goto fail; 1135 } 1136 1137 if (parent) { 1138 if (!ret) 1139 return 0; 1140 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 1141 key.type = BTRFS_EXTENT_REF_V0_KEY; 1142 btrfs_release_path(path); 1143 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1144 if (ret < 0) { 1145 err = ret; 1146 goto fail; 1147 } 1148 if (!ret) 1149 return 0; 1150 #endif 1151 goto fail; 1152 } 1153 1154 leaf = path->nodes[0]; 1155 nritems = btrfs_header_nritems(leaf); 1156 while (1) { 1157 if (path->slots[0] >= nritems) { 1158 ret = btrfs_next_leaf(root, path); 1159 if (ret < 0) 1160 err = ret; 1161 if (ret) 1162 goto fail; 1163 1164 leaf = path->nodes[0]; 1165 nritems = btrfs_header_nritems(leaf); 1166 recow = 1; 1167 } 1168 1169 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 1170 if (key.objectid 
!= bytenr || 1171 key.type != BTRFS_EXTENT_DATA_REF_KEY) 1172 goto fail; 1173 1174 ref = btrfs_item_ptr(leaf, path->slots[0], 1175 struct btrfs_extent_data_ref); 1176 1177 if (match_extent_data_ref(leaf, ref, root_objectid, 1178 owner, offset)) { 1179 if (recow) { 1180 btrfs_release_path(path); 1181 goto again; 1182 } 1183 err = 0; 1184 break; 1185 } 1186 path->slots[0]++; 1187 } 1188 fail: 1189 return err; 1190 } 1191 1192 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, 1193 struct btrfs_root *root, 1194 struct btrfs_path *path, 1195 u64 bytenr, u64 parent, 1196 u64 root_objectid, u64 owner, 1197 u64 offset, int refs_to_add) 1198 { 1199 struct btrfs_key key; 1200 struct extent_buffer *leaf; 1201 u32 size; 1202 u32 num_refs; 1203 int ret; 1204 1205 key.objectid = bytenr; 1206 if (parent) { 1207 key.type = BTRFS_SHARED_DATA_REF_KEY; 1208 key.offset = parent; 1209 size = sizeof(struct btrfs_shared_data_ref); 1210 } else { 1211 key.type = BTRFS_EXTENT_DATA_REF_KEY; 1212 key.offset = hash_extent_data_ref(root_objectid, 1213 owner, offset); 1214 size = sizeof(struct btrfs_extent_data_ref); 1215 } 1216 1217 ret = btrfs_insert_empty_item(trans, root, path, &key, size); 1218 if (ret && ret != -EEXIST) 1219 goto fail; 1220 1221 leaf = path->nodes[0]; 1222 if (parent) { 1223 struct btrfs_shared_data_ref *ref; 1224 ref = btrfs_item_ptr(leaf, path->slots[0], 1225 struct btrfs_shared_data_ref); 1226 if (ret == 0) { 1227 btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add); 1228 } else { 1229 num_refs = btrfs_shared_data_ref_count(leaf, ref); 1230 num_refs += refs_to_add; 1231 btrfs_set_shared_data_ref_count(leaf, ref, num_refs); 1232 } 1233 } else { 1234 struct btrfs_extent_data_ref *ref; 1235 while (ret == -EEXIST) { 1236 ref = btrfs_item_ptr(leaf, path->slots[0], 1237 struct btrfs_extent_data_ref); 1238 if (match_extent_data_ref(leaf, ref, root_objectid, 1239 owner, offset)) 1240 break; 1241 btrfs_release_path(path); 1242 key.offset++; 1243 ret = btrfs_insert_empty_item(trans, root, path, &key, 1244 size); 1245 if (ret && ret != -EEXIST) 1246 goto fail; 1247 1248 leaf = path->nodes[0]; 1249 } 1250 ref = btrfs_item_ptr(leaf, path->slots[0], 1251 struct btrfs_extent_data_ref); 1252 if (ret == 0) { 1253 btrfs_set_extent_data_ref_root(leaf, ref, 1254 root_objectid); 1255 btrfs_set_extent_data_ref_objectid(leaf, ref, owner); 1256 btrfs_set_extent_data_ref_offset(leaf, ref, offset); 1257 btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add); 1258 } else { 1259 num_refs = btrfs_extent_data_ref_count(leaf, ref); 1260 num_refs += refs_to_add; 1261 btrfs_set_extent_data_ref_count(leaf, ref, num_refs); 1262 } 1263 } 1264 btrfs_mark_buffer_dirty(leaf); 1265 ret = 0; 1266 fail: 1267 btrfs_release_path(path); 1268 return ret; 1269 } 1270 1271 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans, 1272 struct btrfs_root *root, 1273 struct btrfs_path *path, 1274 int refs_to_drop) 1275 { 1276 struct btrfs_key key; 1277 struct btrfs_extent_data_ref *ref1 = NULL; 1278 struct btrfs_shared_data_ref *ref2 = NULL; 1279 struct extent_buffer *leaf; 1280 u32 num_refs = 0; 1281 int ret = 0; 1282 1283 leaf = path->nodes[0]; 1284 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 1285 1286 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 1287 ref1 = btrfs_item_ptr(leaf, path->slots[0], 1288 struct btrfs_extent_data_ref); 1289 num_refs = btrfs_extent_data_ref_count(leaf, ref1); 1290 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 1291 ref2 = btrfs_item_ptr(leaf, 
path->slots[0], 1292 struct btrfs_shared_data_ref); 1293 num_refs = btrfs_shared_data_ref_count(leaf, ref2); 1294 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 1295 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) { 1296 struct btrfs_extent_ref_v0 *ref0; 1297 ref0 = btrfs_item_ptr(leaf, path->slots[0], 1298 struct btrfs_extent_ref_v0); 1299 num_refs = btrfs_ref_count_v0(leaf, ref0); 1300 #endif 1301 } else { 1302 BUG(); 1303 } 1304 1305 BUG_ON(num_refs < refs_to_drop); 1306 num_refs -= refs_to_drop; 1307 1308 if (num_refs == 0) { 1309 ret = btrfs_del_item(trans, root, path); 1310 } else { 1311 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) 1312 btrfs_set_extent_data_ref_count(leaf, ref1, num_refs); 1313 else if (key.type == BTRFS_SHARED_DATA_REF_KEY) 1314 btrfs_set_shared_data_ref_count(leaf, ref2, num_refs); 1315 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 1316 else { 1317 struct btrfs_extent_ref_v0 *ref0; 1318 ref0 = btrfs_item_ptr(leaf, path->slots[0], 1319 struct btrfs_extent_ref_v0); 1320 btrfs_set_ref_count_v0(leaf, ref0, num_refs); 1321 } 1322 #endif 1323 btrfs_mark_buffer_dirty(leaf); 1324 } 1325 return ret; 1326 } 1327 1328 static noinline u32 extent_data_ref_count(struct btrfs_root *root, 1329 struct btrfs_path *path, 1330 struct btrfs_extent_inline_ref *iref) 1331 { 1332 struct btrfs_key key; 1333 struct extent_buffer *leaf; 1334 struct btrfs_extent_data_ref *ref1; 1335 struct btrfs_shared_data_ref *ref2; 1336 u32 num_refs = 0; 1337 1338 leaf = path->nodes[0]; 1339 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 1340 if (iref) { 1341 if (btrfs_extent_inline_ref_type(leaf, iref) == 1342 BTRFS_EXTENT_DATA_REF_KEY) { 1343 ref1 = (struct btrfs_extent_data_ref *)(&iref->offset); 1344 num_refs = btrfs_extent_data_ref_count(leaf, ref1); 1345 } else { 1346 ref2 = (struct btrfs_shared_data_ref *)(iref + 1); 1347 num_refs = btrfs_shared_data_ref_count(leaf, ref2); 1348 } 1349 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 1350 ref1 = btrfs_item_ptr(leaf, path->slots[0], 1351 struct btrfs_extent_data_ref); 1352 num_refs = btrfs_extent_data_ref_count(leaf, ref1); 1353 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 1354 ref2 = btrfs_item_ptr(leaf, path->slots[0], 1355 struct btrfs_shared_data_ref); 1356 num_refs = btrfs_shared_data_ref_count(leaf, ref2); 1357 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 1358 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) { 1359 struct btrfs_extent_ref_v0 *ref0; 1360 ref0 = btrfs_item_ptr(leaf, path->slots[0], 1361 struct btrfs_extent_ref_v0); 1362 num_refs = btrfs_ref_count_v0(leaf, ref0); 1363 #endif 1364 } else { 1365 WARN_ON(1); 1366 } 1367 return num_refs; 1368 } 1369 1370 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans, 1371 struct btrfs_root *root, 1372 struct btrfs_path *path, 1373 u64 bytenr, u64 parent, 1374 u64 root_objectid) 1375 { 1376 struct btrfs_key key; 1377 int ret; 1378 1379 key.objectid = bytenr; 1380 if (parent) { 1381 key.type = BTRFS_SHARED_BLOCK_REF_KEY; 1382 key.offset = parent; 1383 } else { 1384 key.type = BTRFS_TREE_BLOCK_REF_KEY; 1385 key.offset = root_objectid; 1386 } 1387 1388 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1389 if (ret > 0) 1390 ret = -ENOENT; 1391 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 1392 if (ret == -ENOENT && parent) { 1393 btrfs_release_path(path); 1394 key.type = BTRFS_EXTENT_REF_V0_KEY; 1395 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1396 if (ret > 0) 1397 ret = -ENOENT; 1398 } 1399 #endif 1400 return ret; 1401 } 1402 1403 static noinline int insert_tree_block_ref(struct 
btrfs_trans_handle *trans, 1404 struct btrfs_root *root, 1405 struct btrfs_path *path, 1406 u64 bytenr, u64 parent, 1407 u64 root_objectid) 1408 { 1409 struct btrfs_key key; 1410 int ret; 1411 1412 key.objectid = bytenr; 1413 if (parent) { 1414 key.type = BTRFS_SHARED_BLOCK_REF_KEY; 1415 key.offset = parent; 1416 } else { 1417 key.type = BTRFS_TREE_BLOCK_REF_KEY; 1418 key.offset = root_objectid; 1419 } 1420 1421 ret = btrfs_insert_empty_item(trans, root, path, &key, 0); 1422 btrfs_release_path(path); 1423 return ret; 1424 } 1425 1426 static inline int extent_ref_type(u64 parent, u64 owner) 1427 { 1428 int type; 1429 if (owner < BTRFS_FIRST_FREE_OBJECTID) { 1430 if (parent > 0) 1431 type = BTRFS_SHARED_BLOCK_REF_KEY; 1432 else 1433 type = BTRFS_TREE_BLOCK_REF_KEY; 1434 } else { 1435 if (parent > 0) 1436 type = BTRFS_SHARED_DATA_REF_KEY; 1437 else 1438 type = BTRFS_EXTENT_DATA_REF_KEY; 1439 } 1440 return type; 1441 } 1442 1443 static int find_next_key(struct btrfs_path *path, int level, 1444 struct btrfs_key *key) 1445 1446 { 1447 for (; level < BTRFS_MAX_LEVEL; level++) { 1448 if (!path->nodes[level]) 1449 break; 1450 if (path->slots[level] + 1 >= 1451 btrfs_header_nritems(path->nodes[level])) 1452 continue; 1453 if (level == 0) 1454 btrfs_item_key_to_cpu(path->nodes[level], key, 1455 path->slots[level] + 1); 1456 else 1457 btrfs_node_key_to_cpu(path->nodes[level], key, 1458 path->slots[level] + 1); 1459 return 0; 1460 } 1461 return 1; 1462 } 1463 1464 /* 1465 * look for inline back ref. if back ref is found, *ref_ret is set 1466 * to the address of inline back ref, and 0 is returned. 1467 * 1468 * if back ref isn't found, *ref_ret is set to the address where it 1469 * should be inserted, and -ENOENT is returned. 1470 * 1471 * if insert is true and there are too many inline back refs, the path 1472 * points to the extent item, and -EAGAIN is returned. 1473 * 1474 * NOTE: inline back refs are ordered in the same way that back ref 1475 * items in the tree are ordered. 1476 */ 1477 static noinline_for_stack 1478 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans, 1479 struct btrfs_root *root, 1480 struct btrfs_path *path, 1481 struct btrfs_extent_inline_ref **ref_ret, 1482 u64 bytenr, u64 num_bytes, 1483 u64 parent, u64 root_objectid, 1484 u64 owner, u64 offset, int insert) 1485 { 1486 struct btrfs_key key; 1487 struct extent_buffer *leaf; 1488 struct btrfs_extent_item *ei; 1489 struct btrfs_extent_inline_ref *iref; 1490 u64 flags; 1491 u64 item_size; 1492 unsigned long ptr; 1493 unsigned long end; 1494 int extra_size; 1495 int type; 1496 int want; 1497 int ret; 1498 int err = 0; 1499 bool skinny_metadata = btrfs_fs_incompat(root->fs_info, 1500 SKINNY_METADATA); 1501 1502 key.objectid = bytenr; 1503 key.type = BTRFS_EXTENT_ITEM_KEY; 1504 key.offset = num_bytes; 1505 1506 want = extent_ref_type(parent, owner); 1507 if (insert) { 1508 extra_size = btrfs_extent_inline_ref_size(want); 1509 path->keep_locks = 1; 1510 } else 1511 extra_size = -1; 1512 1513 /* 1514 * Owner is our parent level, so we can just add one to get the level 1515 * for the block we are interested in. 
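	 * (Skinny metadata extent items are keyed by
	 * (bytenr, BTRFS_METADATA_ITEM_KEY, level) rather than by the
	 * extent size.)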
1516 */ 1517 if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) { 1518 key.type = BTRFS_METADATA_ITEM_KEY; 1519 key.offset = owner; 1520 } 1521 1522 again: 1523 ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1); 1524 if (ret < 0) { 1525 err = ret; 1526 goto out; 1527 } 1528 1529 /* 1530 * We may be a newly converted file system which still has the old fat 1531 * extent entries for metadata, so try and see if we have one of those. 1532 */ 1533 if (ret > 0 && skinny_metadata) { 1534 skinny_metadata = false; 1535 if (path->slots[0]) { 1536 path->slots[0]--; 1537 btrfs_item_key_to_cpu(path->nodes[0], &key, 1538 path->slots[0]); 1539 if (key.objectid == bytenr && 1540 key.type == BTRFS_EXTENT_ITEM_KEY && 1541 key.offset == num_bytes) 1542 ret = 0; 1543 } 1544 if (ret) { 1545 key.type = BTRFS_EXTENT_ITEM_KEY; 1546 key.offset = num_bytes; 1547 btrfs_release_path(path); 1548 goto again; 1549 } 1550 } 1551 1552 if (ret && !insert) { 1553 err = -ENOENT; 1554 goto out; 1555 } else if (WARN_ON(ret)) { 1556 err = -EIO; 1557 goto out; 1558 } 1559 1560 leaf = path->nodes[0]; 1561 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1562 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 1563 if (item_size < sizeof(*ei)) { 1564 if (!insert) { 1565 err = -ENOENT; 1566 goto out; 1567 } 1568 ret = convert_extent_item_v0(trans, root, path, owner, 1569 extra_size); 1570 if (ret < 0) { 1571 err = ret; 1572 goto out; 1573 } 1574 leaf = path->nodes[0]; 1575 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1576 } 1577 #endif 1578 BUG_ON(item_size < sizeof(*ei)); 1579 1580 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1581 flags = btrfs_extent_flags(leaf, ei); 1582 1583 ptr = (unsigned long)(ei + 1); 1584 end = (unsigned long)ei + item_size; 1585 1586 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) { 1587 ptr += sizeof(struct btrfs_tree_block_info); 1588 BUG_ON(ptr > end); 1589 } 1590 1591 err = -ENOENT; 1592 while (1) { 1593 if (ptr >= end) { 1594 WARN_ON(ptr > end); 1595 break; 1596 } 1597 iref = (struct btrfs_extent_inline_ref *)ptr; 1598 type = btrfs_extent_inline_ref_type(leaf, iref); 1599 if (want < type) 1600 break; 1601 if (want > type) { 1602 ptr += btrfs_extent_inline_ref_size(type); 1603 continue; 1604 } 1605 1606 if (type == BTRFS_EXTENT_DATA_REF_KEY) { 1607 struct btrfs_extent_data_ref *dref; 1608 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 1609 if (match_extent_data_ref(leaf, dref, root_objectid, 1610 owner, offset)) { 1611 err = 0; 1612 break; 1613 } 1614 if (hash_extent_data_ref_item(leaf, dref) < 1615 hash_extent_data_ref(root_objectid, owner, offset)) 1616 break; 1617 } else { 1618 u64 ref_offset; 1619 ref_offset = btrfs_extent_inline_ref_offset(leaf, iref); 1620 if (parent > 0) { 1621 if (parent == ref_offset) { 1622 err = 0; 1623 break; 1624 } 1625 if (ref_offset < parent) 1626 break; 1627 } else { 1628 if (root_objectid == ref_offset) { 1629 err = 0; 1630 break; 1631 } 1632 if (ref_offset < root_objectid) 1633 break; 1634 } 1635 } 1636 ptr += btrfs_extent_inline_ref_size(type); 1637 } 1638 if (err == -ENOENT && insert) { 1639 if (item_size + extra_size >= 1640 BTRFS_MAX_EXTENT_ITEM_SIZE(root)) { 1641 err = -EAGAIN; 1642 goto out; 1643 } 1644 /* 1645 * To add new inline back ref, we have to make sure 1646 * there is no corresponding back ref item. 
1647 * For simplicity, we just do not add new inline back 1648 * ref if there is any kind of item for this block 1649 */ 1650 if (find_next_key(path, 0, &key) == 0 && 1651 key.objectid == bytenr && 1652 key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) { 1653 err = -EAGAIN; 1654 goto out; 1655 } 1656 } 1657 *ref_ret = (struct btrfs_extent_inline_ref *)ptr; 1658 out: 1659 if (insert) { 1660 path->keep_locks = 0; 1661 btrfs_unlock_up_safe(path, 1); 1662 } 1663 return err; 1664 } 1665 1666 /* 1667 * helper to add new inline back ref 1668 */ 1669 static noinline_for_stack 1670 void setup_inline_extent_backref(struct btrfs_root *root, 1671 struct btrfs_path *path, 1672 struct btrfs_extent_inline_ref *iref, 1673 u64 parent, u64 root_objectid, 1674 u64 owner, u64 offset, int refs_to_add, 1675 struct btrfs_delayed_extent_op *extent_op) 1676 { 1677 struct extent_buffer *leaf; 1678 struct btrfs_extent_item *ei; 1679 unsigned long ptr; 1680 unsigned long end; 1681 unsigned long item_offset; 1682 u64 refs; 1683 int size; 1684 int type; 1685 1686 leaf = path->nodes[0]; 1687 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1688 item_offset = (unsigned long)iref - (unsigned long)ei; 1689 1690 type = extent_ref_type(parent, owner); 1691 size = btrfs_extent_inline_ref_size(type); 1692 1693 btrfs_extend_item(root, path, size); 1694 1695 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1696 refs = btrfs_extent_refs(leaf, ei); 1697 refs += refs_to_add; 1698 btrfs_set_extent_refs(leaf, ei, refs); 1699 if (extent_op) 1700 __run_delayed_extent_op(extent_op, leaf, ei); 1701 1702 ptr = (unsigned long)ei + item_offset; 1703 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]); 1704 if (ptr < end - size) 1705 memmove_extent_buffer(leaf, ptr + size, ptr, 1706 end - size - ptr); 1707 1708 iref = (struct btrfs_extent_inline_ref *)ptr; 1709 btrfs_set_extent_inline_ref_type(leaf, iref, type); 1710 if (type == BTRFS_EXTENT_DATA_REF_KEY) { 1711 struct btrfs_extent_data_ref *dref; 1712 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 1713 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid); 1714 btrfs_set_extent_data_ref_objectid(leaf, dref, owner); 1715 btrfs_set_extent_data_ref_offset(leaf, dref, offset); 1716 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add); 1717 } else if (type == BTRFS_SHARED_DATA_REF_KEY) { 1718 struct btrfs_shared_data_ref *sref; 1719 sref = (struct btrfs_shared_data_ref *)(iref + 1); 1720 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add); 1721 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 1722 } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) { 1723 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 1724 } else { 1725 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); 1726 } 1727 btrfs_mark_buffer_dirty(leaf); 1728 } 1729 1730 static int lookup_extent_backref(struct btrfs_trans_handle *trans, 1731 struct btrfs_root *root, 1732 struct btrfs_path *path, 1733 struct btrfs_extent_inline_ref **ref_ret, 1734 u64 bytenr, u64 num_bytes, u64 parent, 1735 u64 root_objectid, u64 owner, u64 offset) 1736 { 1737 int ret; 1738 1739 ret = lookup_inline_extent_backref(trans, root, path, ref_ret, 1740 bytenr, num_bytes, parent, 1741 root_objectid, owner, offset, 0); 1742 if (ret != -ENOENT) 1743 return ret; 1744 1745 btrfs_release_path(path); 1746 *ref_ret = NULL; 1747 1748 if (owner < BTRFS_FIRST_FREE_OBJECTID) { 1749 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent, 1750 root_objectid); 1751 } else { 1752 ret 
= lookup_extent_data_ref(trans, root, path, bytenr, parent, 1753 root_objectid, owner, offset); 1754 } 1755 return ret; 1756 } 1757 1758 /* 1759 * helper to update/remove inline back ref 1760 */ 1761 static noinline_for_stack 1762 void update_inline_extent_backref(struct btrfs_root *root, 1763 struct btrfs_path *path, 1764 struct btrfs_extent_inline_ref *iref, 1765 int refs_to_mod, 1766 struct btrfs_delayed_extent_op *extent_op) 1767 { 1768 struct extent_buffer *leaf; 1769 struct btrfs_extent_item *ei; 1770 struct btrfs_extent_data_ref *dref = NULL; 1771 struct btrfs_shared_data_ref *sref = NULL; 1772 unsigned long ptr; 1773 unsigned long end; 1774 u32 item_size; 1775 int size; 1776 int type; 1777 u64 refs; 1778 1779 leaf = path->nodes[0]; 1780 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1781 refs = btrfs_extent_refs(leaf, ei); 1782 WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0); 1783 refs += refs_to_mod; 1784 btrfs_set_extent_refs(leaf, ei, refs); 1785 if (extent_op) 1786 __run_delayed_extent_op(extent_op, leaf, ei); 1787 1788 type = btrfs_extent_inline_ref_type(leaf, iref); 1789 1790 if (type == BTRFS_EXTENT_DATA_REF_KEY) { 1791 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 1792 refs = btrfs_extent_data_ref_count(leaf, dref); 1793 } else if (type == BTRFS_SHARED_DATA_REF_KEY) { 1794 sref = (struct btrfs_shared_data_ref *)(iref + 1); 1795 refs = btrfs_shared_data_ref_count(leaf, sref); 1796 } else { 1797 refs = 1; 1798 BUG_ON(refs_to_mod != -1); 1799 } 1800 1801 BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod); 1802 refs += refs_to_mod; 1803 1804 if (refs > 0) { 1805 if (type == BTRFS_EXTENT_DATA_REF_KEY) 1806 btrfs_set_extent_data_ref_count(leaf, dref, refs); 1807 else 1808 btrfs_set_shared_data_ref_count(leaf, sref, refs); 1809 } else { 1810 size = btrfs_extent_inline_ref_size(type); 1811 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1812 ptr = (unsigned long)iref; 1813 end = (unsigned long)ei + item_size; 1814 if (ptr + size < end) 1815 memmove_extent_buffer(leaf, ptr, ptr + size, 1816 end - ptr - size); 1817 item_size -= size; 1818 btrfs_truncate_item(root, path, item_size, 1); 1819 } 1820 btrfs_mark_buffer_dirty(leaf); 1821 } 1822 1823 static noinline_for_stack 1824 int insert_inline_extent_backref(struct btrfs_trans_handle *trans, 1825 struct btrfs_root *root, 1826 struct btrfs_path *path, 1827 u64 bytenr, u64 num_bytes, u64 parent, 1828 u64 root_objectid, u64 owner, 1829 u64 offset, int refs_to_add, 1830 struct btrfs_delayed_extent_op *extent_op) 1831 { 1832 struct btrfs_extent_inline_ref *iref; 1833 int ret; 1834 1835 ret = lookup_inline_extent_backref(trans, root, path, &iref, 1836 bytenr, num_bytes, parent, 1837 root_objectid, owner, offset, 1); 1838 if (ret == 0) { 1839 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID); 1840 update_inline_extent_backref(root, path, iref, 1841 refs_to_add, extent_op); 1842 } else if (ret == -ENOENT) { 1843 setup_inline_extent_backref(root, path, iref, parent, 1844 root_objectid, owner, offset, 1845 refs_to_add, extent_op); 1846 ret = 0; 1847 } 1848 return ret; 1849 } 1850 1851 static int insert_extent_backref(struct btrfs_trans_handle *trans, 1852 struct btrfs_root *root, 1853 struct btrfs_path *path, 1854 u64 bytenr, u64 parent, u64 root_objectid, 1855 u64 owner, u64 offset, int refs_to_add) 1856 { 1857 int ret; 1858 if (owner < BTRFS_FIRST_FREE_OBJECTID) { 1859 BUG_ON(refs_to_add != 1); 1860 ret = insert_tree_block_ref(trans, root, path, bytenr, 1861 parent, root_objectid); 1862 } else { 1863 ret = 
insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(root, path, iref,
					     -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
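			 * A stripe on a device that cannot discard at all
			 * simply contributes nothing to *actual_bytes; the
			 * remaining stripes are still attempted and counted.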
1929 */ 1930 ret = 0; 1931 } 1932 kfree(bbio); 1933 } 1934 1935 if (actual_bytes) 1936 *actual_bytes = discarded_bytes; 1937 1938 1939 if (ret == -EOPNOTSUPP) 1940 ret = 0; 1941 return ret; 1942 } 1943 1944 /* Can return -ENOMEM */ 1945 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 1946 struct btrfs_root *root, 1947 u64 bytenr, u64 num_bytes, u64 parent, 1948 u64 root_objectid, u64 owner, u64 offset, int for_cow) 1949 { 1950 int ret; 1951 struct btrfs_fs_info *fs_info = root->fs_info; 1952 1953 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID && 1954 root_objectid == BTRFS_TREE_LOG_OBJECTID); 1955 1956 if (owner < BTRFS_FIRST_FREE_OBJECTID) { 1957 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, 1958 num_bytes, 1959 parent, root_objectid, (int)owner, 1960 BTRFS_ADD_DELAYED_REF, NULL, for_cow); 1961 } else { 1962 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr, 1963 num_bytes, 1964 parent, root_objectid, owner, offset, 1965 BTRFS_ADD_DELAYED_REF, NULL, for_cow); 1966 } 1967 return ret; 1968 } 1969 1970 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 1971 struct btrfs_root *root, 1972 u64 bytenr, u64 num_bytes, 1973 u64 parent, u64 root_objectid, 1974 u64 owner, u64 offset, int refs_to_add, 1975 struct btrfs_delayed_extent_op *extent_op) 1976 { 1977 struct btrfs_path *path; 1978 struct extent_buffer *leaf; 1979 struct btrfs_extent_item *item; 1980 u64 refs; 1981 int ret; 1982 1983 path = btrfs_alloc_path(); 1984 if (!path) 1985 return -ENOMEM; 1986 1987 path->reada = 1; 1988 path->leave_spinning = 1; 1989 /* this will setup the path even if it fails to insert the back ref */ 1990 ret = insert_inline_extent_backref(trans, root->fs_info->extent_root, 1991 path, bytenr, num_bytes, parent, 1992 root_objectid, owner, offset, 1993 refs_to_add, extent_op); 1994 if (ret != -EAGAIN) 1995 goto out; 1996 1997 leaf = path->nodes[0]; 1998 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1999 refs = btrfs_extent_refs(leaf, item); 2000 btrfs_set_extent_refs(leaf, item, refs + refs_to_add); 2001 if (extent_op) 2002 __run_delayed_extent_op(extent_op, leaf, item); 2003 2004 btrfs_mark_buffer_dirty(leaf); 2005 btrfs_release_path(path); 2006 2007 path->reada = 1; 2008 path->leave_spinning = 1; 2009 2010 /* now insert the actual backref */ 2011 ret = insert_extent_backref(trans, root->fs_info->extent_root, 2012 path, bytenr, parent, root_objectid, 2013 owner, offset, refs_to_add); 2014 if (ret) 2015 btrfs_abort_transaction(trans, root, ret); 2016 out: 2017 btrfs_free_path(path); 2018 return ret; 2019 } 2020 2021 static int run_delayed_data_ref(struct btrfs_trans_handle *trans, 2022 struct btrfs_root *root, 2023 struct btrfs_delayed_ref_node *node, 2024 struct btrfs_delayed_extent_op *extent_op, 2025 int insert_reserved) 2026 { 2027 int ret = 0; 2028 struct btrfs_delayed_data_ref *ref; 2029 struct btrfs_key ins; 2030 u64 parent = 0; 2031 u64 ref_root = 0; 2032 u64 flags = 0; 2033 2034 ins.objectid = node->bytenr; 2035 ins.offset = node->num_bytes; 2036 ins.type = BTRFS_EXTENT_ITEM_KEY; 2037 2038 ref = btrfs_delayed_node_to_data_ref(node); 2039 trace_run_delayed_data_ref(node, ref, node->action); 2040 2041 if (node->type == BTRFS_SHARED_DATA_REF_KEY) 2042 parent = ref->parent; 2043 else 2044 ref_root = ref->root; 2045 2046 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { 2047 if (extent_op) 2048 flags |= extent_op->flags_to_set; 2049 ret = alloc_reserved_file_extent(trans, root, 2050 parent, ref_root, flags, 2051 ref->objectid, 
ref->offset, 2052 &ins, node->ref_mod); 2053 } else if (node->action == BTRFS_ADD_DELAYED_REF) { 2054 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr, 2055 node->num_bytes, parent, 2056 ref_root, ref->objectid, 2057 ref->offset, node->ref_mod, 2058 extent_op); 2059 } else if (node->action == BTRFS_DROP_DELAYED_REF) { 2060 ret = __btrfs_free_extent(trans, root, node->bytenr, 2061 node->num_bytes, parent, 2062 ref_root, ref->objectid, 2063 ref->offset, node->ref_mod, 2064 extent_op); 2065 } else { 2066 BUG(); 2067 } 2068 return ret; 2069 } 2070 2071 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, 2072 struct extent_buffer *leaf, 2073 struct btrfs_extent_item *ei) 2074 { 2075 u64 flags = btrfs_extent_flags(leaf, ei); 2076 if (extent_op->update_flags) { 2077 flags |= extent_op->flags_to_set; 2078 btrfs_set_extent_flags(leaf, ei, flags); 2079 } 2080 2081 if (extent_op->update_key) { 2082 struct btrfs_tree_block_info *bi; 2083 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)); 2084 bi = (struct btrfs_tree_block_info *)(ei + 1); 2085 btrfs_set_tree_block_key(leaf, bi, &extent_op->key); 2086 } 2087 } 2088 2089 static int run_delayed_extent_op(struct btrfs_trans_handle *trans, 2090 struct btrfs_root *root, 2091 struct btrfs_delayed_ref_node *node, 2092 struct btrfs_delayed_extent_op *extent_op) 2093 { 2094 struct btrfs_key key; 2095 struct btrfs_path *path; 2096 struct btrfs_extent_item *ei; 2097 struct extent_buffer *leaf; 2098 u32 item_size; 2099 int ret; 2100 int err = 0; 2101 int metadata = !extent_op->is_data; 2102 2103 if (trans->aborted) 2104 return 0; 2105 2106 if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) 2107 metadata = 0; 2108 2109 path = btrfs_alloc_path(); 2110 if (!path) 2111 return -ENOMEM; 2112 2113 key.objectid = node->bytenr; 2114 2115 if (metadata) { 2116 key.type = BTRFS_METADATA_ITEM_KEY; 2117 key.offset = extent_op->level; 2118 } else { 2119 key.type = BTRFS_EXTENT_ITEM_KEY; 2120 key.offset = node->num_bytes; 2121 } 2122 2123 again: 2124 path->reada = 1; 2125 path->leave_spinning = 1; 2126 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, 2127 path, 0, 1); 2128 if (ret < 0) { 2129 err = ret; 2130 goto out; 2131 } 2132 if (ret > 0) { 2133 if (metadata) { 2134 if (path->slots[0] > 0) { 2135 path->slots[0]--; 2136 btrfs_item_key_to_cpu(path->nodes[0], &key, 2137 path->slots[0]); 2138 if (key.objectid == node->bytenr && 2139 key.type == BTRFS_EXTENT_ITEM_KEY && 2140 key.offset == node->num_bytes) 2141 ret = 0; 2142 } 2143 if (ret > 0) { 2144 btrfs_release_path(path); 2145 metadata = 0; 2146 2147 key.objectid = node->bytenr; 2148 key.offset = node->num_bytes; 2149 key.type = BTRFS_EXTENT_ITEM_KEY; 2150 goto again; 2151 } 2152 } else { 2153 err = -EIO; 2154 goto out; 2155 } 2156 } 2157 2158 leaf = path->nodes[0]; 2159 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 2160 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 2161 if (item_size < sizeof(*ei)) { 2162 ret = convert_extent_item_v0(trans, root->fs_info->extent_root, 2163 path, (u64)-1, 0); 2164 if (ret < 0) { 2165 err = ret; 2166 goto out; 2167 } 2168 leaf = path->nodes[0]; 2169 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 2170 } 2171 #endif 2172 BUG_ON(item_size < sizeof(*ei)); 2173 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2174 __run_delayed_extent_op(extent_op, leaf, ei); 2175 2176 btrfs_mark_buffer_dirty(leaf); 2177 out: 2178 btrfs_free_path(path); 2179 return err; 2180 } 2181 2182 static int run_delayed_tree_ref(struct 
btrfs_trans_handle *trans, 2183 struct btrfs_root *root, 2184 struct btrfs_delayed_ref_node *node, 2185 struct btrfs_delayed_extent_op *extent_op, 2186 int insert_reserved) 2187 { 2188 int ret = 0; 2189 struct btrfs_delayed_tree_ref *ref; 2190 struct btrfs_key ins; 2191 u64 parent = 0; 2192 u64 ref_root = 0; 2193 bool skinny_metadata = btrfs_fs_incompat(root->fs_info, 2194 SKINNY_METADATA); 2195 2196 ref = btrfs_delayed_node_to_tree_ref(node); 2197 trace_run_delayed_tree_ref(node, ref, node->action); 2198 2199 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) 2200 parent = ref->parent; 2201 else 2202 ref_root = ref->root; 2203 2204 ins.objectid = node->bytenr; 2205 if (skinny_metadata) { 2206 ins.offset = ref->level; 2207 ins.type = BTRFS_METADATA_ITEM_KEY; 2208 } else { 2209 ins.offset = node->num_bytes; 2210 ins.type = BTRFS_EXTENT_ITEM_KEY; 2211 } 2212 2213 BUG_ON(node->ref_mod != 1); 2214 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { 2215 BUG_ON(!extent_op || !extent_op->update_flags); 2216 ret = alloc_reserved_tree_block(trans, root, 2217 parent, ref_root, 2218 extent_op->flags_to_set, 2219 &extent_op->key, 2220 ref->level, &ins); 2221 } else if (node->action == BTRFS_ADD_DELAYED_REF) { 2222 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr, 2223 node->num_bytes, parent, ref_root, 2224 ref->level, 0, 1, extent_op); 2225 } else if (node->action == BTRFS_DROP_DELAYED_REF) { 2226 ret = __btrfs_free_extent(trans, root, node->bytenr, 2227 node->num_bytes, parent, ref_root, 2228 ref->level, 0, 1, extent_op); 2229 } else { 2230 BUG(); 2231 } 2232 return ret; 2233 } 2234 2235 /* helper function to actually process a single delayed ref entry */ 2236 static int run_one_delayed_ref(struct btrfs_trans_handle *trans, 2237 struct btrfs_root *root, 2238 struct btrfs_delayed_ref_node *node, 2239 struct btrfs_delayed_extent_op *extent_op, 2240 int insert_reserved) 2241 { 2242 int ret = 0; 2243 2244 if (trans->aborted) { 2245 if (insert_reserved) 2246 btrfs_pin_extent(root, node->bytenr, 2247 node->num_bytes, 1); 2248 return 0; 2249 } 2250 2251 if (btrfs_delayed_ref_is_head(node)) { 2252 struct btrfs_delayed_ref_head *head; 2253 /* 2254 * we've hit the end of the chain and we were supposed 2255 * to insert this extent into the tree. But it got 2256 * deleted before we ever needed to insert it, so all 2257 * we have to do is clean up the accounting 2258 */ 2259 BUG_ON(extent_op); 2260 head = btrfs_delayed_node_to_head(node); 2261 trace_run_delayed_ref_head(node, head, node->action); 2262 2263 if (insert_reserved) { 2264 btrfs_pin_extent(root, node->bytenr, 2265 node->num_bytes, 1); 2266 if (head->is_data) { 2267 ret = btrfs_del_csums(trans, root, 2268 node->bytenr, 2269 node->num_bytes); 2270 } 2271 } 2272 return ret; 2273 } 2274 2275 if (node->type == BTRFS_TREE_BLOCK_REF_KEY || 2276 node->type == BTRFS_SHARED_BLOCK_REF_KEY) 2277 ret = run_delayed_tree_ref(trans, root, node, extent_op, 2278 insert_reserved); 2279 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY || 2280 node->type == BTRFS_SHARED_DATA_REF_KEY) 2281 ret = run_delayed_data_ref(trans, root, node, extent_op, 2282 insert_reserved); 2283 else 2284 BUG(); 2285 return ret; 2286 } 2287 2288 static noinline struct btrfs_delayed_ref_node * 2289 select_delayed_ref(struct btrfs_delayed_ref_head *head) 2290 { 2291 struct rb_node *node; 2292 struct btrfs_delayed_ref_node *ref, *last = NULL; 2293 2294 /* 2295 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2296 * this prevents ref count from going down to zero when 2297 * there still are pending delayed ref. 2298 */ 2299 node = rb_first(&head->ref_root); 2300 while (node) { 2301 ref = rb_entry(node, struct btrfs_delayed_ref_node, 2302 rb_node); 2303 if (ref->action == BTRFS_ADD_DELAYED_REF) 2304 return ref; 2305 else if (last == NULL) 2306 last = ref; 2307 node = rb_next(node); 2308 } 2309 return last; 2310 } 2311 2312 /* 2313 * Returns 0 on success or if called with an already aborted transaction. 2314 * Returns -ENOMEM or -EIO on failure and will abort the transaction. 2315 */ 2316 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2317 struct btrfs_root *root, 2318 unsigned long nr) 2319 { 2320 struct btrfs_delayed_ref_root *delayed_refs; 2321 struct btrfs_delayed_ref_node *ref; 2322 struct btrfs_delayed_ref_head *locked_ref = NULL; 2323 struct btrfs_delayed_extent_op *extent_op; 2324 struct btrfs_fs_info *fs_info = root->fs_info; 2325 ktime_t start = ktime_get(); 2326 int ret; 2327 unsigned long count = 0; 2328 unsigned long actual_count = 0; 2329 int must_insert_reserved = 0; 2330 2331 delayed_refs = &trans->transaction->delayed_refs; 2332 while (1) { 2333 if (!locked_ref) { 2334 if (count >= nr) 2335 break; 2336 2337 spin_lock(&delayed_refs->lock); 2338 locked_ref = btrfs_select_ref_head(trans); 2339 if (!locked_ref) { 2340 spin_unlock(&delayed_refs->lock); 2341 break; 2342 } 2343 2344 /* grab the lock that says we are going to process 2345 * all the refs for this head */ 2346 ret = btrfs_delayed_ref_lock(trans, locked_ref); 2347 spin_unlock(&delayed_refs->lock); 2348 /* 2349 * we may have dropped the spin lock to get the head 2350 * mutex lock, and that might have given someone else 2351 * time to free the head. If that's true, it has been 2352 * removed from our list and we can move on. 2353 */ 2354 if (ret == -EAGAIN) { 2355 locked_ref = NULL; 2356 count++; 2357 continue; 2358 } 2359 } 2360 2361 /* 2362 * We need to try and merge add/drops of the same ref since we 2363 * can run into issues with relocate dropping the implicit ref 2364 * and then it being added back again before the drop can 2365 * finish. If we merged anything we need to re-loop so we can 2366 * get a good ref. 2367 */ 2368 spin_lock(&locked_ref->lock); 2369 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs, 2370 locked_ref); 2371 2372 /* 2373 * locked_ref is the head node, so we have to go one 2374 * node back for any delayed ref updates 2375 */ 2376 ref = select_delayed_ref(locked_ref); 2377 2378 if (ref && ref->seq && 2379 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) { 2380 spin_unlock(&locked_ref->lock); 2381 btrfs_delayed_ref_unlock(locked_ref); 2382 spin_lock(&delayed_refs->lock); 2383 locked_ref->processing = 0; 2384 delayed_refs->num_heads_ready++; 2385 spin_unlock(&delayed_refs->lock); 2386 locked_ref = NULL; 2387 cond_resched(); 2388 continue; 2389 } 2390 2391 /* 2392 * record the must insert reserved flag before we 2393 * drop the spin lock. 
2394 */ 2395 must_insert_reserved = locked_ref->must_insert_reserved; 2396 locked_ref->must_insert_reserved = 0; 2397 2398 extent_op = locked_ref->extent_op; 2399 locked_ref->extent_op = NULL; 2400 2401 if (!ref) { 2402 2403 2404 /* All delayed refs have been processed, go ahead 2405 * and send the head node to run_one_delayed_ref, 2406 * so that any accounting fixes can happen 2407 */ 2408 ref = &locked_ref->node; 2409 2410 if (extent_op && must_insert_reserved) { 2411 btrfs_free_delayed_extent_op(extent_op); 2412 extent_op = NULL; 2413 } 2414 2415 if (extent_op) { 2416 spin_unlock(&locked_ref->lock); 2417 ret = run_delayed_extent_op(trans, root, 2418 ref, extent_op); 2419 btrfs_free_delayed_extent_op(extent_op); 2420 2421 if (ret) { 2422 /* 2423 * Need to reset must_insert_reserved if 2424 * there was an error so the abort stuff 2425 * can clean up the reserved space 2426 * properly. 2427 */ 2428 if (must_insert_reserved) 2429 locked_ref->must_insert_reserved = 1; 2430 locked_ref->processing = 0; 2431 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret); 2432 btrfs_delayed_ref_unlock(locked_ref); 2433 return ret; 2434 } 2435 continue; 2436 } 2437 2438 /* 2439 * Need to drop our head ref lock and re-acquire the 2440 * delayed ref lock and then re-check to make sure 2441 * nobody got added. 2442 */ 2443 spin_unlock(&locked_ref->lock); 2444 spin_lock(&delayed_refs->lock); 2445 spin_lock(&locked_ref->lock); 2446 if (rb_first(&locked_ref->ref_root)) { 2447 spin_unlock(&locked_ref->lock); 2448 spin_unlock(&delayed_refs->lock); 2449 continue; 2450 } 2451 ref->in_tree = 0; 2452 delayed_refs->num_heads--; 2453 rb_erase(&locked_ref->href_node, 2454 &delayed_refs->href_root); 2455 spin_unlock(&delayed_refs->lock); 2456 } else { 2457 actual_count++; 2458 ref->in_tree = 0; 2459 rb_erase(&ref->rb_node, &locked_ref->ref_root); 2460 } 2461 atomic_dec(&delayed_refs->num_entries); 2462 2463 if (!btrfs_delayed_ref_is_head(ref)) { 2464 /* 2465 * when we play the delayed ref, also correct the 2466 * ref_mod on head 2467 */ 2468 switch (ref->action) { 2469 case BTRFS_ADD_DELAYED_REF: 2470 case BTRFS_ADD_DELAYED_EXTENT: 2471 locked_ref->node.ref_mod -= ref->ref_mod; 2472 break; 2473 case BTRFS_DROP_DELAYED_REF: 2474 locked_ref->node.ref_mod += ref->ref_mod; 2475 break; 2476 default: 2477 WARN_ON(1); 2478 } 2479 } 2480 spin_unlock(&locked_ref->lock); 2481 2482 ret = run_one_delayed_ref(trans, root, ref, extent_op, 2483 must_insert_reserved); 2484 2485 btrfs_free_delayed_extent_op(extent_op); 2486 if (ret) { 2487 locked_ref->processing = 0; 2488 btrfs_delayed_ref_unlock(locked_ref); 2489 btrfs_put_delayed_ref(ref); 2490 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret); 2491 return ret; 2492 } 2493 2494 /* 2495 * If this node is a head, that means all the refs in this head 2496 * have been dealt with, and we will pick the next head to deal 2497 * with, so we must unlock the head and drop it from the cluster 2498 * list before we release it. 2499 */ 2500 if (btrfs_delayed_ref_is_head(ref)) { 2501 btrfs_delayed_ref_unlock(locked_ref); 2502 locked_ref = NULL; 2503 } 2504 btrfs_put_delayed_ref(ref); 2505 count++; 2506 cond_resched(); 2507 } 2508 2509 /* 2510 * We don't want to include ref heads since we can have empty ref heads 2511 * and those will drastically skew our runtime down since we just do 2512 * accounting, no actual extent tree updates.
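 * (Note that actual_count above is only incremented for the non-head refs we removed and ran, not for head nodes.)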
2513 */ 2514 if (actual_count > 0) { 2515 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start)); 2516 u64 avg; 2517 2518 /* 2519 * We weigh the current average higher than our current runtime 2520 * to avoid large swings in the average. 2521 */ 2522 spin_lock(&delayed_refs->lock); 2523 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime; 2524 avg = div64_u64(avg, 4); 2525 fs_info->avg_delayed_ref_runtime = avg; 2526 spin_unlock(&delayed_refs->lock); 2527 } 2528 return 0; 2529 } 2530 2531 #ifdef SCRAMBLE_DELAYED_REFS 2532 /* 2533 * Normally delayed refs get processed in ascending bytenr order. This 2534 * correlates in most cases to the order added. To expose dependencies on this 2535 * order, we start to process the tree in the middle instead of the beginning. 2536 */ 2537 static u64 find_middle(struct rb_root *root) 2538 { 2539 struct rb_node *n = root->rb_node; 2540 struct btrfs_delayed_ref_node *entry; 2541 int alt = 1; 2542 u64 middle; 2543 u64 first = 0, last = 0; 2544 2545 n = rb_first(root); 2546 if (n) { 2547 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2548 first = entry->bytenr; 2549 } 2550 n = rb_last(root); 2551 if (n) { 2552 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2553 last = entry->bytenr; 2554 } 2555 n = root->rb_node; 2556 2557 while (n) { 2558 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2559 WARN_ON(!entry->in_tree); 2560 2561 middle = entry->bytenr; 2562 2563 if (alt) 2564 n = n->rb_left; 2565 else 2566 n = n->rb_right; 2567 2568 alt = 1 - alt; 2569 } 2570 return middle; 2571 } 2572 #endif 2573 2574 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans, 2575 struct btrfs_fs_info *fs_info) 2576 { 2577 struct qgroup_update *qgroup_update; 2578 int ret = 0; 2579 2580 if (list_empty(&trans->qgroup_ref_list) != 2581 !trans->delayed_ref_elem.seq) { 2582 /* list without seq or seq without list */ 2583 btrfs_err(fs_info, 2584 "qgroup accounting update error, list is%s empty, seq is %#x.%x", 2585 list_empty(&trans->qgroup_ref_list) ? "" : " not", 2586 (u32)(trans->delayed_ref_elem.seq >> 32), 2587 (u32)trans->delayed_ref_elem.seq); 2588 BUG(); 2589 } 2590 2591 if (!trans->delayed_ref_elem.seq) 2592 return 0; 2593 2594 while (!list_empty(&trans->qgroup_ref_list)) { 2595 qgroup_update = list_first_entry(&trans->qgroup_ref_list, 2596 struct qgroup_update, list); 2597 list_del(&qgroup_update->list); 2598 if (!ret) 2599 ret = btrfs_qgroup_account_ref( 2600 trans, fs_info, qgroup_update->node, 2601 qgroup_update->extent_op); 2602 kfree(qgroup_update); 2603 } 2604 2605 btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem); 2606 2607 return ret; 2608 } 2609 2610 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads) 2611 { 2612 u64 num_bytes; 2613 2614 num_bytes = heads * (sizeof(struct btrfs_extent_item) + 2615 sizeof(struct btrfs_extent_inline_ref)); 2616 if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) 2617 num_bytes += heads * sizeof(struct btrfs_tree_block_info); 2618 2619 /* 2620 * We don't ever fill up leaves all the way so multiply by 2 just to be 2621 * closer to what we're really going to want to use.
2622 */ 2623 return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root)); 2624 } 2625 2626 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans, 2627 struct btrfs_root *root) 2628 { 2629 struct btrfs_block_rsv *global_rsv; 2630 u64 num_heads = trans->transaction->delayed_refs.num_heads_ready; 2631 u64 num_bytes; 2632 int ret = 0; 2633 2634 num_bytes = btrfs_calc_trans_metadata_size(root, 1); 2635 num_heads = heads_to_leaves(root, num_heads); 2636 if (num_heads > 1) 2637 num_bytes += (num_heads - 1) * root->leafsize; 2638 num_bytes <<= 1; 2639 global_rsv = &root->fs_info->global_block_rsv; 2640 2641 /* 2642 * If we can't allocate any more chunks lets make sure we have _lots_ of 2643 * wiggle room since running delayed refs can create more delayed refs. 2644 */ 2645 if (global_rsv->space_info->full) 2646 num_bytes <<= 1; 2647 2648 spin_lock(&global_rsv->lock); 2649 if (global_rsv->reserved <= num_bytes) 2650 ret = 1; 2651 spin_unlock(&global_rsv->lock); 2652 return ret; 2653 } 2654 2655 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 2656 struct btrfs_root *root) 2657 { 2658 struct btrfs_fs_info *fs_info = root->fs_info; 2659 u64 num_entries = 2660 atomic_read(&trans->transaction->delayed_refs.num_entries); 2661 u64 avg_runtime; 2662 2663 smp_mb(); 2664 avg_runtime = fs_info->avg_delayed_ref_runtime; 2665 if (num_entries * avg_runtime >= NSEC_PER_SEC) 2666 return 1; 2667 2668 return btrfs_check_space_for_delayed_refs(trans, root); 2669 } 2670 2671 /* 2672 * this starts processing the delayed reference count updates and 2673 * extent insertions we have queued up so far. count can be 2674 * 0, which means to process everything in the tree at the start 2675 * of the run (but not newly added entries), or it can be some target 2676 * number you'd like to process. 
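 * (A count of (unsigned long)-1 makes this loop until the delayed ref tree is completely empty; a count of 0 is expanded below to twice the number of entries currently queued.)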
2677 * 2678 * Returns 0 on success or if called with an aborted transaction 2679 * Returns <0 on error and aborts the transaction 2680 */ 2681 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2682 struct btrfs_root *root, unsigned long count) 2683 { 2684 struct rb_node *node; 2685 struct btrfs_delayed_ref_root *delayed_refs; 2686 struct btrfs_delayed_ref_head *head; 2687 int ret; 2688 int run_all = count == (unsigned long)-1; 2689 int run_most = 0; 2690 2691 /* We'll clean this up in btrfs_cleanup_transaction */ 2692 if (trans->aborted) 2693 return 0; 2694 2695 if (root == root->fs_info->extent_root) 2696 root = root->fs_info->tree_root; 2697 2698 btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info); 2699 2700 delayed_refs = &trans->transaction->delayed_refs; 2701 if (count == 0) { 2702 count = atomic_read(&delayed_refs->num_entries) * 2; 2703 run_most = 1; 2704 } 2705 2706 again: 2707 #ifdef SCRAMBLE_DELAYED_REFS 2708 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); 2709 #endif 2710 ret = __btrfs_run_delayed_refs(trans, root, count); 2711 if (ret < 0) { 2712 btrfs_abort_transaction(trans, root, ret); 2713 return ret; 2714 } 2715 2716 if (run_all) { 2717 if (!list_empty(&trans->new_bgs)) 2718 btrfs_create_pending_block_groups(trans, root); 2719 2720 spin_lock(&delayed_refs->lock); 2721 node = rb_first(&delayed_refs->href_root); 2722 if (!node) { 2723 spin_unlock(&delayed_refs->lock); 2724 goto out; 2725 } 2726 count = (unsigned long)-1; 2727 2728 while (node) { 2729 head = rb_entry(node, struct btrfs_delayed_ref_head, 2730 href_node); 2731 if (btrfs_delayed_ref_is_head(&head->node)) { 2732 struct btrfs_delayed_ref_node *ref; 2733 2734 ref = &head->node; 2735 atomic_inc(&ref->refs); 2736 2737 spin_unlock(&delayed_refs->lock); 2738 /* 2739 * Mutex was contended, block until it's 2740 * released and try again 2741 */ 2742 mutex_lock(&head->mutex); 2743 mutex_unlock(&head->mutex); 2744 2745 btrfs_put_delayed_ref(ref); 2746 cond_resched(); 2747 goto again; 2748 } else { 2749 WARN_ON(1); 2750 } 2751 node = rb_next(node); 2752 } 2753 spin_unlock(&delayed_refs->lock); 2754 cond_resched(); 2755 goto again; 2756 } 2757 out: 2758 assert_qgroups_uptodate(trans); 2759 return 0; 2760 } 2761 2762 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2763 struct btrfs_root *root, 2764 u64 bytenr, u64 num_bytes, u64 flags, 2765 int level, int is_data) 2766 { 2767 struct btrfs_delayed_extent_op *extent_op; 2768 int ret; 2769 2770 extent_op = btrfs_alloc_delayed_extent_op(); 2771 if (!extent_op) 2772 return -ENOMEM; 2773 2774 extent_op->flags_to_set = flags; 2775 extent_op->update_flags = 1; 2776 extent_op->update_key = 0; 2777 extent_op->is_data = is_data ? 
1 : 0; 2778 extent_op->level = level; 2779 2780 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr, 2781 num_bytes, extent_op); 2782 if (ret) 2783 btrfs_free_delayed_extent_op(extent_op); 2784 return ret; 2785 } 2786 2787 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, 2788 struct btrfs_root *root, 2789 struct btrfs_path *path, 2790 u64 objectid, u64 offset, u64 bytenr) 2791 { 2792 struct btrfs_delayed_ref_head *head; 2793 struct btrfs_delayed_ref_node *ref; 2794 struct btrfs_delayed_data_ref *data_ref; 2795 struct btrfs_delayed_ref_root *delayed_refs; 2796 struct rb_node *node; 2797 int ret = 0; 2798 2799 delayed_refs = &trans->transaction->delayed_refs; 2800 spin_lock(&delayed_refs->lock); 2801 head = btrfs_find_delayed_ref_head(trans, bytenr); 2802 if (!head) { 2803 spin_unlock(&delayed_refs->lock); 2804 return 0; 2805 } 2806 2807 if (!mutex_trylock(&head->mutex)) { 2808 atomic_inc(&head->node.refs); 2809 spin_unlock(&delayed_refs->lock); 2810 2811 btrfs_release_path(path); 2812 2813 /* 2814 * Mutex was contended, block until it's released and let 2815 * caller try again 2816 */ 2817 mutex_lock(&head->mutex); 2818 mutex_unlock(&head->mutex); 2819 btrfs_put_delayed_ref(&head->node); 2820 return -EAGAIN; 2821 } 2822 spin_unlock(&delayed_refs->lock); 2823 2824 spin_lock(&head->lock); 2825 node = rb_first(&head->ref_root); 2826 while (node) { 2827 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); 2828 node = rb_next(node); 2829 2830 /* If it's a shared ref we know a cross reference exists */ 2831 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { 2832 ret = 1; 2833 break; 2834 } 2835 2836 data_ref = btrfs_delayed_node_to_data_ref(ref); 2837 2838 /* 2839 * If our ref doesn't match the one we're currently looking at 2840 * then we have a cross reference. 
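 * (A delayed data ref belonging to another root, or to a different objectid or offset, means the extent is shared, so report a cross reference.)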
2841 */ 2842 if (data_ref->root != root->root_key.objectid || 2843 data_ref->objectid != objectid || 2844 data_ref->offset != offset) { 2845 ret = 1; 2846 break; 2847 } 2848 } 2849 spin_unlock(&head->lock); 2850 mutex_unlock(&head->mutex); 2851 return ret; 2852 } 2853 2854 static noinline int check_committed_ref(struct btrfs_trans_handle *trans, 2855 struct btrfs_root *root, 2856 struct btrfs_path *path, 2857 u64 objectid, u64 offset, u64 bytenr) 2858 { 2859 struct btrfs_root *extent_root = root->fs_info->extent_root; 2860 struct extent_buffer *leaf; 2861 struct btrfs_extent_data_ref *ref; 2862 struct btrfs_extent_inline_ref *iref; 2863 struct btrfs_extent_item *ei; 2864 struct btrfs_key key; 2865 u32 item_size; 2866 int ret; 2867 2868 key.objectid = bytenr; 2869 key.offset = (u64)-1; 2870 key.type = BTRFS_EXTENT_ITEM_KEY; 2871 2872 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2873 if (ret < 0) 2874 goto out; 2875 BUG_ON(ret == 0); /* Corruption */ 2876 2877 ret = -ENOENT; 2878 if (path->slots[0] == 0) 2879 goto out; 2880 2881 path->slots[0]--; 2882 leaf = path->nodes[0]; 2883 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2884 2885 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) 2886 goto out; 2887 2888 ret = 1; 2889 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 2890 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 2891 if (item_size < sizeof(*ei)) { 2892 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0)); 2893 goto out; 2894 } 2895 #endif 2896 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2897 2898 if (item_size != sizeof(*ei) + 2899 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) 2900 goto out; 2901 2902 if (btrfs_extent_generation(leaf, ei) <= 2903 btrfs_root_last_snapshot(&root->root_item)) 2904 goto out; 2905 2906 iref = (struct btrfs_extent_inline_ref *)(ei + 1); 2907 if (btrfs_extent_inline_ref_type(leaf, iref) != 2908 BTRFS_EXTENT_DATA_REF_KEY) 2909 goto out; 2910 2911 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 2912 if (btrfs_extent_refs(leaf, ei) != 2913 btrfs_extent_data_ref_count(leaf, ref) || 2914 btrfs_extent_data_ref_root(leaf, ref) != 2915 root->root_key.objectid || 2916 btrfs_extent_data_ref_objectid(leaf, ref) != objectid || 2917 btrfs_extent_data_ref_offset(leaf, ref) != offset) 2918 goto out; 2919 2920 ret = 0; 2921 out: 2922 return ret; 2923 } 2924 2925 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 2926 struct btrfs_root *root, 2927 u64 objectid, u64 offset, u64 bytenr) 2928 { 2929 struct btrfs_path *path; 2930 int ret; 2931 int ret2; 2932 2933 path = btrfs_alloc_path(); 2934 if (!path) 2935 return -ENOENT; 2936 2937 do { 2938 ret = check_committed_ref(trans, root, path, objectid, 2939 offset, bytenr); 2940 if (ret && ret != -ENOENT) 2941 goto out; 2942 2943 ret2 = check_delayed_ref(trans, root, path, objectid, 2944 offset, bytenr); 2945 } while (ret2 == -EAGAIN); 2946 2947 if (ret2 && ret2 != -ENOENT) { 2948 ret = ret2; 2949 goto out; 2950 } 2951 2952 if (ret != -ENOENT || ret2 != -ENOENT) 2953 ret = 0; 2954 out: 2955 btrfs_free_path(path); 2956 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 2957 WARN_ON(ret > 0); 2958 return ret; 2959 } 2960 2961 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, 2962 struct btrfs_root *root, 2963 struct extent_buffer *buf, 2964 int full_backref, int inc, int for_cow) 2965 { 2966 u64 bytenr; 2967 u64 num_bytes; 2968 u64 parent; 2969 u64 ref_root; 2970 u32 nritems; 2971 struct btrfs_key key; 2972 struct 
btrfs_file_extent_item *fi; 2973 int i; 2974 int level; 2975 int ret = 0; 2976 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *, 2977 u64, u64, u64, u64, u64, u64, int); 2978 2979 ref_root = btrfs_header_owner(buf); 2980 nritems = btrfs_header_nritems(buf); 2981 level = btrfs_header_level(buf); 2982 2983 if (!root->ref_cows && level == 0) 2984 return 0; 2985 2986 if (inc) 2987 process_func = btrfs_inc_extent_ref; 2988 else 2989 process_func = btrfs_free_extent; 2990 2991 if (full_backref) 2992 parent = buf->start; 2993 else 2994 parent = 0; 2995 2996 for (i = 0; i < nritems; i++) { 2997 if (level == 0) { 2998 btrfs_item_key_to_cpu(buf, &key, i); 2999 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) 3000 continue; 3001 fi = btrfs_item_ptr(buf, i, 3002 struct btrfs_file_extent_item); 3003 if (btrfs_file_extent_type(buf, fi) == 3004 BTRFS_FILE_EXTENT_INLINE) 3005 continue; 3006 bytenr = btrfs_file_extent_disk_bytenr(buf, fi); 3007 if (bytenr == 0) 3008 continue; 3009 3010 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); 3011 key.offset -= btrfs_file_extent_offset(buf, fi); 3012 ret = process_func(trans, root, bytenr, num_bytes, 3013 parent, ref_root, key.objectid, 3014 key.offset, for_cow); 3015 if (ret) 3016 goto fail; 3017 } else { 3018 bytenr = btrfs_node_blockptr(buf, i); 3019 num_bytes = btrfs_level_size(root, level - 1); 3020 ret = process_func(trans, root, bytenr, num_bytes, 3021 parent, ref_root, level - 1, 0, 3022 for_cow); 3023 if (ret) 3024 goto fail; 3025 } 3026 } 3027 return 0; 3028 fail: 3029 return ret; 3030 } 3031 3032 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3033 struct extent_buffer *buf, int full_backref, int for_cow) 3034 { 3035 return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow); 3036 } 3037 3038 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3039 struct extent_buffer *buf, int full_backref, int for_cow) 3040 { 3041 return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow); 3042 } 3043 3044 static int write_one_cache_group(struct btrfs_trans_handle *trans, 3045 struct btrfs_root *root, 3046 struct btrfs_path *path, 3047 struct btrfs_block_group_cache *cache) 3048 { 3049 int ret; 3050 struct btrfs_root *extent_root = root->fs_info->extent_root; 3051 unsigned long bi; 3052 struct extent_buffer *leaf; 3053 3054 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); 3055 if (ret < 0) 3056 goto fail; 3057 BUG_ON(ret); /* Corruption */ 3058 3059 leaf = path->nodes[0]; 3060 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 3061 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); 3062 btrfs_mark_buffer_dirty(leaf); 3063 btrfs_release_path(path); 3064 fail: 3065 if (ret) { 3066 btrfs_abort_transaction(trans, root, ret); 3067 return ret; 3068 } 3069 return 0; 3070 3071 } 3072 3073 static struct btrfs_block_group_cache * 3074 next_block_group(struct btrfs_root *root, 3075 struct btrfs_block_group_cache *cache) 3076 { 3077 struct rb_node *node; 3078 spin_lock(&root->fs_info->block_group_cache_lock); 3079 node = rb_next(&cache->cache_node); 3080 btrfs_put_block_group(cache); 3081 if (node) { 3082 cache = rb_entry(node, struct btrfs_block_group_cache, 3083 cache_node); 3084 btrfs_get_block_group(cache); 3085 } else 3086 cache = NULL; 3087 spin_unlock(&root->fs_info->block_group_cache_lock); 3088 return cache; 3089 } 3090 3091 static int cache_save_setup(struct btrfs_block_group_cache *block_group, 3092 struct btrfs_trans_handle *trans, 3093 
struct btrfs_path *path) 3094 { 3095 struct btrfs_root *root = block_group->fs_info->tree_root; 3096 struct inode *inode = NULL; 3097 u64 alloc_hint = 0; 3098 int dcs = BTRFS_DC_ERROR; 3099 int num_pages = 0; 3100 int retries = 0; 3101 int ret = 0; 3102 3103 /* 3104 * If this block group is smaller than 100 megs don't bother caching the 3105 * block group. 3106 */ 3107 if (block_group->key.offset < (100 * 1024 * 1024)) { 3108 spin_lock(&block_group->lock); 3109 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 3110 spin_unlock(&block_group->lock); 3111 return 0; 3112 } 3113 3114 again: 3115 inode = lookup_free_space_inode(root, block_group, path); 3116 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 3117 ret = PTR_ERR(inode); 3118 btrfs_release_path(path); 3119 goto out; 3120 } 3121 3122 if (IS_ERR(inode)) { 3123 BUG_ON(retries); 3124 retries++; 3125 3126 if (block_group->ro) 3127 goto out_free; 3128 3129 ret = create_free_space_inode(root, trans, block_group, path); 3130 if (ret) 3131 goto out_free; 3132 goto again; 3133 } 3134 3135 /* We've already setup this transaction, go ahead and exit */ 3136 if (block_group->cache_generation == trans->transid && 3137 i_size_read(inode)) { 3138 dcs = BTRFS_DC_SETUP; 3139 goto out_put; 3140 } 3141 3142 /* 3143 * We want to set the generation to 0, that way if anything goes wrong 3144 * from here on out we know not to trust this cache when we load up next 3145 * time. 3146 */ 3147 BTRFS_I(inode)->generation = 0; 3148 ret = btrfs_update_inode(trans, root, inode); 3149 WARN_ON(ret); 3150 3151 if (i_size_read(inode) > 0) { 3152 ret = btrfs_check_trunc_cache_free_space(root, 3153 &root->fs_info->global_block_rsv); 3154 if (ret) 3155 goto out_put; 3156 3157 ret = btrfs_truncate_free_space_cache(root, trans, inode); 3158 if (ret) 3159 goto out_put; 3160 } 3161 3162 spin_lock(&block_group->lock); 3163 if (block_group->cached != BTRFS_CACHE_FINISHED || 3164 !btrfs_test_opt(root, SPACE_CACHE)) { 3165 /* 3166 * don't bother trying to write stuff out _if_ 3167 * a) we're not cached, 3168 * b) we're with nospace_cache mount option. 3169 */ 3170 dcs = BTRFS_DC_WRITTEN; 3171 spin_unlock(&block_group->lock); 3172 goto out_put; 3173 } 3174 spin_unlock(&block_group->lock); 3175 3176 /* 3177 * Try to preallocate enough space based on how big the block group is. 3178 * Keep in mind this has to include any pinned space which could end up 3179 * taking up quite a bit since it's not folded into the other space 3180 * cache. 
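 * The sizing below works out to 16 pages of cache space for every 256MB of block group, with a minimum of 16 pages.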
3181 */ 3182 num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024); 3183 if (!num_pages) 3184 num_pages = 1; 3185 3186 num_pages *= 16; 3187 num_pages *= PAGE_CACHE_SIZE; 3188 3189 ret = btrfs_check_data_free_space(inode, num_pages); 3190 if (ret) 3191 goto out_put; 3192 3193 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, 3194 num_pages, num_pages, 3195 &alloc_hint); 3196 if (!ret) 3197 dcs = BTRFS_DC_SETUP; 3198 btrfs_free_reserved_data_space(inode, num_pages); 3199 3200 out_put: 3201 iput(inode); 3202 out_free: 3203 btrfs_release_path(path); 3204 out: 3205 spin_lock(&block_group->lock); 3206 if (!ret && dcs == BTRFS_DC_SETUP) 3207 block_group->cache_generation = trans->transid; 3208 block_group->disk_cache_state = dcs; 3209 spin_unlock(&block_group->lock); 3210 3211 return ret; 3212 } 3213 3214 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 3215 struct btrfs_root *root) 3216 { 3217 struct btrfs_block_group_cache *cache; 3218 int err = 0; 3219 struct btrfs_path *path; 3220 u64 last = 0; 3221 3222 path = btrfs_alloc_path(); 3223 if (!path) 3224 return -ENOMEM; 3225 3226 again: 3227 while (1) { 3228 cache = btrfs_lookup_first_block_group(root->fs_info, last); 3229 while (cache) { 3230 if (cache->disk_cache_state == BTRFS_DC_CLEAR) 3231 break; 3232 cache = next_block_group(root, cache); 3233 } 3234 if (!cache) { 3235 if (last == 0) 3236 break; 3237 last = 0; 3238 continue; 3239 } 3240 err = cache_save_setup(cache, trans, path); 3241 last = cache->key.objectid + cache->key.offset; 3242 btrfs_put_block_group(cache); 3243 } 3244 3245 while (1) { 3246 if (last == 0) { 3247 err = btrfs_run_delayed_refs(trans, root, 3248 (unsigned long)-1); 3249 if (err) /* File system offline */ 3250 goto out; 3251 } 3252 3253 cache = btrfs_lookup_first_block_group(root->fs_info, last); 3254 while (cache) { 3255 if (cache->disk_cache_state == BTRFS_DC_CLEAR) { 3256 btrfs_put_block_group(cache); 3257 goto again; 3258 } 3259 3260 if (cache->dirty) 3261 break; 3262 cache = next_block_group(root, cache); 3263 } 3264 if (!cache) { 3265 if (last == 0) 3266 break; 3267 last = 0; 3268 continue; 3269 } 3270 3271 if (cache->disk_cache_state == BTRFS_DC_SETUP) 3272 cache->disk_cache_state = BTRFS_DC_NEED_WRITE; 3273 cache->dirty = 0; 3274 last = cache->key.objectid + cache->key.offset; 3275 3276 err = write_one_cache_group(trans, root, path, cache); 3277 btrfs_put_block_group(cache); 3278 if (err) /* File system offline */ 3279 goto out; 3280 } 3281 3282 while (1) { 3283 /* 3284 * I don't think this is needed since we're just marking our 3285 * preallocated extent as written, but just in case it can't 3286 * hurt. 3287 */ 3288 if (last == 0) { 3289 err = btrfs_run_delayed_refs(trans, root, 3290 (unsigned long)-1); 3291 if (err) /* File system offline */ 3292 goto out; 3293 } 3294 3295 cache = btrfs_lookup_first_block_group(root->fs_info, last); 3296 while (cache) { 3297 /* 3298 * Really this shouldn't happen, but it could if we 3299 * couldn't write the entire preallocated extent and 3300 * splitting the extent resulted in a new block. 
3301 */ 3302 if (cache->dirty) { 3303 btrfs_put_block_group(cache); 3304 goto again; 3305 } 3306 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) 3307 break; 3308 cache = next_block_group(root, cache); 3309 } 3310 if (!cache) { 3311 if (last == 0) 3312 break; 3313 last = 0; 3314 continue; 3315 } 3316 3317 err = btrfs_write_out_cache(root, trans, cache, path); 3318 3319 /* 3320 * If we didn't have an error then the cache state is still 3321 * NEED_WRITE, so we can set it to WRITTEN. 3322 */ 3323 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE) 3324 cache->disk_cache_state = BTRFS_DC_WRITTEN; 3325 last = cache->key.objectid + cache->key.offset; 3326 btrfs_put_block_group(cache); 3327 } 3328 out: 3329 3330 btrfs_free_path(path); 3331 return err; 3332 } 3333 3334 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) 3335 { 3336 struct btrfs_block_group_cache *block_group; 3337 int readonly = 0; 3338 3339 block_group = btrfs_lookup_block_group(root->fs_info, bytenr); 3340 if (!block_group || block_group->ro) 3341 readonly = 1; 3342 if (block_group) 3343 btrfs_put_block_group(block_group); 3344 return readonly; 3345 } 3346 3347 static const char *alloc_name(u64 flags) 3348 { 3349 switch (flags) { 3350 case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA: 3351 return "mixed"; 3352 case BTRFS_BLOCK_GROUP_METADATA: 3353 return "metadata"; 3354 case BTRFS_BLOCK_GROUP_DATA: 3355 return "data"; 3356 case BTRFS_BLOCK_GROUP_SYSTEM: 3357 return "system"; 3358 default: 3359 WARN_ON(1); 3360 return "invalid-combination"; 3361 }; 3362 } 3363 3364 static int update_space_info(struct btrfs_fs_info *info, u64 flags, 3365 u64 total_bytes, u64 bytes_used, 3366 struct btrfs_space_info **space_info) 3367 { 3368 struct btrfs_space_info *found; 3369 int i; 3370 int factor; 3371 int ret; 3372 3373 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | 3374 BTRFS_BLOCK_GROUP_RAID10)) 3375 factor = 2; 3376 else 3377 factor = 1; 3378 3379 found = __find_space_info(info, flags); 3380 if (found) { 3381 spin_lock(&found->lock); 3382 found->total_bytes += total_bytes; 3383 found->disk_total += total_bytes * factor; 3384 found->bytes_used += bytes_used; 3385 found->disk_used += bytes_used * factor; 3386 found->full = 0; 3387 spin_unlock(&found->lock); 3388 *space_info = found; 3389 return 0; 3390 } 3391 found = kzalloc(sizeof(*found), GFP_NOFS); 3392 if (!found) 3393 return -ENOMEM; 3394 3395 ret = percpu_counter_init(&found->total_bytes_pinned, 0); 3396 if (ret) { 3397 kfree(found); 3398 return ret; 3399 } 3400 3401 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 3402 INIT_LIST_HEAD(&found->block_groups[i]); 3403 kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype); 3404 } 3405 init_rwsem(&found->groups_sem); 3406 spin_lock_init(&found->lock); 3407 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; 3408 found->total_bytes = total_bytes; 3409 found->disk_total = total_bytes * factor; 3410 found->bytes_used = bytes_used; 3411 found->disk_used = bytes_used * factor; 3412 found->bytes_pinned = 0; 3413 found->bytes_reserved = 0; 3414 found->bytes_readonly = 0; 3415 found->bytes_may_use = 0; 3416 found->full = 0; 3417 found->force_alloc = CHUNK_ALLOC_NO_FORCE; 3418 found->chunk_alloc = 0; 3419 found->flush = 0; 3420 init_waitqueue_head(&found->wait); 3421 3422 ret = kobject_init_and_add(&found->kobj, &space_info_ktype, 3423 info->space_info_kobj, "%s", 3424 alloc_name(found->flags)); 3425 if (ret) { 3426 kfree(found); 3427 return ret; 3428 } 3429 3430 *space_info = found; 3431 
list_add_rcu(&found->list, &info->space_info); 3432 if (flags & BTRFS_BLOCK_GROUP_DATA) 3433 info->data_sinfo = found; 3434 3435 return ret; 3436 } 3437 3438 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 3439 { 3440 u64 extra_flags = chunk_to_extended(flags) & 3441 BTRFS_EXTENDED_PROFILE_MASK; 3442 3443 write_seqlock(&fs_info->profiles_lock); 3444 if (flags & BTRFS_BLOCK_GROUP_DATA) 3445 fs_info->avail_data_alloc_bits |= extra_flags; 3446 if (flags & BTRFS_BLOCK_GROUP_METADATA) 3447 fs_info->avail_metadata_alloc_bits |= extra_flags; 3448 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 3449 fs_info->avail_system_alloc_bits |= extra_flags; 3450 write_sequnlock(&fs_info->profiles_lock); 3451 } 3452 3453 /* 3454 * returns target flags in extended format or 0 if restripe for this 3455 * chunk_type is not in progress 3456 * 3457 * should be called with either volume_mutex or balance_lock held 3458 */ 3459 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) 3460 { 3461 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3462 u64 target = 0; 3463 3464 if (!bctl) 3465 return 0; 3466 3467 if (flags & BTRFS_BLOCK_GROUP_DATA && 3468 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3469 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; 3470 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM && 3471 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3472 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; 3473 } else if (flags & BTRFS_BLOCK_GROUP_METADATA && 3474 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3475 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; 3476 } 3477 3478 return target; 3479 } 3480 3481 /* 3482 * @flags: available profiles in extended format (see ctree.h) 3483 * 3484 * Returns reduced profile in chunk format. If profile changing is in 3485 * progress (either running or paused) picks the target profile (if it's 3486 * already available), otherwise falls back to plain reducing. 3487 */ 3488 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) 3489 { 3490 /* 3491 * we add in the count of missing devices because we want 3492 * to make sure that any RAID levels on a degraded FS 3493 * continue to be honored. 
3494 */ 3495 u64 num_devices = root->fs_info->fs_devices->rw_devices + 3496 root->fs_info->fs_devices->missing_devices; 3497 u64 target; 3498 u64 tmp; 3499 3500 /* 3501 * see if restripe for this chunk_type is in progress, if so 3502 * try to reduce to the target profile 3503 */ 3504 spin_lock(&root->fs_info->balance_lock); 3505 target = get_restripe_target(root->fs_info, flags); 3506 if (target) { 3507 /* pick target profile only if it's already available */ 3508 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) { 3509 spin_unlock(&root->fs_info->balance_lock); 3510 return extended_to_chunk(target); 3511 } 3512 } 3513 spin_unlock(&root->fs_info->balance_lock); 3514 3515 /* First, mask out the RAID levels which aren't possible */ 3516 if (num_devices == 1) 3517 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 | 3518 BTRFS_BLOCK_GROUP_RAID5); 3519 if (num_devices < 3) 3520 flags &= ~BTRFS_BLOCK_GROUP_RAID6; 3521 if (num_devices < 4) 3522 flags &= ~BTRFS_BLOCK_GROUP_RAID10; 3523 3524 tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 | 3525 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 | 3526 BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10); 3527 flags &= ~tmp; 3528 3529 if (tmp & BTRFS_BLOCK_GROUP_RAID6) 3530 tmp = BTRFS_BLOCK_GROUP_RAID6; 3531 else if (tmp & BTRFS_BLOCK_GROUP_RAID5) 3532 tmp = BTRFS_BLOCK_GROUP_RAID5; 3533 else if (tmp & BTRFS_BLOCK_GROUP_RAID10) 3534 tmp = BTRFS_BLOCK_GROUP_RAID10; 3535 else if (tmp & BTRFS_BLOCK_GROUP_RAID1) 3536 tmp = BTRFS_BLOCK_GROUP_RAID1; 3537 else if (tmp & BTRFS_BLOCK_GROUP_RAID0) 3538 tmp = BTRFS_BLOCK_GROUP_RAID0; 3539 3540 return extended_to_chunk(flags | tmp); 3541 } 3542 3543 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags) 3544 { 3545 unsigned seq; 3546 3547 do { 3548 seq = read_seqbegin(&root->fs_info->profiles_lock); 3549 3550 if (flags & BTRFS_BLOCK_GROUP_DATA) 3551 flags |= root->fs_info->avail_data_alloc_bits; 3552 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 3553 flags |= root->fs_info->avail_system_alloc_bits; 3554 else if (flags & BTRFS_BLOCK_GROUP_METADATA) 3555 flags |= root->fs_info->avail_metadata_alloc_bits; 3556 } while (read_seqretry(&root->fs_info->profiles_lock, seq)); 3557 3558 return btrfs_reduce_alloc_profile(root, flags); 3559 } 3560 3561 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) 3562 { 3563 u64 flags; 3564 u64 ret; 3565 3566 if (data) 3567 flags = BTRFS_BLOCK_GROUP_DATA; 3568 else if (root == root->fs_info->chunk_root) 3569 flags = BTRFS_BLOCK_GROUP_SYSTEM; 3570 else 3571 flags = BTRFS_BLOCK_GROUP_METADATA; 3572 3573 ret = get_alloc_profile(root, flags); 3574 return ret; 3575 } 3576 3577 /* 3578 * This will check the space that the inode allocates from to make sure we have 3579 * enough space for bytes. 
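 * Returns 0 once the bytes have been added to bytes_may_use, or -ENOSPC (or another error) if the space could not be reserved.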
3580 */ 3581 int btrfs_check_data_free_space(struct inode *inode, u64 bytes) 3582 { 3583 struct btrfs_space_info *data_sinfo; 3584 struct btrfs_root *root = BTRFS_I(inode)->root; 3585 struct btrfs_fs_info *fs_info = root->fs_info; 3586 u64 used; 3587 int ret = 0, committed = 0, alloc_chunk = 1; 3588 3589 /* make sure bytes are sectorsize aligned */ 3590 bytes = ALIGN(bytes, root->sectorsize); 3591 3592 if (btrfs_is_free_space_inode(inode)) { 3593 committed = 1; 3594 ASSERT(current->journal_info); 3595 } 3596 3597 data_sinfo = fs_info->data_sinfo; 3598 if (!data_sinfo) 3599 goto alloc; 3600 3601 again: 3602 /* make sure we have enough space to handle the data first */ 3603 spin_lock(&data_sinfo->lock); 3604 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved + 3605 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly + 3606 data_sinfo->bytes_may_use; 3607 3608 if (used + bytes > data_sinfo->total_bytes) { 3609 struct btrfs_trans_handle *trans; 3610 3611 /* 3612 * if we don't have enough free bytes in this space then we need 3613 * to alloc a new chunk. 3614 */ 3615 if (!data_sinfo->full && alloc_chunk) { 3616 u64 alloc_target; 3617 3618 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE; 3619 spin_unlock(&data_sinfo->lock); 3620 alloc: 3621 alloc_target = btrfs_get_alloc_profile(root, 1); 3622 /* 3623 * It is ugly that we don't call nolock join 3624 * transaction for the free space inode case here. 3625 * But it is safe because we only do the data space 3626 * reservation for the free space cache in the 3627 * transaction context, the common join transaction 3628 * just increase the counter of the current transaction 3629 * handler, doesn't try to acquire the trans_lock of 3630 * the fs. 3631 */ 3632 trans = btrfs_join_transaction(root); 3633 if (IS_ERR(trans)) 3634 return PTR_ERR(trans); 3635 3636 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 3637 alloc_target, 3638 CHUNK_ALLOC_NO_FORCE); 3639 btrfs_end_transaction(trans, root); 3640 if (ret < 0) { 3641 if (ret != -ENOSPC) 3642 return ret; 3643 else 3644 goto commit_trans; 3645 } 3646 3647 if (!data_sinfo) 3648 data_sinfo = fs_info->data_sinfo; 3649 3650 goto again; 3651 } 3652 3653 /* 3654 * If we don't have enough pinned space to deal with this 3655 * allocation don't bother committing the transaction. 3656 */ 3657 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned, 3658 bytes) < 0) 3659 committed = 1; 3660 spin_unlock(&data_sinfo->lock); 3661 3662 /* commit the current transaction and try again */ 3663 commit_trans: 3664 if (!committed && 3665 !atomic_read(&root->fs_info->open_ioctl_trans)) { 3666 committed = 1; 3667 3668 trans = btrfs_join_transaction(root); 3669 if (IS_ERR(trans)) 3670 return PTR_ERR(trans); 3671 ret = btrfs_commit_transaction(trans, root); 3672 if (ret) 3673 return ret; 3674 goto again; 3675 } 3676 3677 trace_btrfs_space_reservation(root->fs_info, 3678 "space_info:enospc", 3679 data_sinfo->flags, bytes, 1); 3680 return -ENOSPC; 3681 } 3682 data_sinfo->bytes_may_use += bytes; 3683 trace_btrfs_space_reservation(root->fs_info, "space_info", 3684 data_sinfo->flags, bytes, 1); 3685 spin_unlock(&data_sinfo->lock); 3686 3687 return 0; 3688 } 3689 3690 /* 3691 * Called if we need to clear a data reservation for this inode. 
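 * (This is the counterpart of btrfs_check_data_free_space(): bytes is aligned to the sectorsize the same way and released from bytes_may_use.)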
3692 */ 3693 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes) 3694 { 3695 struct btrfs_root *root = BTRFS_I(inode)->root; 3696 struct btrfs_space_info *data_sinfo; 3697 3698 /* make sure bytes are sectorsize aligned */ 3699 bytes = ALIGN(bytes, root->sectorsize); 3700 3701 data_sinfo = root->fs_info->data_sinfo; 3702 spin_lock(&data_sinfo->lock); 3703 WARN_ON(data_sinfo->bytes_may_use < bytes); 3704 data_sinfo->bytes_may_use -= bytes; 3705 trace_btrfs_space_reservation(root->fs_info, "space_info", 3706 data_sinfo->flags, bytes, 0); 3707 spin_unlock(&data_sinfo->lock); 3708 } 3709 3710 static void force_metadata_allocation(struct btrfs_fs_info *info) 3711 { 3712 struct list_head *head = &info->space_info; 3713 struct btrfs_space_info *found; 3714 3715 rcu_read_lock(); 3716 list_for_each_entry_rcu(found, head, list) { 3717 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 3718 found->force_alloc = CHUNK_ALLOC_FORCE; 3719 } 3720 rcu_read_unlock(); 3721 } 3722 3723 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global) 3724 { 3725 return (global->size << 1); 3726 } 3727 3728 static int should_alloc_chunk(struct btrfs_root *root, 3729 struct btrfs_space_info *sinfo, int force) 3730 { 3731 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 3732 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; 3733 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved; 3734 u64 thresh; 3735 3736 if (force == CHUNK_ALLOC_FORCE) 3737 return 1; 3738 3739 /* 3740 * We need to take into account the global rsv because for all intents 3741 * and purposes it's used space. Don't worry about locking the 3742 * global_rsv, it doesn't change except when the transaction commits. 3743 */ 3744 if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA) 3745 num_allocated += calc_global_rsv_need_space(global_rsv); 3746 3747 /* 3748 * in limited mode, we want to have some free space up to 3749 * about 1% of the FS size. 
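 * (the 1% figure below is clamped to a minimum of 64MB)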
3750 */ 3751 if (force == CHUNK_ALLOC_LIMITED) { 3752 thresh = btrfs_super_total_bytes(root->fs_info->super_copy); 3753 thresh = max_t(u64, 64 * 1024 * 1024, 3754 div_factor_fine(thresh, 1)); 3755 3756 if (num_bytes - num_allocated < thresh) 3757 return 1; 3758 } 3759 3760 if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8)) 3761 return 0; 3762 return 1; 3763 } 3764 3765 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type) 3766 { 3767 u64 num_dev; 3768 3769 if (type & (BTRFS_BLOCK_GROUP_RAID10 | 3770 BTRFS_BLOCK_GROUP_RAID0 | 3771 BTRFS_BLOCK_GROUP_RAID5 | 3772 BTRFS_BLOCK_GROUP_RAID6)) 3773 num_dev = root->fs_info->fs_devices->rw_devices; 3774 else if (type & BTRFS_BLOCK_GROUP_RAID1) 3775 num_dev = 2; 3776 else 3777 num_dev = 1; /* DUP or single */ 3778 3779 /* metadata for updating devices and chunk tree */ 3780 return btrfs_calc_trans_metadata_size(root, num_dev + 1); 3781 } 3782 3783 static void check_system_chunk(struct btrfs_trans_handle *trans, 3784 struct btrfs_root *root, u64 type) 3785 { 3786 struct btrfs_space_info *info; 3787 u64 left; 3788 u64 thresh; 3789 3790 info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 3791 spin_lock(&info->lock); 3792 left = info->total_bytes - info->bytes_used - info->bytes_pinned - 3793 info->bytes_reserved - info->bytes_readonly; 3794 spin_unlock(&info->lock); 3795 3796 thresh = get_system_chunk_thresh(root, type); 3797 if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) { 3798 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu", 3799 left, thresh, type); 3800 dump_space_info(info, 0, 0); 3801 } 3802 3803 if (left < thresh) { 3804 u64 flags; 3805 3806 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0); 3807 btrfs_alloc_chunk(trans, root, flags); 3808 } 3809 } 3810 3811 static int do_chunk_alloc(struct btrfs_trans_handle *trans, 3812 struct btrfs_root *extent_root, u64 flags, int force) 3813 { 3814 struct btrfs_space_info *space_info; 3815 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3816 int wait_for_alloc = 0; 3817 int ret = 0; 3818 3819 /* Don't re-enter if we're already allocating a chunk */ 3820 if (trans->allocating_chunk) 3821 return -ENOSPC; 3822 3823 space_info = __find_space_info(extent_root->fs_info, flags); 3824 if (!space_info) { 3825 ret = update_space_info(extent_root->fs_info, flags, 3826 0, 0, &space_info); 3827 BUG_ON(ret); /* -ENOMEM */ 3828 } 3829 BUG_ON(!space_info); /* Logic error */ 3830 3831 again: 3832 spin_lock(&space_info->lock); 3833 if (force < space_info->force_alloc) 3834 force = space_info->force_alloc; 3835 if (space_info->full) { 3836 if (should_alloc_chunk(extent_root, space_info, force)) 3837 ret = -ENOSPC; 3838 else 3839 ret = 0; 3840 spin_unlock(&space_info->lock); 3841 return ret; 3842 } 3843 3844 if (!should_alloc_chunk(extent_root, space_info, force)) { 3845 spin_unlock(&space_info->lock); 3846 return 0; 3847 } else if (space_info->chunk_alloc) { 3848 wait_for_alloc = 1; 3849 } else { 3850 space_info->chunk_alloc = 1; 3851 } 3852 3853 spin_unlock(&space_info->lock); 3854 3855 mutex_lock(&fs_info->chunk_mutex); 3856 3857 /* 3858 * The chunk_mutex is held throughout the entirety of a chunk 3859 * allocation, so once we've acquired the chunk_mutex we know that the 3860 * other guy is done and we need to recheck and see if we should 3861 * allocate.
3862 */ 3863 if (wait_for_alloc) { 3864 mutex_unlock(&fs_info->chunk_mutex); 3865 wait_for_alloc = 0; 3866 goto again; 3867 } 3868 3869 trans->allocating_chunk = true; 3870 3871 /* 3872 * If we have mixed data/metadata chunks we want to make sure we keep 3873 * allocating mixed chunks instead of individual chunks. 3874 */ 3875 if (btrfs_mixed_space_info(space_info)) 3876 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 3877 3878 /* 3879 * if we're doing a data chunk, go ahead and make sure that 3880 * we keep a reasonable number of metadata chunks allocated in the 3881 * FS as well. 3882 */ 3883 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 3884 fs_info->data_chunk_allocations++; 3885 if (!(fs_info->data_chunk_allocations % 3886 fs_info->metadata_ratio)) 3887 force_metadata_allocation(fs_info); 3888 } 3889 3890 /* 3891 * Check if we have enough space in SYSTEM chunk because we may need 3892 * to update devices. 3893 */ 3894 check_system_chunk(trans, extent_root, flags); 3895 3896 ret = btrfs_alloc_chunk(trans, extent_root, flags); 3897 trans->allocating_chunk = false; 3898 3899 spin_lock(&space_info->lock); 3900 if (ret < 0 && ret != -ENOSPC) 3901 goto out; 3902 if (ret) 3903 space_info->full = 1; 3904 else 3905 ret = 1; 3906 3907 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 3908 out: 3909 space_info->chunk_alloc = 0; 3910 spin_unlock(&space_info->lock); 3911 mutex_unlock(&fs_info->chunk_mutex); 3912 return ret; 3913 } 3914 3915 static int can_overcommit(struct btrfs_root *root, 3916 struct btrfs_space_info *space_info, u64 bytes, 3917 enum btrfs_reserve_flush_enum flush) 3918 { 3919 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 3920 u64 profile = btrfs_get_alloc_profile(root, 0); 3921 u64 space_size; 3922 u64 avail; 3923 u64 used; 3924 3925 used = space_info->bytes_used + space_info->bytes_reserved + 3926 space_info->bytes_pinned + space_info->bytes_readonly; 3927 3928 /* 3929 * We only want to allow over committing if we have lots of actual space 3930 * free, but if we don't have enough space to handle the global reserve 3931 * space then we could end up having a real enospc problem when trying 3932 * to allocate a chunk or some other such important allocation. 3933 */ 3934 spin_lock(&global_rsv->lock); 3935 space_size = calc_global_rsv_need_space(global_rsv); 3936 spin_unlock(&global_rsv->lock); 3937 if (used + space_size >= space_info->total_bytes) 3938 return 0; 3939 3940 used += space_info->bytes_may_use; 3941 3942 spin_lock(&root->fs_info->free_chunk_lock); 3943 avail = root->fs_info->free_chunk_space; 3944 spin_unlock(&root->fs_info->free_chunk_lock); 3945 3946 /* 3947 * If we have dup, raid1 or raid10 then only half of the free 3948 * space is actually useable. For raid56, the space info used 3949 * doesn't include the parity drive, so we don't have to 3950 * change the math 3951 */ 3952 if (profile & (BTRFS_BLOCK_GROUP_DUP | 3953 BTRFS_BLOCK_GROUP_RAID1 | 3954 BTRFS_BLOCK_GROUP_RAID10)) 3955 avail >>= 1; 3956 3957 /* 3958 * If we aren't flushing all things, let us overcommit up to 3959 * 1/2th of the space. If we can flush, don't let us overcommit 3960 * too much, let it overcommit up to 1/8 of the space. 
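 * (BTRFS_RESERVE_FLUSH_ALL is the "can flush" case: avail is cut to 1/8th below, any other flush mode only cuts it to 1/2.)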
3961 */ 3962 if (flush == BTRFS_RESERVE_FLUSH_ALL) 3963 avail >>= 3; 3964 else 3965 avail >>= 1; 3966 3967 if (used + bytes < space_info->total_bytes + avail) 3968 return 1; 3969 return 0; 3970 } 3971 3972 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root, 3973 unsigned long nr_pages) 3974 { 3975 struct super_block *sb = root->fs_info->sb; 3976 3977 if (down_read_trylock(&sb->s_umount)) { 3978 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE); 3979 up_read(&sb->s_umount); 3980 } else { 3981 /* 3982 * We needn't worry about the filesystem going from r/w to r/o even 3983 * though we don't acquire the ->s_umount mutex, because the filesystem 3984 * should guarantee that the delalloc inodes list is empty once 3985 * the filesystem is read-only (all dirty pages have been written to 3986 * disk). 3987 */ 3988 btrfs_start_delalloc_roots(root->fs_info, 0); 3989 if (!current->journal_info) 3990 btrfs_wait_ordered_roots(root->fs_info, -1); 3991 } 3992 } 3993 3994 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim) 3995 { 3996 u64 bytes; 3997 int nr; 3998 3999 bytes = btrfs_calc_trans_metadata_size(root, 1); 4000 nr = (int)div64_u64(to_reclaim, bytes); 4001 if (!nr) 4002 nr = 1; 4003 return nr; 4004 } 4005 4006 #define EXTENT_SIZE_PER_ITEM (256 * 1024) 4007 4008 /* 4009 * shrink metadata reservation for delalloc 4010 */ 4011 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, 4012 bool wait_ordered) 4013 { 4014 struct btrfs_block_rsv *block_rsv; 4015 struct btrfs_space_info *space_info; 4016 struct btrfs_trans_handle *trans; 4017 u64 delalloc_bytes; 4018 u64 max_reclaim; 4019 long time_left; 4020 unsigned long nr_pages; 4021 int loops; 4022 int items; 4023 enum btrfs_reserve_flush_enum flush; 4024 4025 /* Calc the number of items we need to flush for this space reservation */ 4026 items = calc_reclaim_items_nr(root, to_reclaim); 4027 to_reclaim = items * EXTENT_SIZE_PER_ITEM; 4028 4029 trans = (struct btrfs_trans_handle *)current->journal_info; 4030 block_rsv = &root->fs_info->delalloc_block_rsv; 4031 space_info = block_rsv->space_info; 4032 4033 delalloc_bytes = percpu_counter_sum_positive( 4034 &root->fs_info->delalloc_bytes); 4035 if (delalloc_bytes == 0) { 4036 if (trans) 4037 return; 4038 if (wait_ordered) 4039 btrfs_wait_ordered_roots(root->fs_info, items); 4040 return; 4041 } 4042 4043 loops = 0; 4044 while (delalloc_bytes && loops < 3) { 4045 max_reclaim = min(delalloc_bytes, to_reclaim); 4046 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT; 4047 btrfs_writeback_inodes_sb_nr(root, nr_pages); 4048 /* 4049 * We need to wait for the async pages to actually start before 4050 we do anything.
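 * Concretely (restating the code below): we sample async_delalloc_pages,
 * subtract the nr_pages we just queued (clamping at zero), and wait on
 * async_submit_wait until the counter falls back to that level, i.e.
 * until at least nr_pages worth of the async work has actually started.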
4051 */ 4052 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages); 4053 if (!max_reclaim) 4054 goto skip_async; 4055 4056 if (max_reclaim <= nr_pages) 4057 max_reclaim = 0; 4058 else 4059 max_reclaim -= nr_pages; 4060 4061 wait_event(root->fs_info->async_submit_wait, 4062 atomic_read(&root->fs_info->async_delalloc_pages) <= 4063 (int)max_reclaim); 4064 skip_async: 4065 if (!trans) 4066 flush = BTRFS_RESERVE_FLUSH_ALL; 4067 else 4068 flush = BTRFS_RESERVE_NO_FLUSH; 4069 spin_lock(&space_info->lock); 4070 if (can_overcommit(root, space_info, orig, flush)) { 4071 spin_unlock(&space_info->lock); 4072 break; 4073 } 4074 spin_unlock(&space_info->lock); 4075 4076 loops++; 4077 if (wait_ordered && !trans) { 4078 btrfs_wait_ordered_roots(root->fs_info, items); 4079 } else { 4080 time_left = schedule_timeout_killable(1); 4081 if (time_left) 4082 break; 4083 } 4084 delalloc_bytes = percpu_counter_sum_positive( 4085 &root->fs_info->delalloc_bytes); 4086 } 4087 } 4088 4089 /** 4090 * may_commit_transaction - possibly commit the transaction if it's ok to 4091 * @root - the root we're allocating for * @space_info - the space_info we're reserving from 4092 * @bytes - the number of bytes we want to reserve 4093 * @force - force the commit 4094 * 4095 * This will check to make sure that committing the transaction will actually 4096 * get us somewhere and then commit the transaction if it does. Otherwise it 4097 * will return -ENOSPC. 4098 */ 4099 static int may_commit_transaction(struct btrfs_root *root, 4100 struct btrfs_space_info *space_info, 4101 u64 bytes, int force) 4102 { 4103 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv; 4104 struct btrfs_trans_handle *trans; 4105 4106 trans = (struct btrfs_trans_handle *)current->journal_info; 4107 if (trans) 4108 return -EAGAIN; 4109 4110 if (force) 4111 goto commit; 4112 4113 /* See if there is enough pinned space to make this reservation */ 4114 spin_lock(&space_info->lock); 4115 if (percpu_counter_compare(&space_info->total_bytes_pinned, 4116 bytes) >= 0) { 4117 spin_unlock(&space_info->lock); 4118 goto commit; 4119 } 4120 spin_unlock(&space_info->lock); 4121 4122 /* 4123 * See if there is some space in the delayed insertion reservation for 4124 this reservation.
4125 */ 4126 if (space_info != delayed_rsv->space_info) 4127 return -ENOSPC; 4128 4129 spin_lock(&space_info->lock); 4130 spin_lock(&delayed_rsv->lock); 4131 if (percpu_counter_compare(&space_info->total_bytes_pinned, 4132 bytes - delayed_rsv->size) >= 0) { 4133 spin_unlock(&delayed_rsv->lock); 4134 spin_unlock(&space_info->lock); 4135 return -ENOSPC; 4136 } 4137 spin_unlock(&delayed_rsv->lock); 4138 spin_unlock(&space_info->lock); 4139 4140 commit: 4141 trans = btrfs_join_transaction(root); 4142 if (IS_ERR(trans)) 4143 return -ENOSPC; 4144 4145 return btrfs_commit_transaction(trans, root); 4146 } 4147 4148 enum flush_state { 4149 FLUSH_DELAYED_ITEMS_NR = 1, 4150 FLUSH_DELAYED_ITEMS = 2, 4151 FLUSH_DELALLOC = 3, 4152 FLUSH_DELALLOC_WAIT = 4, 4153 ALLOC_CHUNK = 5, 4154 COMMIT_TRANS = 6, 4155 }; 4156 4157 static int flush_space(struct btrfs_root *root, 4158 struct btrfs_space_info *space_info, u64 num_bytes, 4159 u64 orig_bytes, int state) 4160 { 4161 struct btrfs_trans_handle *trans; 4162 int nr; 4163 int ret = 0; 4164 4165 switch (state) { 4166 case FLUSH_DELAYED_ITEMS_NR: 4167 case FLUSH_DELAYED_ITEMS: 4168 if (state == FLUSH_DELAYED_ITEMS_NR) 4169 nr = calc_reclaim_items_nr(root, num_bytes) * 2; 4170 else 4171 nr = -1; 4172 4173 trans = btrfs_join_transaction(root); 4174 if (IS_ERR(trans)) { 4175 ret = PTR_ERR(trans); 4176 break; 4177 } 4178 ret = btrfs_run_delayed_items_nr(trans, root, nr); 4179 btrfs_end_transaction(trans, root); 4180 break; 4181 case FLUSH_DELALLOC: 4182 case FLUSH_DELALLOC_WAIT: 4183 shrink_delalloc(root, num_bytes, orig_bytes, 4184 state == FLUSH_DELALLOC_WAIT); 4185 break; 4186 case ALLOC_CHUNK: 4187 trans = btrfs_join_transaction(root); 4188 if (IS_ERR(trans)) { 4189 ret = PTR_ERR(trans); 4190 break; 4191 } 4192 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 4193 btrfs_get_alloc_profile(root, 0), 4194 CHUNK_ALLOC_NO_FORCE); 4195 btrfs_end_transaction(trans, root); 4196 if (ret == -ENOSPC) 4197 ret = 0; 4198 break; 4199 case COMMIT_TRANS: 4200 ret = may_commit_transaction(root, space_info, orig_bytes, 0); 4201 break; 4202 default: 4203 ret = -ENOSPC; 4204 break; 4205 } 4206 4207 return ret; 4208 } 4209 /** 4210 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space 4211 * @root - the root we're allocating for 4212 * @block_rsv - the block_rsv we're allocating for 4213 * @orig_bytes - the number of bytes we want 4214 * @flush - whether or not we can flush to make our reservation 4215 * 4216 * This will reserve orgi_bytes number of bytes from the space info associated 4217 * with the block_rsv. If there is not enough space it will make an attempt to 4218 * flush out space to make room. It will do this by flushing delalloc if 4219 * possible or committing the transaction. If flush is 0 then no attempts to 4220 * regain reservations will be made and this will fail if there is not enough 4221 * space already. 4222 */ 4223 static int reserve_metadata_bytes(struct btrfs_root *root, 4224 struct btrfs_block_rsv *block_rsv, 4225 u64 orig_bytes, 4226 enum btrfs_reserve_flush_enum flush) 4227 { 4228 struct btrfs_space_info *space_info = block_rsv->space_info; 4229 u64 used; 4230 u64 num_bytes = orig_bytes; 4231 int flush_state = FLUSH_DELAYED_ITEMS_NR; 4232 int ret = 0; 4233 bool flushing = false; 4234 4235 again: 4236 ret = 0; 4237 spin_lock(&space_info->lock); 4238 /* 4239 * We only want to wait if somebody other than us is flushing and we 4240 * are actually allowed to flush all things. 
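 * (The wait below is killable: if this task receives a fatal signal
 * while waiting for the current flusher to finish, we return -EINTR
 * without having modified any of the space_info counters.)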
4241 */ 4242 while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing && 4243 space_info->flush) { 4244 spin_unlock(&space_info->lock); 4245 /* 4246 * If we have a trans handle we can't wait because the flusher 4247 * may have to commit the transaction, which would mean we would 4248 * deadlock since we are waiting for the flusher to finish, but 4249 * hold the current transaction open. 4250 */ 4251 if (current->journal_info) 4252 return -EAGAIN; 4253 ret = wait_event_killable(space_info->wait, !space_info->flush); 4254 /* Must have been killed, return */ 4255 if (ret) 4256 return -EINTR; 4257 4258 spin_lock(&space_info->lock); 4259 } 4260 4261 ret = -ENOSPC; 4262 used = space_info->bytes_used + space_info->bytes_reserved + 4263 space_info->bytes_pinned + space_info->bytes_readonly + 4264 space_info->bytes_may_use; 4265 4266 /* 4267 * The idea here is that if we haven't already over-reserved the space 4268 * then we can go ahead and save our reservation first and then start 4269 * flushing if we need to. Otherwise, if we've already overcommitted, 4270 * let's start flushing stuff first and then come back and try to make 4271 * our reservation. 4272 */ 4273 if (used <= space_info->total_bytes) { 4274 if (used + orig_bytes <= space_info->total_bytes) { 4275 space_info->bytes_may_use += orig_bytes; 4276 trace_btrfs_space_reservation(root->fs_info, 4277 "space_info", space_info->flags, orig_bytes, 1); 4278 ret = 0; 4279 } else { 4280 /* 4281 * Ok set num_bytes to orig_bytes since we aren't 4282 * overcommitted, this way we only try and reclaim what 4283 * we need. 4284 */ 4285 num_bytes = orig_bytes; 4286 } 4287 } else { 4288 /* 4289 * Ok we're overcommitted, set num_bytes to the overcommitted 4290 * amount plus the amount of bytes that we need for this 4291 * reservation. 4292 */ 4293 num_bytes = used - space_info->total_bytes + 4294 (orig_bytes * 2); 4295 } 4296 4297 if (ret && can_overcommit(root, space_info, orig_bytes, flush)) { 4298 space_info->bytes_may_use += orig_bytes; 4299 trace_btrfs_space_reservation(root->fs_info, "space_info", 4300 space_info->flags, orig_bytes, 4301 1); 4302 ret = 0; 4303 } 4304 4305 /* 4306 * Couldn't make our reservation, save our place so while we're trying 4307 * to reclaim space we can actually use it instead of somebody else 4308 * stealing it from us. 4309 * 4310 * We make the other tasks wait for the flush only when we can flush 4311 * all things. 4312 */ 4313 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { 4314 flushing = true; 4315 space_info->flush = 1; 4316 } 4317 4318 spin_unlock(&space_info->lock); 4319 4320 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH) 4321 goto out; 4322 4323 ret = flush_space(root, space_info, num_bytes, orig_bytes, 4324 flush_state); 4325 flush_state++; 4326 4327 /* 4328 * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock 4329 would happen, so skip the delalloc flush.
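 * (Concretely: for BTRFS_RESERVE_FLUSH_LIMIT the state is bumped
 * straight from the delalloc states to ALLOC_CHUNK below, and because
 * the retry condition requires flush_state < COMMIT_TRANS, the
 * transaction-commit step is never attempted for those callers.)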
4330 */ 4331 if (flush == BTRFS_RESERVE_FLUSH_LIMIT && 4332 (flush_state == FLUSH_DELALLOC || 4333 flush_state == FLUSH_DELALLOC_WAIT)) 4334 flush_state = ALLOC_CHUNK; 4335 4336 if (!ret) 4337 goto again; 4338 else if (flush == BTRFS_RESERVE_FLUSH_LIMIT && 4339 flush_state < COMMIT_TRANS) 4340 goto again; 4341 else if (flush == BTRFS_RESERVE_FLUSH_ALL && 4342 flush_state <= COMMIT_TRANS) 4343 goto again; 4344 4345 out: 4346 if (ret == -ENOSPC && 4347 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) { 4348 struct btrfs_block_rsv *global_rsv = 4349 &root->fs_info->global_block_rsv; 4350 4351 if (block_rsv != global_rsv && 4352 !block_rsv_use_bytes(global_rsv, orig_bytes)) 4353 ret = 0; 4354 } 4355 if (ret == -ENOSPC) 4356 trace_btrfs_space_reservation(root->fs_info, 4357 "space_info:enospc", 4358 space_info->flags, orig_bytes, 1); 4359 if (flushing) { 4360 spin_lock(&space_info->lock); 4361 space_info->flush = 0; 4362 wake_up_all(&space_info->wait); 4363 spin_unlock(&space_info->lock); 4364 } 4365 return ret; 4366 } 4367 4368 static struct btrfs_block_rsv *get_block_rsv( 4369 const struct btrfs_trans_handle *trans, 4370 const struct btrfs_root *root) 4371 { 4372 struct btrfs_block_rsv *block_rsv = NULL; 4373 4374 if (root->ref_cows) 4375 block_rsv = trans->block_rsv; 4376 4377 if (root == root->fs_info->csum_root && trans->adding_csums) 4378 block_rsv = trans->block_rsv; 4379 4380 if (root == root->fs_info->uuid_root) 4381 block_rsv = trans->block_rsv; 4382 4383 if (!block_rsv) 4384 block_rsv = root->block_rsv; 4385 4386 if (!block_rsv) 4387 block_rsv = &root->fs_info->empty_block_rsv; 4388 4389 return block_rsv; 4390 } 4391 4392 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, 4393 u64 num_bytes) 4394 { 4395 int ret = -ENOSPC; 4396 spin_lock(&block_rsv->lock); 4397 if (block_rsv->reserved >= num_bytes) { 4398 block_rsv->reserved -= num_bytes; 4399 if (block_rsv->reserved < block_rsv->size) 4400 block_rsv->full = 0; 4401 ret = 0; 4402 } 4403 spin_unlock(&block_rsv->lock); 4404 return ret; 4405 } 4406 4407 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv, 4408 u64 num_bytes, int update_size) 4409 { 4410 spin_lock(&block_rsv->lock); 4411 block_rsv->reserved += num_bytes; 4412 if (update_size) 4413 block_rsv->size += num_bytes; 4414 else if (block_rsv->reserved >= block_rsv->size) 4415 block_rsv->full = 1; 4416 spin_unlock(&block_rsv->lock); 4417 } 4418 4419 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info, 4420 struct btrfs_block_rsv *dest, u64 num_bytes, 4421 int min_factor) 4422 { 4423 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 4424 u64 min_bytes; 4425 4426 if (global_rsv->space_info != dest->space_info) 4427 return -ENOSPC; 4428 4429 spin_lock(&global_rsv->lock); 4430 min_bytes = div_factor(global_rsv->size, min_factor); 4431 if (global_rsv->reserved < min_bytes + num_bytes) { 4432 spin_unlock(&global_rsv->lock); 4433 return -ENOSPC; 4434 } 4435 global_rsv->reserved -= num_bytes; 4436 if (global_rsv->reserved < global_rsv->size) 4437 global_rsv->full = 0; 4438 spin_unlock(&global_rsv->lock); 4439 4440 block_rsv_add_bytes(dest, num_bytes, 1); 4441 return 0; 4442 } 4443 4444 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info, 4445 struct btrfs_block_rsv *block_rsv, 4446 struct btrfs_block_rsv *dest, u64 num_bytes) 4447 { 4448 struct btrfs_space_info *space_info = block_rsv->space_info; 4449 4450 spin_lock(&block_rsv->lock); 4451 if (num_bytes == (u64)-1) 4452 num_bytes = block_rsv->size; 4453 
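/*
 * Shrink the rsv by num_bytes; any reserved bytes left above the new
 * size are handed to dest (typically the global rsv) below, and
 * whatever dest cannot absorb is dropped from the space_info's
 * bytes_may_use.
 */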
block_rsv->size -= num_bytes; 4454 if (block_rsv->reserved >= block_rsv->size) { 4455 num_bytes = block_rsv->reserved - block_rsv->size; 4456 block_rsv->reserved = block_rsv->size; 4457 block_rsv->full = 1; 4458 } else { 4459 num_bytes = 0; 4460 } 4461 spin_unlock(&block_rsv->lock); 4462 4463 if (num_bytes > 0) { 4464 if (dest) { 4465 spin_lock(&dest->lock); 4466 if (!dest->full) { 4467 u64 bytes_to_add; 4468 4469 bytes_to_add = dest->size - dest->reserved; 4470 bytes_to_add = min(num_bytes, bytes_to_add); 4471 dest->reserved += bytes_to_add; 4472 if (dest->reserved >= dest->size) 4473 dest->full = 1; 4474 num_bytes -= bytes_to_add; 4475 } 4476 spin_unlock(&dest->lock); 4477 } 4478 if (num_bytes) { 4479 spin_lock(&space_info->lock); 4480 space_info->bytes_may_use -= num_bytes; 4481 trace_btrfs_space_reservation(fs_info, "space_info", 4482 space_info->flags, num_bytes, 0); 4483 spin_unlock(&space_info->lock); 4484 } 4485 } 4486 } 4487 4488 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src, 4489 struct btrfs_block_rsv *dst, u64 num_bytes) 4490 { 4491 int ret; 4492 4493 ret = block_rsv_use_bytes(src, num_bytes); 4494 if (ret) 4495 return ret; 4496 4497 block_rsv_add_bytes(dst, num_bytes, 1); 4498 return 0; 4499 } 4500 4501 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type) 4502 { 4503 memset(rsv, 0, sizeof(*rsv)); 4504 spin_lock_init(&rsv->lock); 4505 rsv->type = type; 4506 } 4507 4508 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root, 4509 unsigned short type) 4510 { 4511 struct btrfs_block_rsv *block_rsv; 4512 struct btrfs_fs_info *fs_info = root->fs_info; 4513 4514 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS); 4515 if (!block_rsv) 4516 return NULL; 4517 4518 btrfs_init_block_rsv(block_rsv, type); 4519 block_rsv->space_info = __find_space_info(fs_info, 4520 BTRFS_BLOCK_GROUP_METADATA); 4521 return block_rsv; 4522 } 4523 4524 void btrfs_free_block_rsv(struct btrfs_root *root, 4525 struct btrfs_block_rsv *rsv) 4526 { 4527 if (!rsv) 4528 return; 4529 btrfs_block_rsv_release(root, rsv, (u64)-1); 4530 kfree(rsv); 4531 } 4532 4533 int btrfs_block_rsv_add(struct btrfs_root *root, 4534 struct btrfs_block_rsv *block_rsv, u64 num_bytes, 4535 enum btrfs_reserve_flush_enum flush) 4536 { 4537 int ret; 4538 4539 if (num_bytes == 0) 4540 return 0; 4541 4542 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); 4543 if (!ret) { 4544 block_rsv_add_bytes(block_rsv, num_bytes, 1); 4545 return 0; 4546 } 4547 4548 return ret; 4549 } 4550 4551 int btrfs_block_rsv_check(struct btrfs_root *root, 4552 struct btrfs_block_rsv *block_rsv, int min_factor) 4553 { 4554 u64 num_bytes = 0; 4555 int ret = -ENOSPC; 4556 4557 if (!block_rsv) 4558 return 0; 4559 4560 spin_lock(&block_rsv->lock); 4561 num_bytes = div_factor(block_rsv->size, min_factor); 4562 if (block_rsv->reserved >= num_bytes) 4563 ret = 0; 4564 spin_unlock(&block_rsv->lock); 4565 4566 return ret; 4567 } 4568 4569 int btrfs_block_rsv_refill(struct btrfs_root *root, 4570 struct btrfs_block_rsv *block_rsv, u64 min_reserved, 4571 enum btrfs_reserve_flush_enum flush) 4572 { 4573 u64 num_bytes = 0; 4574 int ret = -ENOSPC; 4575 4576 if (!block_rsv) 4577 return 0; 4578 4579 spin_lock(&block_rsv->lock); 4580 num_bytes = min_reserved; 4581 if (block_rsv->reserved >= num_bytes) 4582 ret = 0; 4583 else 4584 num_bytes -= block_rsv->reserved; 4585 spin_unlock(&block_rsv->lock); 4586 4587 if (!ret) 4588 return 0; 4589 4590 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); 4591 if 
(!ret) { 4592 block_rsv_add_bytes(block_rsv, num_bytes, 0); 4593 return 0; 4594 } 4595 4596 return ret; 4597 } 4598 4599 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, 4600 struct btrfs_block_rsv *dst_rsv, 4601 u64 num_bytes) 4602 { 4603 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); 4604 } 4605 4606 void btrfs_block_rsv_release(struct btrfs_root *root, 4607 struct btrfs_block_rsv *block_rsv, 4608 u64 num_bytes) 4609 { 4610 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 4611 if (global_rsv == block_rsv || 4612 block_rsv->space_info != global_rsv->space_info) 4613 global_rsv = NULL; 4614 block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv, 4615 num_bytes); 4616 } 4617 4618 /* 4619 * helper to calculate size of global block reservation. 4620 * the desired value is sum of space used by extent tree, 4621 * checksum tree and root tree 4622 */ 4623 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) 4624 { 4625 struct btrfs_space_info *sinfo; 4626 u64 num_bytes; 4627 u64 meta_used; 4628 u64 data_used; 4629 int csum_size = btrfs_super_csum_size(fs_info->super_copy); 4630 4631 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); 4632 spin_lock(&sinfo->lock); 4633 data_used = sinfo->bytes_used; 4634 spin_unlock(&sinfo->lock); 4635 4636 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 4637 spin_lock(&sinfo->lock); 4638 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) 4639 data_used = 0; 4640 meta_used = sinfo->bytes_used; 4641 spin_unlock(&sinfo->lock); 4642 4643 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) * 4644 csum_size * 2; 4645 num_bytes += div64_u64(data_used + meta_used, 50); 4646 4647 if (num_bytes * 3 > meta_used) 4648 num_bytes = div64_u64(meta_used, 3); 4649 4650 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10); 4651 } 4652 4653 static void update_global_block_rsv(struct btrfs_fs_info *fs_info) 4654 { 4655 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; 4656 struct btrfs_space_info *sinfo = block_rsv->space_info; 4657 u64 num_bytes; 4658 4659 num_bytes = calc_global_metadata_size(fs_info); 4660 4661 spin_lock(&sinfo->lock); 4662 spin_lock(&block_rsv->lock); 4663 4664 block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024); 4665 4666 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + 4667 sinfo->bytes_reserved + sinfo->bytes_readonly + 4668 sinfo->bytes_may_use; 4669 4670 if (sinfo->total_bytes > num_bytes) { 4671 num_bytes = sinfo->total_bytes - num_bytes; 4672 block_rsv->reserved += num_bytes; 4673 sinfo->bytes_may_use += num_bytes; 4674 trace_btrfs_space_reservation(fs_info, "space_info", 4675 sinfo->flags, num_bytes, 1); 4676 } 4677 4678 if (block_rsv->reserved >= block_rsv->size) { 4679 num_bytes = block_rsv->reserved - block_rsv->size; 4680 sinfo->bytes_may_use -= num_bytes; 4681 trace_btrfs_space_reservation(fs_info, "space_info", 4682 sinfo->flags, num_bytes, 0); 4683 block_rsv->reserved = block_rsv->size; 4684 block_rsv->full = 1; 4685 } 4686 4687 spin_unlock(&block_rsv->lock); 4688 spin_unlock(&sinfo->lock); 4689 } 4690 4691 static void init_global_block_rsv(struct btrfs_fs_info *fs_info) 4692 { 4693 struct btrfs_space_info *space_info; 4694 4695 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 4696 fs_info->chunk_block_rsv.space_info = space_info; 4697 4698 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 4699 fs_info->global_block_rsv.space_info = space_info; 4700 fs_info->delalloc_block_rsv.space_info 
= space_info; 4701 fs_info->trans_block_rsv.space_info = space_info; 4702 fs_info->empty_block_rsv.space_info = space_info; 4703 fs_info->delayed_block_rsv.space_info = space_info; 4704 4705 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv; 4706 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv; 4707 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv; 4708 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv; 4709 if (fs_info->quota_root) 4710 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv; 4711 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv; 4712 4713 update_global_block_rsv(fs_info); 4714 } 4715 4716 static void release_global_block_rsv(struct btrfs_fs_info *fs_info) 4717 { 4718 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL, 4719 (u64)-1); 4720 WARN_ON(fs_info->delalloc_block_rsv.size > 0); 4721 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0); 4722 WARN_ON(fs_info->trans_block_rsv.size > 0); 4723 WARN_ON(fs_info->trans_block_rsv.reserved > 0); 4724 WARN_ON(fs_info->chunk_block_rsv.size > 0); 4725 WARN_ON(fs_info->chunk_block_rsv.reserved > 0); 4726 WARN_ON(fs_info->delayed_block_rsv.size > 0); 4727 WARN_ON(fs_info->delayed_block_rsv.reserved > 0); 4728 } 4729 4730 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, 4731 struct btrfs_root *root) 4732 { 4733 if (!trans->block_rsv) 4734 return; 4735 4736 if (!trans->bytes_reserved) 4737 return; 4738 4739 trace_btrfs_space_reservation(root->fs_info, "transaction", 4740 trans->transid, trans->bytes_reserved, 0); 4741 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); 4742 trans->bytes_reserved = 0; 4743 } 4744 4745 /* Can only return 0 or -ENOSPC */ 4746 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, 4747 struct inode *inode) 4748 { 4749 struct btrfs_root *root = BTRFS_I(inode)->root; 4750 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); 4751 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; 4752 4753 /* 4754 * We need to hold space in order to delete our orphan item once we've 4755 * added it, so this takes the reservation so we can release it later 4756 * when we are truly done with the orphan item. 4757 */ 4758 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1); 4759 trace_btrfs_space_reservation(root->fs_info, "orphan", 4760 btrfs_ino(inode), num_bytes, 1); 4761 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); 4762 } 4763 4764 void btrfs_orphan_release_metadata(struct inode *inode) 4765 { 4766 struct btrfs_root *root = BTRFS_I(inode)->root; 4767 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1); 4768 trace_btrfs_space_reservation(root->fs_info, "orphan", 4769 btrfs_ino(inode), num_bytes, 0); 4770 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); 4771 } 4772 4773 /* 4774 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation 4775 * root: the root of the parent directory 4776 * rsv: block reservation 4777 * items: the number of items that we need do reservation 4778 * qgroup_reserved: used to return the reserved size in qgroup 4779 * 4780 * This function is used to reserve the space for snapshot/subvolume 4781 * creation and deletion. Those operations are different with the 4782 * common file/directory operations, they change two fs/file trees 4783 * and root tree, the number of items that the qgroup reserves is 4784 * different with the free space reservation. 
So we cannot use 4785 * the space reservation mechanism in start_transaction(). 4786 */ 4787 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, 4788 struct btrfs_block_rsv *rsv, 4789 int items, 4790 u64 *qgroup_reserved, 4791 bool use_global_rsv) 4792 { 4793 u64 num_bytes; 4794 int ret; 4795 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 4796 4797 if (root->fs_info->quota_enabled) { 4798 /* One for parent inode, two for dir entries */ 4799 num_bytes = 3 * root->leafsize; 4800 ret = btrfs_qgroup_reserve(root, num_bytes); 4801 if (ret) 4802 return ret; 4803 } else { 4804 num_bytes = 0; 4805 } 4806 4807 *qgroup_reserved = num_bytes; 4808 4809 num_bytes = btrfs_calc_trans_metadata_size(root, items); 4810 rsv->space_info = __find_space_info(root->fs_info, 4811 BTRFS_BLOCK_GROUP_METADATA); 4812 ret = btrfs_block_rsv_add(root, rsv, num_bytes, 4813 BTRFS_RESERVE_FLUSH_ALL); 4814 4815 if (ret == -ENOSPC && use_global_rsv) 4816 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes); 4817 4818 if (ret) { 4819 if (*qgroup_reserved) 4820 btrfs_qgroup_free(root, *qgroup_reserved); 4821 } 4822 4823 return ret; 4824 } 4825 4826 void btrfs_subvolume_release_metadata(struct btrfs_root *root, 4827 struct btrfs_block_rsv *rsv, 4828 u64 qgroup_reserved) 4829 { 4830 btrfs_block_rsv_release(root, rsv, (u64)-1); 4831 if (qgroup_reserved) 4832 btrfs_qgroup_free(root, qgroup_reserved); 4833 } 4834 4835 /** 4836 * drop_outstanding_extent - drop an outstanding extent 4837 * @inode: the inode we're dropping the extent for 4838 * 4839 * This is called when we are freeing up an outstanding extent, either 4840 * after an error or after an extent is written. This will return the number of 4841 * reserved extents that need to be freed. This must be called with 4842 * BTRFS_I(inode)->lock held. 4843 */ 4844 static unsigned drop_outstanding_extent(struct inode *inode) 4845 { 4846 unsigned drop_inode_space = 0; 4847 unsigned dropped_extents = 0; 4848 4849 BUG_ON(!BTRFS_I(inode)->outstanding_extents); 4850 BTRFS_I(inode)->outstanding_extents--; 4851 4852 if (BTRFS_I(inode)->outstanding_extents == 0 && 4853 test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED, 4854 &BTRFS_I(inode)->runtime_flags)) 4855 drop_inode_space = 1; 4856 4857 /* 4858 * If we have the same or a greater number of outstanding extents than we 4859 * have reserved then we need to leave the reserved extents count alone. 4860 */ 4861 if (BTRFS_I(inode)->outstanding_extents >= 4862 BTRFS_I(inode)->reserved_extents) 4863 return drop_inode_space; 4864 4865 dropped_extents = BTRFS_I(inode)->reserved_extents - 4866 BTRFS_I(inode)->outstanding_extents; 4867 BTRFS_I(inode)->reserved_extents -= dropped_extents; 4868 return dropped_extents + drop_inode_space; 4869 } 4870 4871 /** 4872 * calc_csum_metadata_size - return the amount of metadata space that must be 4873 * reserved/free'd for the given bytes. 4874 * @inode: the inode we're manipulating 4875 * @num_bytes: the number of bytes in question 4876 * @reserve: 1 if we are reserving space, 0 if we are freeing space 4877 * 4878 * This adjusts the number of csum_bytes in the inode and then returns the 4879 * correct amount of metadata that must either be reserved or freed. We 4880 * calculate how many checksums we can fit into one leaf and then divide the 4881 * number of bytes that will need to be checksummed by this value to figure out 4882 * how many checksums will be required.
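 * (Worked example, illustrative only: if the update moves the rounded-up
 * count of csum leaves from N to N + 2, we return the transaction
 * metadata size for two extra items; if the leaf count is unchanged we
 * return 0.)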
If we are adding bytes then the number 4883 * may go up and we will return the number of additional bytes that must be 4884 * reserved. If it is going down we will return the number of bytes that must 4885 * be freed. 4886 * 4887 * This must be called with BTRFS_I(inode)->lock held. 4888 */ 4889 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes, 4890 int reserve) 4891 { 4892 struct btrfs_root *root = BTRFS_I(inode)->root; 4893 u64 csum_size; 4894 int num_csums_per_leaf; 4895 int num_csums; 4896 int old_csums; 4897 4898 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM && 4899 BTRFS_I(inode)->csum_bytes == 0) 4900 return 0; 4901 4902 old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize); 4903 if (reserve) 4904 BTRFS_I(inode)->csum_bytes += num_bytes; 4905 else 4906 BTRFS_I(inode)->csum_bytes -= num_bytes; 4907 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item); 4908 num_csums_per_leaf = (int)div64_u64(csum_size, 4909 sizeof(struct btrfs_csum_item) + 4910 sizeof(struct btrfs_disk_key)); 4911 num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize); 4912 num_csums = num_csums + num_csums_per_leaf - 1; 4913 num_csums = num_csums / num_csums_per_leaf; 4914 4915 old_csums = old_csums + num_csums_per_leaf - 1; 4916 old_csums = old_csums / num_csums_per_leaf; 4917 4918 /* No change, no need to reserve more */ 4919 if (old_csums == num_csums) 4920 return 0; 4921 4922 if (reserve) 4923 return btrfs_calc_trans_metadata_size(root, 4924 num_csums - old_csums); 4925 4926 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums); 4927 } 4928 4929 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) 4930 { 4931 struct btrfs_root *root = BTRFS_I(inode)->root; 4932 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; 4933 u64 to_reserve = 0; 4934 u64 csum_bytes; 4935 unsigned nr_extents = 0; 4936 int extra_reserve = 0; 4937 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; 4938 int ret = 0; 4939 bool delalloc_lock = true; 4940 u64 to_free = 0; 4941 unsigned dropped; 4942 4943 /* If we are a free space inode we need to not flush since we will be in 4944 * the middle of a transaction commit. We also don't need the delalloc 4945 * mutex since we won't race with anybody. We need this mostly to make 4946 * lockdep shut its filthy mouth. 4947 */ 4948 if (btrfs_is_free_space_inode(inode)) { 4949 flush = BTRFS_RESERVE_NO_FLUSH; 4950 delalloc_lock = false; 4951 } 4952 4953 if (flush != BTRFS_RESERVE_NO_FLUSH && 4954 btrfs_transaction_in_commit(root->fs_info)) 4955 schedule_timeout(1); 4956 4957 if (delalloc_lock) 4958 mutex_lock(&BTRFS_I(inode)->delalloc_mutex); 4959 4960 num_bytes = ALIGN(num_bytes, root->sectorsize); 4961 4962 spin_lock(&BTRFS_I(inode)->lock); 4963 BTRFS_I(inode)->outstanding_extents++; 4964 4965 if (BTRFS_I(inode)->outstanding_extents > 4966 BTRFS_I(inode)->reserved_extents) 4967 nr_extents = BTRFS_I(inode)->outstanding_extents - 4968 BTRFS_I(inode)->reserved_extents; 4969 4970 /* 4971 * Add an item to reserve for updating the inode when we complete the 4972 * delalloc io. 
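 * (This extra item is only counted while BTRFS_INODE_DELALLOC_META_RESERVED
 * is clear; once the reservation succeeds we set the bit and drop it from
 * nr_extents again, so it is reserved at most once per inode.)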
4973 */ 4974 if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED, 4975 &BTRFS_I(inode)->runtime_flags)) { 4976 nr_extents++; 4977 extra_reserve = 1; 4978 } 4979 4980 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); 4981 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); 4982 csum_bytes = BTRFS_I(inode)->csum_bytes; 4983 spin_unlock(&BTRFS_I(inode)->lock); 4984 4985 if (root->fs_info->quota_enabled) { 4986 ret = btrfs_qgroup_reserve(root, num_bytes + 4987 nr_extents * root->leafsize); 4988 if (ret) 4989 goto out_fail; 4990 } 4991 4992 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); 4993 if (unlikely(ret)) { 4994 if (root->fs_info->quota_enabled) 4995 btrfs_qgroup_free(root, num_bytes + 4996 nr_extents * root->leafsize); 4997 goto out_fail; 4998 } 4999 5000 spin_lock(&BTRFS_I(inode)->lock); 5001 if (extra_reserve) { 5002 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED, 5003 &BTRFS_I(inode)->runtime_flags); 5004 nr_extents--; 5005 } 5006 BTRFS_I(inode)->reserved_extents += nr_extents; 5007 spin_unlock(&BTRFS_I(inode)->lock); 5008 5009 if (delalloc_lock) 5010 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); 5011 5012 if (to_reserve) 5013 trace_btrfs_space_reservation(root->fs_info, "delalloc", 5014 btrfs_ino(inode), to_reserve, 1); 5015 block_rsv_add_bytes(block_rsv, to_reserve, 1); 5016 5017 return 0; 5018 5019 out_fail: 5020 spin_lock(&BTRFS_I(inode)->lock); 5021 dropped = drop_outstanding_extent(inode); 5022 /* 5023 * If the inodes csum_bytes is the same as the original 5024 * csum_bytes then we know we haven't raced with any free()ers 5025 * so we can just reduce our inodes csum bytes and carry on. 5026 */ 5027 if (BTRFS_I(inode)->csum_bytes == csum_bytes) { 5028 calc_csum_metadata_size(inode, num_bytes, 0); 5029 } else { 5030 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes; 5031 u64 bytes; 5032 5033 /* 5034 * This is tricky, but first we need to figure out how much we 5035 * free'd from any free-ers that occured during this 5036 * reservation, so we reset ->csum_bytes to the csum_bytes 5037 * before we dropped our lock, and then call the free for the 5038 * number of bytes that were freed while we were trying our 5039 * reservation. 5040 */ 5041 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes; 5042 BTRFS_I(inode)->csum_bytes = csum_bytes; 5043 to_free = calc_csum_metadata_size(inode, bytes, 0); 5044 5045 5046 /* 5047 * Now we need to see how much we would have freed had we not 5048 * been making this reservation and our ->csum_bytes were not 5049 * artificially inflated. 5050 */ 5051 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes; 5052 bytes = csum_bytes - orig_csum_bytes; 5053 bytes = calc_csum_metadata_size(inode, bytes, 0); 5054 5055 /* 5056 * Now reset ->csum_bytes to what it should be. If bytes is 5057 * more than to_free then we would have free'd more space had we 5058 * not had an artificially high ->csum_bytes, so we need to free 5059 * the remainder. If bytes is the same or less then we don't 5060 * need to do anything, the other free-ers did the correct 5061 * thing. 
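 * (The assignment below therefore leaves ->csum_bytes exactly where it
 * would have been had this failed reservation never inflated it:
 * orig_csum_bytes minus our num_bytes.)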
5062 */ 5063 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes; 5064 if (bytes > to_free) 5065 to_free = bytes - to_free; 5066 else 5067 to_free = 0; 5068 } 5069 spin_unlock(&BTRFS_I(inode)->lock); 5070 if (dropped) 5071 to_free += btrfs_calc_trans_metadata_size(root, dropped); 5072 5073 if (to_free) { 5074 btrfs_block_rsv_release(root, block_rsv, to_free); 5075 trace_btrfs_space_reservation(root->fs_info, "delalloc", 5076 btrfs_ino(inode), to_free, 0); 5077 } 5078 if (delalloc_lock) 5079 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); 5080 return ret; 5081 } 5082 5083 /** 5084 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode 5085 * @inode: the inode to release the reservation for 5086 * @num_bytes: the number of bytes we're releasing 5087 * 5088 * This will release the metadata reservation for an inode. This can be called 5089 * once we complete IO for a given set of bytes to release their metadata 5090 * reservations. 5091 */ 5092 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) 5093 { 5094 struct btrfs_root *root = BTRFS_I(inode)->root; 5095 u64 to_free = 0; 5096 unsigned dropped; 5097 5098 num_bytes = ALIGN(num_bytes, root->sectorsize); 5099 spin_lock(&BTRFS_I(inode)->lock); 5100 dropped = drop_outstanding_extent(inode); 5101 5102 if (num_bytes) 5103 to_free = calc_csum_metadata_size(inode, num_bytes, 0); 5104 spin_unlock(&BTRFS_I(inode)->lock); 5105 if (dropped > 0) 5106 to_free += btrfs_calc_trans_metadata_size(root, dropped); 5107 5108 trace_btrfs_space_reservation(root->fs_info, "delalloc", 5109 btrfs_ino(inode), to_free, 0); 5110 if (root->fs_info->quota_enabled) { 5111 btrfs_qgroup_free(root, num_bytes + 5112 dropped * root->leafsize); 5113 } 5114 5115 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, 5116 to_free); 5117 } 5118 5119 /** 5120 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc 5121 * @inode: inode we're writing to 5122 * @num_bytes: the number of bytes we want to allocate 5123 * 5124 * This will do the following things 5125 * 5126 * o reserve space in the data space info for num_bytes 5127 * o reserve space in the metadata space info based on number of outstanding 5128 * extents and how much csums will be needed 5129 * o add to the inodes ->delalloc_bytes 5130 * o add it to the fs_info's delalloc inodes list. 5131 * 5132 * This will return 0 for success and -ENOSPC if there is no space left. 5133 */ 5134 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes) 5135 { 5136 int ret; 5137 5138 ret = btrfs_check_data_free_space(inode, num_bytes); 5139 if (ret) 5140 return ret; 5141 5142 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes); 5143 if (ret) { 5144 btrfs_free_reserved_data_space(inode, num_bytes); 5145 return ret; 5146 } 5147 5148 return 0; 5149 } 5150 5151 /** 5152 * btrfs_delalloc_release_space - release data and metadata space for delalloc 5153 * @inode: inode we're releasing space for 5154 * @num_bytes: the number of bytes we want to free up 5155 * 5156 * This must be matched with a call to btrfs_delalloc_reserve_space. This is 5157 * called in the case that we don't need the metadata AND data reservations 5158 * anymore. So if there is an error or we insert an inline extent. 5159 * 5160 * This function will release the metadata space that was not used and will 5161 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes 5162 * list if there are no delalloc bytes left. 
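 * (Implementation-wise this is simply btrfs_delalloc_release_metadata()
 * followed by btrfs_free_reserved_data_space(), mirroring
 * btrfs_delalloc_reserve_space() above.)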
5163 */ 5164 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes) 5165 { 5166 btrfs_delalloc_release_metadata(inode, num_bytes); 5167 btrfs_free_reserved_data_space(inode, num_bytes); 5168 } 5169 5170 static int update_block_group(struct btrfs_root *root, 5171 u64 bytenr, u64 num_bytes, int alloc) 5172 { 5173 struct btrfs_block_group_cache *cache = NULL; 5174 struct btrfs_fs_info *info = root->fs_info; 5175 u64 total = num_bytes; 5176 u64 old_val; 5177 u64 byte_in_group; 5178 int factor; 5179 5180 /* block accounting for super block */ 5181 spin_lock(&info->delalloc_root_lock); 5182 old_val = btrfs_super_bytes_used(info->super_copy); 5183 if (alloc) 5184 old_val += num_bytes; 5185 else 5186 old_val -= num_bytes; 5187 btrfs_set_super_bytes_used(info->super_copy, old_val); 5188 spin_unlock(&info->delalloc_root_lock); 5189 5190 while (total) { 5191 cache = btrfs_lookup_block_group(info, bytenr); 5192 if (!cache) 5193 return -ENOENT; 5194 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP | 5195 BTRFS_BLOCK_GROUP_RAID1 | 5196 BTRFS_BLOCK_GROUP_RAID10)) 5197 factor = 2; 5198 else 5199 factor = 1; 5200 /* 5201 * If this block group has free space cache written out, we 5202 * need to make sure to load it if we are removing space. This 5203 * is because we need the unpinning stage to actually add the 5204 * space back to the block group, otherwise we will leak space. 5205 */ 5206 if (!alloc && cache->cached == BTRFS_CACHE_NO) 5207 cache_block_group(cache, 1); 5208 5209 byte_in_group = bytenr - cache->key.objectid; 5210 WARN_ON(byte_in_group > cache->key.offset); 5211 5212 spin_lock(&cache->space_info->lock); 5213 spin_lock(&cache->lock); 5214 5215 if (btrfs_test_opt(root, SPACE_CACHE) && 5216 cache->disk_cache_state < BTRFS_DC_CLEAR) 5217 cache->disk_cache_state = BTRFS_DC_CLEAR; 5218 5219 cache->dirty = 1; 5220 old_val = btrfs_block_group_used(&cache->item); 5221 num_bytes = min(total, cache->key.offset - byte_in_group); 5222 if (alloc) { 5223 old_val += num_bytes; 5224 btrfs_set_block_group_used(&cache->item, old_val); 5225 cache->reserved -= num_bytes; 5226 cache->space_info->bytes_reserved -= num_bytes; 5227 cache->space_info->bytes_used += num_bytes; 5228 cache->space_info->disk_used += num_bytes * factor; 5229 spin_unlock(&cache->lock); 5230 spin_unlock(&cache->space_info->lock); 5231 } else { 5232 old_val -= num_bytes; 5233 btrfs_set_block_group_used(&cache->item, old_val); 5234 cache->pinned += num_bytes; 5235 cache->space_info->bytes_pinned += num_bytes; 5236 cache->space_info->bytes_used -= num_bytes; 5237 cache->space_info->disk_used -= num_bytes * factor; 5238 spin_unlock(&cache->lock); 5239 spin_unlock(&cache->space_info->lock); 5240 5241 set_extent_dirty(info->pinned_extents, 5242 bytenr, bytenr + num_bytes - 1, 5243 GFP_NOFS | __GFP_NOFAIL); 5244 } 5245 btrfs_put_block_group(cache); 5246 total -= num_bytes; 5247 bytenr += num_bytes; 5248 } 5249 return 0; 5250 } 5251 5252 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start) 5253 { 5254 struct btrfs_block_group_cache *cache; 5255 u64 bytenr; 5256 5257 spin_lock(&root->fs_info->block_group_cache_lock); 5258 bytenr = root->fs_info->first_logical_byte; 5259 spin_unlock(&root->fs_info->block_group_cache_lock); 5260 5261 if (bytenr < (u64)-1) 5262 return bytenr; 5263 5264 cache = btrfs_lookup_first_block_group(root->fs_info, search_start); 5265 if (!cache) 5266 return 0; 5267 5268 bytenr = cache->key.objectid; 5269 btrfs_put_block_group(cache); 5270 5271 return bytenr; 5272 } 5273 5274 static int 
pin_down_extent(struct btrfs_root *root, 5275 struct btrfs_block_group_cache *cache, 5276 u64 bytenr, u64 num_bytes, int reserved) 5277 { 5278 spin_lock(&cache->space_info->lock); 5279 spin_lock(&cache->lock); 5280 cache->pinned += num_bytes; 5281 cache->space_info->bytes_pinned += num_bytes; 5282 if (reserved) { 5283 cache->reserved -= num_bytes; 5284 cache->space_info->bytes_reserved -= num_bytes; 5285 } 5286 spin_unlock(&cache->lock); 5287 spin_unlock(&cache->space_info->lock); 5288 5289 set_extent_dirty(root->fs_info->pinned_extents, bytenr, 5290 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); 5291 if (reserved) 5292 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes); 5293 return 0; 5294 } 5295 5296 /* 5297 * this function must be called within transaction 5298 */ 5299 int btrfs_pin_extent(struct btrfs_root *root, 5300 u64 bytenr, u64 num_bytes, int reserved) 5301 { 5302 struct btrfs_block_group_cache *cache; 5303 5304 cache = btrfs_lookup_block_group(root->fs_info, bytenr); 5305 BUG_ON(!cache); /* Logic error */ 5306 5307 pin_down_extent(root, cache, bytenr, num_bytes, reserved); 5308 5309 btrfs_put_block_group(cache); 5310 return 0; 5311 } 5312 5313 /* 5314 * this function must be called within transaction 5315 */ 5316 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root, 5317 u64 bytenr, u64 num_bytes) 5318 { 5319 struct btrfs_block_group_cache *cache; 5320 int ret; 5321 5322 cache = btrfs_lookup_block_group(root->fs_info, bytenr); 5323 if (!cache) 5324 return -EINVAL; 5325 5326 /* 5327 * pull in the free space cache (if any) so that our pin 5328 * removes the free space from the cache. We have load_only set 5329 * to one because the slow code to read in the free extents does check 5330 * the pinned extents. 5331 */ 5332 cache_block_group(cache, 1); 5333 5334 pin_down_extent(root, cache, bytenr, num_bytes, 0); 5335 5336 /* remove us from the free space cache (if we're there at all) */ 5337 ret = btrfs_remove_free_space(cache, bytenr, num_bytes); 5338 btrfs_put_block_group(cache); 5339 return ret; 5340 } 5341 5342 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes) 5343 { 5344 int ret; 5345 struct btrfs_block_group_cache *block_group; 5346 struct btrfs_caching_control *caching_ctl; 5347 5348 block_group = btrfs_lookup_block_group(root->fs_info, start); 5349 if (!block_group) 5350 return -EINVAL; 5351 5352 cache_block_group(block_group, 0); 5353 caching_ctl = get_caching_control(block_group); 5354 5355 if (!caching_ctl) { 5356 /* Logic error */ 5357 BUG_ON(!block_group_cache_done(block_group)); 5358 ret = btrfs_remove_free_space(block_group, start, num_bytes); 5359 } else { 5360 mutex_lock(&caching_ctl->mutex); 5361 5362 if (start >= caching_ctl->progress) { 5363 ret = add_excluded_extent(root, start, num_bytes); 5364 } else if (start + num_bytes <= caching_ctl->progress) { 5365 ret = btrfs_remove_free_space(block_group, 5366 start, num_bytes); 5367 } else { 5368 num_bytes = caching_ctl->progress - start; 5369 ret = btrfs_remove_free_space(block_group, 5370 start, num_bytes); 5371 if (ret) 5372 goto out_lock; 5373 5374 num_bytes = (start + num_bytes) - 5375 caching_ctl->progress; 5376 start = caching_ctl->progress; 5377 ret = add_excluded_extent(root, start, num_bytes); 5378 } 5379 out_lock: 5380 mutex_unlock(&caching_ctl->mutex); 5381 put_caching_control(caching_ctl); 5382 } 5383 btrfs_put_block_group(block_group); 5384 return ret; 5385 } 5386 5387 int btrfs_exclude_logged_extents(struct btrfs_root *log, 5388 struct extent_buffer *eb) 
5389 { 5390 struct btrfs_file_extent_item *item; 5391 struct btrfs_key key; 5392 int found_type; 5393 int i; 5394 5395 if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) 5396 return 0; 5397 5398 for (i = 0; i < btrfs_header_nritems(eb); i++) { 5399 btrfs_item_key_to_cpu(eb, &key, i); 5400 if (key.type != BTRFS_EXTENT_DATA_KEY) 5401 continue; 5402 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); 5403 found_type = btrfs_file_extent_type(eb, item); 5404 if (found_type == BTRFS_FILE_EXTENT_INLINE) 5405 continue; 5406 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 5407 continue; 5408 key.objectid = btrfs_file_extent_disk_bytenr(eb, item); 5409 key.offset = btrfs_file_extent_disk_num_bytes(eb, item); 5410 __exclude_logged_extent(log, key.objectid, key.offset); 5411 } 5412 5413 return 0; 5414 } 5415 5416 /** 5417 * btrfs_update_reserved_bytes - update the block_group and space info counters 5418 * @cache: The cache we are manipulating 5419 * @num_bytes: The number of bytes in question 5420 * @reserve: One of the reservation enums 5421 * 5422 * This is called by the allocator when it reserves space, or by somebody who is 5423 * freeing space that was never actually used on disk. For example if you 5424 * reserve some space for a new leaf in transaction A and before transaction A 5425 * commits you free that leaf, you call this with reserve set to 0 in order to 5426 * clear the reservation. 5427 * 5428 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper 5429 * ENOSPC accounting. For data we handle the reservation through clearing the 5430 * delalloc bits in the io_tree. We have to do this since we could end up 5431 * allocating less disk space for the amount of data we have reserved in the 5432 * case of compression. 5433 * 5434 * If this is a reservation and the block group has become read only we cannot 5435 * make the reservation and return -EAGAIN, otherwise this function always 5436 * succeeds. 
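 * Summary of the accounting below: RESERVE_ALLOC and
 * RESERVE_ALLOC_NO_ACCOUNT add num_bytes to cache->reserved and
 * space_info->bytes_reserved (RESERVE_ALLOC additionally subtracts it
 * from bytes_may_use); RESERVE_FREE removes it from both, and also
 * credits bytes_readonly if the block group has gone read only.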
5437 */ 5438 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, 5439 u64 num_bytes, int reserve) 5440 { 5441 struct btrfs_space_info *space_info = cache->space_info; 5442 int ret = 0; 5443 5444 spin_lock(&space_info->lock); 5445 spin_lock(&cache->lock); 5446 if (reserve != RESERVE_FREE) { 5447 if (cache->ro) { 5448 ret = -EAGAIN; 5449 } else { 5450 cache->reserved += num_bytes; 5451 space_info->bytes_reserved += num_bytes; 5452 if (reserve == RESERVE_ALLOC) { 5453 trace_btrfs_space_reservation(cache->fs_info, 5454 "space_info", space_info->flags, 5455 num_bytes, 0); 5456 space_info->bytes_may_use -= num_bytes; 5457 } 5458 } 5459 } else { 5460 if (cache->ro) 5461 space_info->bytes_readonly += num_bytes; 5462 cache->reserved -= num_bytes; 5463 space_info->bytes_reserved -= num_bytes; 5464 } 5465 spin_unlock(&cache->lock); 5466 spin_unlock(&space_info->lock); 5467 return ret; 5468 } 5469 5470 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 5471 struct btrfs_root *root) 5472 { 5473 struct btrfs_fs_info *fs_info = root->fs_info; 5474 struct btrfs_caching_control *next; 5475 struct btrfs_caching_control *caching_ctl; 5476 struct btrfs_block_group_cache *cache; 5477 struct btrfs_space_info *space_info; 5478 5479 down_write(&fs_info->extent_commit_sem); 5480 5481 list_for_each_entry_safe(caching_ctl, next, 5482 &fs_info->caching_block_groups, list) { 5483 cache = caching_ctl->block_group; 5484 if (block_group_cache_done(cache)) { 5485 cache->last_byte_to_unpin = (u64)-1; 5486 list_del_init(&caching_ctl->list); 5487 put_caching_control(caching_ctl); 5488 } else { 5489 cache->last_byte_to_unpin = caching_ctl->progress; 5490 } 5491 } 5492 5493 if (fs_info->pinned_extents == &fs_info->freed_extents[0]) 5494 fs_info->pinned_extents = &fs_info->freed_extents[1]; 5495 else 5496 fs_info->pinned_extents = &fs_info->freed_extents[0]; 5497 5498 up_write(&fs_info->extent_commit_sem); 5499 5500 list_for_each_entry_rcu(space_info, &fs_info->space_info, list) 5501 percpu_counter_set(&space_info->total_bytes_pinned, 0); 5502 5503 update_global_block_rsv(fs_info); 5504 } 5505 5506 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) 5507 { 5508 struct btrfs_fs_info *fs_info = root->fs_info; 5509 struct btrfs_block_group_cache *cache = NULL; 5510 struct btrfs_space_info *space_info; 5511 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 5512 u64 len; 5513 bool readonly; 5514 5515 while (start <= end) { 5516 readonly = false; 5517 if (!cache || 5518 start >= cache->key.objectid + cache->key.offset) { 5519 if (cache) 5520 btrfs_put_block_group(cache); 5521 cache = btrfs_lookup_block_group(fs_info, start); 5522 BUG_ON(!cache); /* Logic error */ 5523 } 5524 5525 len = cache->key.objectid + cache->key.offset - start; 5526 len = min(len, end + 1 - start); 5527 5528 if (start < cache->last_byte_to_unpin) { 5529 len = min(len, cache->last_byte_to_unpin - start); 5530 btrfs_add_free_space(cache, start, len); 5531 } 5532 5533 start += len; 5534 space_info = cache->space_info; 5535 5536 spin_lock(&space_info->lock); 5537 spin_lock(&cache->lock); 5538 cache->pinned -= len; 5539 space_info->bytes_pinned -= len; 5540 if (cache->ro) { 5541 space_info->bytes_readonly += len; 5542 readonly = true; 5543 } 5544 spin_unlock(&cache->lock); 5545 if (!readonly && global_rsv->space_info == space_info) { 5546 spin_lock(&global_rsv->lock); 5547 if (!global_rsv->full) { 5548 len = min(len, global_rsv->size - 5549 global_rsv->reserved); 5550 
global_rsv->reserved += len; 5551 space_info->bytes_may_use += len; 5552 if (global_rsv->reserved >= global_rsv->size) 5553 global_rsv->full = 1; 5554 } 5555 spin_unlock(&global_rsv->lock); 5556 } 5557 spin_unlock(&space_info->lock); 5558 } 5559 5560 if (cache) 5561 btrfs_put_block_group(cache); 5562 return 0; 5563 } 5564 5565 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, 5566 struct btrfs_root *root) 5567 { 5568 struct btrfs_fs_info *fs_info = root->fs_info; 5569 struct extent_io_tree *unpin; 5570 u64 start; 5571 u64 end; 5572 int ret; 5573 5574 if (trans->aborted) 5575 return 0; 5576 5577 if (fs_info->pinned_extents == &fs_info->freed_extents[0]) 5578 unpin = &fs_info->freed_extents[1]; 5579 else 5580 unpin = &fs_info->freed_extents[0]; 5581 5582 while (1) { 5583 ret = find_first_extent_bit(unpin, 0, &start, &end, 5584 EXTENT_DIRTY, NULL); 5585 if (ret) 5586 break; 5587 5588 if (btrfs_test_opt(root, DISCARD)) 5589 ret = btrfs_discard_extent(root, start, 5590 end + 1 - start, NULL); 5591 5592 clear_extent_dirty(unpin, start, end, GFP_NOFS); 5593 unpin_extent_range(root, start, end); 5594 cond_resched(); 5595 } 5596 5597 return 0; 5598 } 5599 5600 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes, 5601 u64 owner, u64 root_objectid) 5602 { 5603 struct btrfs_space_info *space_info; 5604 u64 flags; 5605 5606 if (owner < BTRFS_FIRST_FREE_OBJECTID) { 5607 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID) 5608 flags = BTRFS_BLOCK_GROUP_SYSTEM; 5609 else 5610 flags = BTRFS_BLOCK_GROUP_METADATA; 5611 } else { 5612 flags = BTRFS_BLOCK_GROUP_DATA; 5613 } 5614 5615 space_info = __find_space_info(fs_info, flags); 5616 BUG_ON(!space_info); /* Logic bug */ 5617 percpu_counter_add(&space_info->total_bytes_pinned, num_bytes); 5618 } 5619 5620 5621 static int __btrfs_free_extent(struct btrfs_trans_handle *trans, 5622 struct btrfs_root *root, 5623 u64 bytenr, u64 num_bytes, u64 parent, 5624 u64 root_objectid, u64 owner_objectid, 5625 u64 owner_offset, int refs_to_drop, 5626 struct btrfs_delayed_extent_op *extent_op) 5627 { 5628 struct btrfs_key key; 5629 struct btrfs_path *path; 5630 struct btrfs_fs_info *info = root->fs_info; 5631 struct btrfs_root *extent_root = info->extent_root; 5632 struct extent_buffer *leaf; 5633 struct btrfs_extent_item *ei; 5634 struct btrfs_extent_inline_ref *iref; 5635 int ret; 5636 int is_data; 5637 int extent_slot = 0; 5638 int found_extent = 0; 5639 int num_to_del = 1; 5640 u32 item_size; 5641 u64 refs; 5642 bool skinny_metadata = btrfs_fs_incompat(root->fs_info, 5643 SKINNY_METADATA); 5644 5645 path = btrfs_alloc_path(); 5646 if (!path) 5647 return -ENOMEM; 5648 5649 path->reada = 1; 5650 path->leave_spinning = 1; 5651 5652 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID; 5653 BUG_ON(!is_data && refs_to_drop != 1); 5654 5655 if (is_data) 5656 skinny_metadata = 0; 5657 5658 ret = lookup_extent_backref(trans, extent_root, path, &iref, 5659 bytenr, num_bytes, parent, 5660 root_objectid, owner_objectid, 5661 owner_offset); 5662 if (ret == 0) { 5663 extent_slot = path->slots[0]; 5664 while (extent_slot >= 0) { 5665 btrfs_item_key_to_cpu(path->nodes[0], &key, 5666 extent_slot); 5667 if (key.objectid != bytenr) 5668 break; 5669 if (key.type == BTRFS_EXTENT_ITEM_KEY && 5670 key.offset == num_bytes) { 5671 found_extent = 1; 5672 break; 5673 } 5674 if (key.type == BTRFS_METADATA_ITEM_KEY && 5675 key.offset == owner_objectid) { 5676 found_extent = 1; 5677 break; 5678 } 5679 if (path->slots[0] - extent_slot > 5) 5680 break; 5681 
extent_slot--; 5682 } 5683 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 5684 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot); 5685 if (found_extent && item_size < sizeof(*ei)) 5686 found_extent = 0; 5687 #endif 5688 if (!found_extent) { 5689 BUG_ON(iref); 5690 ret = remove_extent_backref(trans, extent_root, path, 5691 NULL, refs_to_drop, 5692 is_data); 5693 if (ret) { 5694 btrfs_abort_transaction(trans, extent_root, ret); 5695 goto out; 5696 } 5697 btrfs_release_path(path); 5698 path->leave_spinning = 1; 5699 5700 key.objectid = bytenr; 5701 key.type = BTRFS_EXTENT_ITEM_KEY; 5702 key.offset = num_bytes; 5703 5704 if (!is_data && skinny_metadata) { 5705 key.type = BTRFS_METADATA_ITEM_KEY; 5706 key.offset = owner_objectid; 5707 } 5708 5709 ret = btrfs_search_slot(trans, extent_root, 5710 &key, path, -1, 1); 5711 if (ret > 0 && skinny_metadata && path->slots[0]) { 5712 /* 5713 * Couldn't find our skinny metadata item, 5714 * see if we have ye olde extent item. 5715 */ 5716 path->slots[0]--; 5717 btrfs_item_key_to_cpu(path->nodes[0], &key, 5718 path->slots[0]); 5719 if (key.objectid == bytenr && 5720 key.type == BTRFS_EXTENT_ITEM_KEY && 5721 key.offset == num_bytes) 5722 ret = 0; 5723 } 5724 5725 if (ret > 0 && skinny_metadata) { 5726 skinny_metadata = false; 5727 key.type = BTRFS_EXTENT_ITEM_KEY; 5728 key.offset = num_bytes; 5729 btrfs_release_path(path); 5730 ret = btrfs_search_slot(trans, extent_root, 5731 &key, path, -1, 1); 5732 } 5733 5734 if (ret) { 5735 btrfs_err(info, "umm, got %d back from search, was looking for %llu", 5736 ret, bytenr); 5737 if (ret > 0) 5738 btrfs_print_leaf(extent_root, 5739 path->nodes[0]); 5740 } 5741 if (ret < 0) { 5742 btrfs_abort_transaction(trans, extent_root, ret); 5743 goto out; 5744 } 5745 extent_slot = path->slots[0]; 5746 } 5747 } else if (WARN_ON(ret == -ENOENT)) { 5748 btrfs_print_leaf(extent_root, path->nodes[0]); 5749 btrfs_err(info, 5750 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu", 5751 bytenr, parent, root_objectid, owner_objectid, 5752 owner_offset); 5753 } else { 5754 btrfs_abort_transaction(trans, extent_root, ret); 5755 goto out; 5756 } 5757 5758 leaf = path->nodes[0]; 5759 item_size = btrfs_item_size_nr(leaf, extent_slot); 5760 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 5761 if (item_size < sizeof(*ei)) { 5762 BUG_ON(found_extent || extent_slot != path->slots[0]); 5763 ret = convert_extent_item_v0(trans, extent_root, path, 5764 owner_objectid, 0); 5765 if (ret < 0) { 5766 btrfs_abort_transaction(trans, extent_root, ret); 5767 goto out; 5768 } 5769 5770 btrfs_release_path(path); 5771 path->leave_spinning = 1; 5772 5773 key.objectid = bytenr; 5774 key.type = BTRFS_EXTENT_ITEM_KEY; 5775 key.offset = num_bytes; 5776 5777 ret = btrfs_search_slot(trans, extent_root, &key, path, 5778 -1, 1); 5779 if (ret) { 5780 btrfs_err(info, "umm, got %d back from search, was looking for %llu", 5781 ret, bytenr); 5782 btrfs_print_leaf(extent_root, path->nodes[0]); 5783 } 5784 if (ret < 0) { 5785 btrfs_abort_transaction(trans, extent_root, ret); 5786 goto out; 5787 } 5788 5789 extent_slot = path->slots[0]; 5790 leaf = path->nodes[0]; 5791 item_size = btrfs_item_size_nr(leaf, extent_slot); 5792 } 5793 #endif 5794 BUG_ON(item_size < sizeof(*ei)); 5795 ei = btrfs_item_ptr(leaf, extent_slot, 5796 struct btrfs_extent_item); 5797 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID && 5798 key.type == BTRFS_EXTENT_ITEM_KEY) { 5799 struct btrfs_tree_block_info *bi; 5800 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi)); 5801 bi = (struct 
btrfs_tree_block_info *)(ei + 1); 5802 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); 5803 } 5804 5805 refs = btrfs_extent_refs(leaf, ei); 5806 if (refs < refs_to_drop) { 5807 btrfs_err(info, "trying to drop %d refs but we only have %Lu " 5808 "for bytenr %Lu\n", refs_to_drop, refs, bytenr); 5809 ret = -EINVAL; 5810 btrfs_abort_transaction(trans, extent_root, ret); 5811 goto out; 5812 } 5813 refs -= refs_to_drop; 5814 5815 if (refs > 0) { 5816 if (extent_op) 5817 __run_delayed_extent_op(extent_op, leaf, ei); 5818 /* 5819 * In the case of inline back ref, reference count will 5820 * be updated by remove_extent_backref 5821 */ 5822 if (iref) { 5823 BUG_ON(!found_extent); 5824 } else { 5825 btrfs_set_extent_refs(leaf, ei, refs); 5826 btrfs_mark_buffer_dirty(leaf); 5827 } 5828 if (found_extent) { 5829 ret = remove_extent_backref(trans, extent_root, path, 5830 iref, refs_to_drop, 5831 is_data); 5832 if (ret) { 5833 btrfs_abort_transaction(trans, extent_root, ret); 5834 goto out; 5835 } 5836 } 5837 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid, 5838 root_objectid); 5839 } else { 5840 if (found_extent) { 5841 BUG_ON(is_data && refs_to_drop != 5842 extent_data_ref_count(root, path, iref)); 5843 if (iref) { 5844 BUG_ON(path->slots[0] != extent_slot); 5845 } else { 5846 BUG_ON(path->slots[0] != extent_slot + 1); 5847 path->slots[0] = extent_slot; 5848 num_to_del = 2; 5849 } 5850 } 5851 5852 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], 5853 num_to_del); 5854 if (ret) { 5855 btrfs_abort_transaction(trans, extent_root, ret); 5856 goto out; 5857 } 5858 btrfs_release_path(path); 5859 5860 if (is_data) { 5861 ret = btrfs_del_csums(trans, root, bytenr, num_bytes); 5862 if (ret) { 5863 btrfs_abort_transaction(trans, extent_root, ret); 5864 goto out; 5865 } 5866 } 5867 5868 ret = update_block_group(root, bytenr, num_bytes, 0); 5869 if (ret) { 5870 btrfs_abort_transaction(trans, extent_root, ret); 5871 goto out; 5872 } 5873 } 5874 out: 5875 btrfs_free_path(path); 5876 return ret; 5877 } 5878 5879 /* 5880 * when we free an block, it is possible (and likely) that we free the last 5881 * delayed ref for that extent as well. This searches the delayed ref tree for 5882 * a given extent, and if there are no other delayed refs to be processed, it 5883 * removes it from the tree. 5884 */ 5885 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, 5886 struct btrfs_root *root, u64 bytenr) 5887 { 5888 struct btrfs_delayed_ref_head *head; 5889 struct btrfs_delayed_ref_root *delayed_refs; 5890 int ret = 0; 5891 5892 delayed_refs = &trans->transaction->delayed_refs; 5893 spin_lock(&delayed_refs->lock); 5894 head = btrfs_find_delayed_ref_head(trans, bytenr); 5895 if (!head) 5896 goto out_delayed_unlock; 5897 5898 spin_lock(&head->lock); 5899 if (rb_first(&head->ref_root)) 5900 goto out; 5901 5902 if (head->extent_op) { 5903 if (!head->must_insert_reserved) 5904 goto out; 5905 btrfs_free_delayed_extent_op(head->extent_op); 5906 head->extent_op = NULL; 5907 } 5908 5909 /* 5910 * waiting for the lock here would deadlock. If someone else has it 5911 * locked they are already in the process of dropping it anyway 5912 */ 5913 if (!mutex_trylock(&head->mutex)) 5914 goto out; 5915 5916 /* 5917 * at this point we have a head with no other entries. Go 5918 * ahead and process it. 
5919 */ 5920 head->node.in_tree = 0; 5921 rb_erase(&head->href_node, &delayed_refs->href_root); 5922 5923 atomic_dec(&delayed_refs->num_entries); 5924 5925 /* 5926 * we don't take a ref on the node because we're removing it from the 5927 * tree, so we just steal the ref the tree was holding. 5928 */ 5929 delayed_refs->num_heads--; 5930 if (head->processing == 0) 5931 delayed_refs->num_heads_ready--; 5932 head->processing = 0; 5933 spin_unlock(&head->lock); 5934 spin_unlock(&delayed_refs->lock); 5935 5936 BUG_ON(head->extent_op); 5937 if (head->must_insert_reserved) 5938 ret = 1; 5939 5940 mutex_unlock(&head->mutex); 5941 btrfs_put_delayed_ref(&head->node); 5942 return ret; 5943 out: 5944 spin_unlock(&head->lock); 5945 5946 out_delayed_unlock: 5947 spin_unlock(&delayed_refs->lock); 5948 return 0; 5949 } 5950 5951 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 5952 struct btrfs_root *root, 5953 struct extent_buffer *buf, 5954 u64 parent, int last_ref) 5955 { 5956 struct btrfs_block_group_cache *cache = NULL; 5957 int pin = 1; 5958 int ret; 5959 5960 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 5961 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, 5962 buf->start, buf->len, 5963 parent, root->root_key.objectid, 5964 btrfs_header_level(buf), 5965 BTRFS_DROP_DELAYED_REF, NULL, 0); 5966 BUG_ON(ret); /* -ENOMEM */ 5967 } 5968 5969 if (!last_ref) 5970 return; 5971 5972 cache = btrfs_lookup_block_group(root->fs_info, buf->start); 5973 5974 if (btrfs_header_generation(buf) == trans->transid) { 5975 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 5976 ret = check_ref_cleanup(trans, root, buf->start); 5977 if (!ret) 5978 goto out; 5979 } 5980 5981 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 5982 pin_down_extent(root, cache, buf->start, buf->len, 1); 5983 goto out; 5984 } 5985 5986 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 5987 5988 btrfs_add_free_space(cache, buf->start, buf->len); 5989 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE); 5990 trace_btrfs_reserved_extent_free(root, buf->start, buf->len); 5991 pin = 0; 5992 } 5993 out: 5994 if (pin) 5995 add_pinned_bytes(root->fs_info, buf->len, 5996 btrfs_header_level(buf), 5997 root->root_key.objectid); 5998 5999 /* 6000 * Deleting the buffer, clear the corrupt flag since it doesn't matter 6001 * anymore. 6002 */ 6003 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); 6004 btrfs_put_block_group(cache); 6005 } 6006 6007 /* Can return -ENOMEM */ 6008 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, 6009 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 6010 u64 owner, u64 offset, int for_cow) 6011 { 6012 int ret; 6013 struct btrfs_fs_info *fs_info = root->fs_info; 6014 6015 add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid); 6016 6017 /* 6018 * tree log blocks never actually go into the extent allocation 6019 * tree, just update pinning info and exit early. 
6020 */ 6021 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) { 6022 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID); 6023 /* unlocks the pinned mutex */ 6024 btrfs_pin_extent(root, bytenr, num_bytes, 1); 6025 ret = 0; 6026 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) { 6027 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, 6028 num_bytes, 6029 parent, root_objectid, (int)owner, 6030 BTRFS_DROP_DELAYED_REF, NULL, for_cow); 6031 } else { 6032 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr, 6033 num_bytes, 6034 parent, root_objectid, owner, 6035 offset, BTRFS_DROP_DELAYED_REF, 6036 NULL, for_cow); 6037 } 6038 return ret; 6039 } 6040 6041 static u64 stripe_align(struct btrfs_root *root, 6042 struct btrfs_block_group_cache *cache, 6043 u64 val, u64 num_bytes) 6044 { 6045 u64 ret = ALIGN(val, root->stripesize); 6046 return ret; 6047 } 6048 6049 /* 6050 * when we wait for progress in the block group caching, its because 6051 * our allocation attempt failed at least once. So, we must sleep 6052 * and let some progress happen before we try again. 6053 * 6054 * This function will sleep at least once waiting for new free space to 6055 * show up, and then it will check the block group free space numbers 6056 * for our min num_bytes. Another option is to have it go ahead 6057 * and look in the rbtree for a free extent of a given size, but this 6058 * is a good start. 6059 * 6060 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using 6061 * any of the information in this block group. 6062 */ 6063 static noinline void 6064 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, 6065 u64 num_bytes) 6066 { 6067 struct btrfs_caching_control *caching_ctl; 6068 6069 caching_ctl = get_caching_control(cache); 6070 if (!caching_ctl) 6071 return; 6072 6073 wait_event(caching_ctl->wait, block_group_cache_done(cache) || 6074 (cache->free_space_ctl->free_space >= num_bytes)); 6075 6076 put_caching_control(caching_ctl); 6077 } 6078 6079 static noinline int 6080 wait_block_group_cache_done(struct btrfs_block_group_cache *cache) 6081 { 6082 struct btrfs_caching_control *caching_ctl; 6083 int ret = 0; 6084 6085 caching_ctl = get_caching_control(cache); 6086 if (!caching_ctl) 6087 return (cache->cached == BTRFS_CACHE_ERROR) ? 
-EIO : 0; 6088 6089 wait_event(caching_ctl->wait, block_group_cache_done(cache)); 6090 if (cache->cached == BTRFS_CACHE_ERROR) 6091 ret = -EIO; 6092 put_caching_control(caching_ctl); 6093 return ret; 6094 } 6095 6096 int __get_raid_index(u64 flags) 6097 { 6098 if (flags & BTRFS_BLOCK_GROUP_RAID10) 6099 return BTRFS_RAID_RAID10; 6100 else if (flags & BTRFS_BLOCK_GROUP_RAID1) 6101 return BTRFS_RAID_RAID1; 6102 else if (flags & BTRFS_BLOCK_GROUP_DUP) 6103 return BTRFS_RAID_DUP; 6104 else if (flags & BTRFS_BLOCK_GROUP_RAID0) 6105 return BTRFS_RAID_RAID0; 6106 else if (flags & BTRFS_BLOCK_GROUP_RAID5) 6107 return BTRFS_RAID_RAID5; 6108 else if (flags & BTRFS_BLOCK_GROUP_RAID6) 6109 return BTRFS_RAID_RAID6; 6110 6111 return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */ 6112 } 6113 6114 int get_block_group_index(struct btrfs_block_group_cache *cache) 6115 { 6116 return __get_raid_index(cache->flags); 6117 } 6118 6119 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = { 6120 [BTRFS_RAID_RAID10] = "raid10", 6121 [BTRFS_RAID_RAID1] = "raid1", 6122 [BTRFS_RAID_DUP] = "dup", 6123 [BTRFS_RAID_RAID0] = "raid0", 6124 [BTRFS_RAID_SINGLE] = "single", 6125 [BTRFS_RAID_RAID5] = "raid5", 6126 [BTRFS_RAID_RAID6] = "raid6", 6127 }; 6128 6129 static const char *get_raid_name(enum btrfs_raid_types type) 6130 { 6131 if (type >= BTRFS_NR_RAID_TYPES) 6132 return NULL; 6133 6134 return btrfs_raid_type_names[type]; 6135 } 6136 6137 enum btrfs_loop_type { 6138 LOOP_CACHING_NOWAIT = 0, 6139 LOOP_CACHING_WAIT = 1, 6140 LOOP_ALLOC_CHUNK = 2, 6141 LOOP_NO_EMPTY_SIZE = 3, 6142 }; 6143 6144 /* 6145 * walks the btree of allocated extents and find a hole of a given size. 6146 * The key ins is changed to record the hole: 6147 * ins->objectid == start position 6148 * ins->flags = BTRFS_EXTENT_ITEM_KEY 6149 * ins->offset == the size of the hole. 6150 * Any available blocks before search_start are skipped. 6151 * 6152 * If there is no suitable free space, we will record the max size of 6153 * the free space extent currently. 6154 */ 6155 static noinline int find_free_extent(struct btrfs_root *orig_root, 6156 u64 num_bytes, u64 empty_size, 6157 u64 hint_byte, struct btrfs_key *ins, 6158 u64 flags) 6159 { 6160 int ret = 0; 6161 struct btrfs_root *root = orig_root->fs_info->extent_root; 6162 struct btrfs_free_cluster *last_ptr = NULL; 6163 struct btrfs_block_group_cache *block_group = NULL; 6164 u64 search_start = 0; 6165 u64 max_extent_size = 0; 6166 int empty_cluster = 2 * 1024 * 1024; 6167 struct btrfs_space_info *space_info; 6168 int loop = 0; 6169 int index = __get_raid_index(flags); 6170 int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ? 6171 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC; 6172 bool failed_cluster_refill = false; 6173 bool failed_alloc = false; 6174 bool use_cluster = true; 6175 bool have_caching_bg = false; 6176 6177 WARN_ON(num_bytes < root->sectorsize); 6178 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); 6179 ins->objectid = 0; 6180 ins->offset = 0; 6181 6182 trace_find_free_extent(orig_root, num_bytes, empty_size, flags); 6183 6184 space_info = __find_space_info(root->fs_info, flags); 6185 if (!space_info) { 6186 btrfs_err(root->fs_info, "No space info for %llu", flags); 6187 return -ENOSPC; 6188 } 6189 6190 /* 6191 * If the space info is for both data and metadata it means we have a 6192 * small filesystem and we can't use the clustering stuff. 
6193 */ 6194 if (btrfs_mixed_space_info(space_info)) 6195 use_cluster = false; 6196 6197 if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) { 6198 last_ptr = &root->fs_info->meta_alloc_cluster; 6199 if (!btrfs_test_opt(root, SSD)) 6200 empty_cluster = 64 * 1024; 6201 } 6202 6203 if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster && 6204 btrfs_test_opt(root, SSD)) { 6205 last_ptr = &root->fs_info->data_alloc_cluster; 6206 } 6207 6208 if (last_ptr) { 6209 spin_lock(&last_ptr->lock); 6210 if (last_ptr->block_group) 6211 hint_byte = last_ptr->window_start; 6212 spin_unlock(&last_ptr->lock); 6213 } 6214 6215 search_start = max(search_start, first_logical_byte(root, 0)); 6216 search_start = max(search_start, hint_byte); 6217 6218 if (!last_ptr) 6219 empty_cluster = 0; 6220 6221 if (search_start == hint_byte) { 6222 block_group = btrfs_lookup_block_group(root->fs_info, 6223 search_start); 6224 /* 6225 * we don't want to use the block group if it doesn't match our 6226 * allocation bits, or if its not cached. 6227 * 6228 * However if we are re-searching with an ideal block group 6229 * picked out then we don't care that the block group is cached. 6230 */ 6231 if (block_group && block_group_bits(block_group, flags) && 6232 block_group->cached != BTRFS_CACHE_NO) { 6233 down_read(&space_info->groups_sem); 6234 if (list_empty(&block_group->list) || 6235 block_group->ro) { 6236 /* 6237 * someone is removing this block group, 6238 * we can't jump into the have_block_group 6239 * target because our list pointers are not 6240 * valid 6241 */ 6242 btrfs_put_block_group(block_group); 6243 up_read(&space_info->groups_sem); 6244 } else { 6245 index = get_block_group_index(block_group); 6246 goto have_block_group; 6247 } 6248 } else if (block_group) { 6249 btrfs_put_block_group(block_group); 6250 } 6251 } 6252 search: 6253 have_caching_bg = false; 6254 down_read(&space_info->groups_sem); 6255 list_for_each_entry(block_group, &space_info->block_groups[index], 6256 list) { 6257 u64 offset; 6258 int cached; 6259 6260 btrfs_get_block_group(block_group); 6261 search_start = block_group->key.objectid; 6262 6263 /* 6264 * this can happen if we end up cycling through all the 6265 * raid types, but we want to make sure we only allocate 6266 * for the proper type. 6267 */ 6268 if (!block_group_bits(block_group, flags)) { 6269 u64 extra = BTRFS_BLOCK_GROUP_DUP | 6270 BTRFS_BLOCK_GROUP_RAID1 | 6271 BTRFS_BLOCK_GROUP_RAID5 | 6272 BTRFS_BLOCK_GROUP_RAID6 | 6273 BTRFS_BLOCK_GROUP_RAID10; 6274 6275 /* 6276 * if they asked for extra copies and this block group 6277 * doesn't provide them, bail. This does allow us to 6278 * fill raid0 from raid1. 
6279 */ 6280 if ((flags & extra) && !(block_group->flags & extra)) 6281 goto loop; 6282 } 6283 6284 have_block_group: 6285 cached = block_group_cache_done(block_group); 6286 if (unlikely(!cached)) { 6287 ret = cache_block_group(block_group, 0); 6288 BUG_ON(ret < 0); 6289 ret = 0; 6290 } 6291 6292 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) 6293 goto loop; 6294 if (unlikely(block_group->ro)) 6295 goto loop; 6296 6297 /* 6298 * Ok we want to try and use the cluster allocator, so 6299 * lets look there 6300 */ 6301 if (last_ptr) { 6302 struct btrfs_block_group_cache *used_block_group; 6303 unsigned long aligned_cluster; 6304 /* 6305 * the refill lock keeps out other 6306 * people trying to start a new cluster 6307 */ 6308 spin_lock(&last_ptr->refill_lock); 6309 used_block_group = last_ptr->block_group; 6310 if (used_block_group != block_group && 6311 (!used_block_group || 6312 used_block_group->ro || 6313 !block_group_bits(used_block_group, flags))) 6314 goto refill_cluster; 6315 6316 if (used_block_group != block_group) 6317 btrfs_get_block_group(used_block_group); 6318 6319 offset = btrfs_alloc_from_cluster(used_block_group, 6320 last_ptr, 6321 num_bytes, 6322 used_block_group->key.objectid, 6323 &max_extent_size); 6324 if (offset) { 6325 /* we have a block, we're done */ 6326 spin_unlock(&last_ptr->refill_lock); 6327 trace_btrfs_reserve_extent_cluster(root, 6328 used_block_group, 6329 search_start, num_bytes); 6330 if (used_block_group != block_group) { 6331 btrfs_put_block_group(block_group); 6332 block_group = used_block_group; 6333 } 6334 goto checks; 6335 } 6336 6337 WARN_ON(last_ptr->block_group != used_block_group); 6338 if (used_block_group != block_group) 6339 btrfs_put_block_group(used_block_group); 6340 refill_cluster: 6341 /* If we are on LOOP_NO_EMPTY_SIZE, we can't 6342 * set up a new clusters, so lets just skip it 6343 * and let the allocator find whatever block 6344 * it can find. If we reach this point, we 6345 * will have tried the cluster allocator 6346 * plenty of times and not have found 6347 * anything, so we are likely way too 6348 * fragmented for the clustering stuff to find 6349 * anything. 6350 * 6351 * However, if the cluster is taken from the 6352 * current block group, release the cluster 6353 * first, so that we stand a better chance of 6354 * succeeding in the unclustered 6355 * allocation. 
*/ 6356 if (loop >= LOOP_NO_EMPTY_SIZE && 6357 last_ptr->block_group != block_group) { 6358 spin_unlock(&last_ptr->refill_lock); 6359 goto unclustered_alloc; 6360 } 6361 6362 /* 6363 * this cluster didn't work out, free it and 6364 * start over 6365 */ 6366 btrfs_return_cluster_to_free_space(NULL, last_ptr); 6367 6368 if (loop >= LOOP_NO_EMPTY_SIZE) { 6369 spin_unlock(&last_ptr->refill_lock); 6370 goto unclustered_alloc; 6371 } 6372 6373 aligned_cluster = max_t(unsigned long, 6374 empty_cluster + empty_size, 6375 block_group->full_stripe_len); 6376 6377 /* allocate a cluster in this block group */ 6378 ret = btrfs_find_space_cluster(root, block_group, 6379 last_ptr, search_start, 6380 num_bytes, 6381 aligned_cluster); 6382 if (ret == 0) { 6383 /* 6384 * now pull our allocation out of this 6385 * cluster 6386 */ 6387 offset = btrfs_alloc_from_cluster(block_group, 6388 last_ptr, 6389 num_bytes, 6390 search_start, 6391 &max_extent_size); 6392 if (offset) { 6393 /* we found one, proceed */ 6394 spin_unlock(&last_ptr->refill_lock); 6395 trace_btrfs_reserve_extent_cluster(root, 6396 block_group, search_start, 6397 num_bytes); 6398 goto checks; 6399 } 6400 } else if (!cached && loop > LOOP_CACHING_NOWAIT 6401 && !failed_cluster_refill) { 6402 spin_unlock(&last_ptr->refill_lock); 6403 6404 failed_cluster_refill = true; 6405 wait_block_group_cache_progress(block_group, 6406 num_bytes + empty_cluster + empty_size); 6407 goto have_block_group; 6408 } 6409 6410 /* 6411 * at this point we either didn't find a cluster 6412 * or we weren't able to allocate a block from our 6413 * cluster. Free the cluster we've been trying 6414 * to use, and go to the next block group 6415 */ 6416 btrfs_return_cluster_to_free_space(NULL, last_ptr); 6417 spin_unlock(&last_ptr->refill_lock); 6418 goto loop; 6419 } 6420 6421 unclustered_alloc: 6422 spin_lock(&block_group->free_space_ctl->tree_lock); 6423 if (cached && 6424 block_group->free_space_ctl->free_space < 6425 num_bytes + empty_cluster + empty_size) { 6426 if (block_group->free_space_ctl->free_space > 6427 max_extent_size) 6428 max_extent_size = 6429 block_group->free_space_ctl->free_space; 6430 spin_unlock(&block_group->free_space_ctl->tree_lock); 6431 goto loop; 6432 } 6433 spin_unlock(&block_group->free_space_ctl->tree_lock); 6434 6435 offset = btrfs_find_space_for_alloc(block_group, search_start, 6436 num_bytes, empty_size, 6437 &max_extent_size); 6438 /* 6439 * If we didn't find a chunk, and we haven't failed on this 6440 * block group before, and this block group is in the middle of 6441 * caching and we are ok with waiting, then go ahead and wait 6442 * for progress to be made, and set failed_alloc to true. 6443 * 6444 * If failed_alloc is true then we've already waited on this 6445 * block group once and should move on to the next block group. 
6446 */ 6447 if (!offset && !failed_alloc && !cached && 6448 loop > LOOP_CACHING_NOWAIT) { 6449 wait_block_group_cache_progress(block_group, 6450 num_bytes + empty_size); 6451 failed_alloc = true; 6452 goto have_block_group; 6453 } else if (!offset) { 6454 if (!cached) 6455 have_caching_bg = true; 6456 goto loop; 6457 } 6458 checks: 6459 search_start = stripe_align(root, block_group, 6460 offset, num_bytes); 6461 6462 /* move on to the next group */ 6463 if (search_start + num_bytes > 6464 block_group->key.objectid + block_group->key.offset) { 6465 btrfs_add_free_space(block_group, offset, num_bytes); 6466 goto loop; 6467 } 6468 6469 if (offset < search_start) 6470 btrfs_add_free_space(block_group, offset, 6471 search_start - offset); 6472 BUG_ON(offset > search_start); 6473 6474 ret = btrfs_update_reserved_bytes(block_group, num_bytes, 6475 alloc_type); 6476 if (ret == -EAGAIN) { 6477 btrfs_add_free_space(block_group, offset, num_bytes); 6478 goto loop; 6479 } 6480 6481 /* we are all good, lets return */ 6482 ins->objectid = search_start; 6483 ins->offset = num_bytes; 6484 6485 trace_btrfs_reserve_extent(orig_root, block_group, 6486 search_start, num_bytes); 6487 btrfs_put_block_group(block_group); 6488 break; 6489 loop: 6490 failed_cluster_refill = false; 6491 failed_alloc = false; 6492 BUG_ON(index != get_block_group_index(block_group)); 6493 btrfs_put_block_group(block_group); 6494 } 6495 up_read(&space_info->groups_sem); 6496 6497 if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg) 6498 goto search; 6499 6500 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES) 6501 goto search; 6502 6503 /* 6504 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking 6505 * caching kthreads as we move along 6506 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching 6507 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again 6508 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try 6509 * again 6510 */ 6511 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) { 6512 index = 0; 6513 loop++; 6514 if (loop == LOOP_ALLOC_CHUNK) { 6515 struct btrfs_trans_handle *trans; 6516 6517 trans = btrfs_join_transaction(root); 6518 if (IS_ERR(trans)) { 6519 ret = PTR_ERR(trans); 6520 goto out; 6521 } 6522 6523 ret = do_chunk_alloc(trans, root, flags, 6524 CHUNK_ALLOC_FORCE); 6525 /* 6526 * Do not bail out on ENOSPC since we 6527 * can do more things. 6528 */ 6529 if (ret < 0 && ret != -ENOSPC) 6530 btrfs_abort_transaction(trans, 6531 root, ret); 6532 else 6533 ret = 0; 6534 btrfs_end_transaction(trans, root); 6535 if (ret) 6536 goto out; 6537 } 6538 6539 if (loop == LOOP_NO_EMPTY_SIZE) { 6540 empty_size = 0; 6541 empty_cluster = 0; 6542 } 6543 6544 goto search; 6545 } else if (!ins->objectid) { 6546 ret = -ENOSPC; 6547 } else if (ins->objectid) { 6548 ret = 0; 6549 } 6550 out: 6551 if (ret == -ENOSPC) 6552 ins->offset = max_extent_size; 6553 return ret; 6554 } 6555 6556 static void dump_space_info(struct btrfs_space_info *info, u64 bytes, 6557 int dump_block_groups) 6558 { 6559 struct btrfs_block_group_cache *cache; 6560 int index = 0; 6561 6562 spin_lock(&info->lock); 6563 printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n", 6564 info->flags, 6565 info->total_bytes - info->bytes_used - info->bytes_pinned - 6566 info->bytes_reserved - info->bytes_readonly, 6567 (info->full) ? 
"" : "not "); 6568 printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, " 6569 "reserved=%llu, may_use=%llu, readonly=%llu\n", 6570 info->total_bytes, info->bytes_used, info->bytes_pinned, 6571 info->bytes_reserved, info->bytes_may_use, 6572 info->bytes_readonly); 6573 spin_unlock(&info->lock); 6574 6575 if (!dump_block_groups) 6576 return; 6577 6578 down_read(&info->groups_sem); 6579 again: 6580 list_for_each_entry(cache, &info->block_groups[index], list) { 6581 spin_lock(&cache->lock); 6582 printk(KERN_INFO "BTRFS: " 6583 "block group %llu has %llu bytes, " 6584 "%llu used %llu pinned %llu reserved %s\n", 6585 cache->key.objectid, cache->key.offset, 6586 btrfs_block_group_used(&cache->item), cache->pinned, 6587 cache->reserved, cache->ro ? "[readonly]" : ""); 6588 btrfs_dump_free_space(cache, bytes); 6589 spin_unlock(&cache->lock); 6590 } 6591 if (++index < BTRFS_NR_RAID_TYPES) 6592 goto again; 6593 up_read(&info->groups_sem); 6594 } 6595 6596 int btrfs_reserve_extent(struct btrfs_root *root, 6597 u64 num_bytes, u64 min_alloc_size, 6598 u64 empty_size, u64 hint_byte, 6599 struct btrfs_key *ins, int is_data) 6600 { 6601 bool final_tried = false; 6602 u64 flags; 6603 int ret; 6604 6605 flags = btrfs_get_alloc_profile(root, is_data); 6606 again: 6607 WARN_ON(num_bytes < root->sectorsize); 6608 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins, 6609 flags); 6610 6611 if (ret == -ENOSPC) { 6612 if (!final_tried && ins->offset) { 6613 num_bytes = min(num_bytes >> 1, ins->offset); 6614 num_bytes = round_down(num_bytes, root->sectorsize); 6615 num_bytes = max(num_bytes, min_alloc_size); 6616 if (num_bytes == min_alloc_size) 6617 final_tried = true; 6618 goto again; 6619 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) { 6620 struct btrfs_space_info *sinfo; 6621 6622 sinfo = __find_space_info(root->fs_info, flags); 6623 btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu", 6624 flags, num_bytes); 6625 if (sinfo) 6626 dump_space_info(sinfo, num_bytes, 1); 6627 } 6628 } 6629 6630 return ret; 6631 } 6632 6633 static int __btrfs_free_reserved_extent(struct btrfs_root *root, 6634 u64 start, u64 len, int pin) 6635 { 6636 struct btrfs_block_group_cache *cache; 6637 int ret = 0; 6638 6639 cache = btrfs_lookup_block_group(root->fs_info, start); 6640 if (!cache) { 6641 btrfs_err(root->fs_info, "Unable to find block group for %llu", 6642 start); 6643 return -ENOSPC; 6644 } 6645 6646 if (btrfs_test_opt(root, DISCARD)) 6647 ret = btrfs_discard_extent(root, start, len, NULL); 6648 6649 if (pin) 6650 pin_down_extent(root, cache, start, len, 1); 6651 else { 6652 btrfs_add_free_space(cache, start, len); 6653 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE); 6654 } 6655 btrfs_put_block_group(cache); 6656 6657 trace_btrfs_reserved_extent_free(root, start, len); 6658 6659 return ret; 6660 } 6661 6662 int btrfs_free_reserved_extent(struct btrfs_root *root, 6663 u64 start, u64 len) 6664 { 6665 return __btrfs_free_reserved_extent(root, start, len, 0); 6666 } 6667 6668 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 6669 u64 start, u64 len) 6670 { 6671 return __btrfs_free_reserved_extent(root, start, len, 1); 6672 } 6673 6674 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 6675 struct btrfs_root *root, 6676 u64 parent, u64 root_objectid, 6677 u64 flags, u64 owner, u64 offset, 6678 struct btrfs_key *ins, int ref_mod) 6679 { 6680 int ret; 6681 struct btrfs_fs_info *fs_info = root->fs_info; 6682 struct btrfs_extent_item 
*extent_item; 6683 struct btrfs_extent_inline_ref *iref; 6684 struct btrfs_path *path; 6685 struct extent_buffer *leaf; 6686 int type; 6687 u32 size; 6688 6689 if (parent > 0) 6690 type = BTRFS_SHARED_DATA_REF_KEY; 6691 else 6692 type = BTRFS_EXTENT_DATA_REF_KEY; 6693 6694 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); 6695 6696 path = btrfs_alloc_path(); 6697 if (!path) 6698 return -ENOMEM; 6699 6700 path->leave_spinning = 1; 6701 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 6702 ins, size); 6703 if (ret) { 6704 btrfs_free_path(path); 6705 return ret; 6706 } 6707 6708 leaf = path->nodes[0]; 6709 extent_item = btrfs_item_ptr(leaf, path->slots[0], 6710 struct btrfs_extent_item); 6711 btrfs_set_extent_refs(leaf, extent_item, ref_mod); 6712 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 6713 btrfs_set_extent_flags(leaf, extent_item, 6714 flags | BTRFS_EXTENT_FLAG_DATA); 6715 6716 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 6717 btrfs_set_extent_inline_ref_type(leaf, iref, type); 6718 if (parent > 0) { 6719 struct btrfs_shared_data_ref *ref; 6720 ref = (struct btrfs_shared_data_ref *)(iref + 1); 6721 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 6722 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); 6723 } else { 6724 struct btrfs_extent_data_ref *ref; 6725 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 6726 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); 6727 btrfs_set_extent_data_ref_objectid(leaf, ref, owner); 6728 btrfs_set_extent_data_ref_offset(leaf, ref, offset); 6729 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); 6730 } 6731 6732 btrfs_mark_buffer_dirty(path->nodes[0]); 6733 btrfs_free_path(path); 6734 6735 ret = update_block_group(root, ins->objectid, ins->offset, 1); 6736 if (ret) { /* -ENOENT, logic error */ 6737 btrfs_err(fs_info, "update block group failed for %llu %llu", 6738 ins->objectid, ins->offset); 6739 BUG(); 6740 } 6741 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset); 6742 return ret; 6743 } 6744 6745 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, 6746 struct btrfs_root *root, 6747 u64 parent, u64 root_objectid, 6748 u64 flags, struct btrfs_disk_key *key, 6749 int level, struct btrfs_key *ins) 6750 { 6751 int ret; 6752 struct btrfs_fs_info *fs_info = root->fs_info; 6753 struct btrfs_extent_item *extent_item; 6754 struct btrfs_tree_block_info *block_info; 6755 struct btrfs_extent_inline_ref *iref; 6756 struct btrfs_path *path; 6757 struct extent_buffer *leaf; 6758 u32 size = sizeof(*extent_item) + sizeof(*iref); 6759 bool skinny_metadata = btrfs_fs_incompat(root->fs_info, 6760 SKINNY_METADATA); 6761 6762 if (!skinny_metadata) 6763 size += sizeof(*block_info); 6764 6765 path = btrfs_alloc_path(); 6766 if (!path) { 6767 btrfs_free_and_pin_reserved_extent(root, ins->objectid, 6768 root->leafsize); 6769 return -ENOMEM; 6770 } 6771 6772 path->leave_spinning = 1; 6773 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 6774 ins, size); 6775 if (ret) { 6776 btrfs_free_and_pin_reserved_extent(root, ins->objectid, 6777 root->leafsize); 6778 btrfs_free_path(path); 6779 return ret; 6780 } 6781 6782 leaf = path->nodes[0]; 6783 extent_item = btrfs_item_ptr(leaf, path->slots[0], 6784 struct btrfs_extent_item); 6785 btrfs_set_extent_refs(leaf, extent_item, 1); 6786 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 6787 btrfs_set_extent_flags(leaf, extent_item, 6788 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); 6789 
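	/*
	 * Two on-disk layouts are possible from here on.  With the
	 * SKINNY_METADATA incompat flag the tree block level lives in the
	 * key offset and the inline ref follows the extent item directly:
	 *
	 *   [ btrfs_extent_item | btrfs_extent_inline_ref ]
	 *
	 * Without it, a btrfs_tree_block_info (first key + level) sits in
	 * between:
	 *
	 *   [ btrfs_extent_item | btrfs_tree_block_info | btrfs_extent_inline_ref ]
	 */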
6790 if (skinny_metadata) { 6791 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 6792 } else { 6793 block_info = (struct btrfs_tree_block_info *)(extent_item + 1); 6794 btrfs_set_tree_block_key(leaf, block_info, key); 6795 btrfs_set_tree_block_level(leaf, block_info, level); 6796 iref = (struct btrfs_extent_inline_ref *)(block_info + 1); 6797 } 6798 6799 if (parent > 0) { 6800 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); 6801 btrfs_set_extent_inline_ref_type(leaf, iref, 6802 BTRFS_SHARED_BLOCK_REF_KEY); 6803 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 6804 } else { 6805 btrfs_set_extent_inline_ref_type(leaf, iref, 6806 BTRFS_TREE_BLOCK_REF_KEY); 6807 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); 6808 } 6809 6810 btrfs_mark_buffer_dirty(leaf); 6811 btrfs_free_path(path); 6812 6813 ret = update_block_group(root, ins->objectid, root->leafsize, 1); 6814 if (ret) { /* -ENOENT, logic error */ 6815 btrfs_err(fs_info, "update block group failed for %llu %llu", 6816 ins->objectid, ins->offset); 6817 BUG(); 6818 } 6819 6820 trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize); 6821 return ret; 6822 } 6823 6824 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 6825 struct btrfs_root *root, 6826 u64 root_objectid, u64 owner, 6827 u64 offset, struct btrfs_key *ins) 6828 { 6829 int ret; 6830 6831 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID); 6832 6833 ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid, 6834 ins->offset, 0, 6835 root_objectid, owner, offset, 6836 BTRFS_ADD_DELAYED_EXTENT, NULL, 0); 6837 return ret; 6838 } 6839 6840 /* 6841 * this is used by the tree logging recovery code. It records that 6842 * an extent has been allocated and makes sure to clear the free 6843 * space cache bits as well 6844 */ 6845 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 6846 struct btrfs_root *root, 6847 u64 root_objectid, u64 owner, u64 offset, 6848 struct btrfs_key *ins) 6849 { 6850 int ret; 6851 struct btrfs_block_group_cache *block_group; 6852 6853 /* 6854 * Mixed block groups will exclude before processing the log so we only 6855 * need to do the exlude dance if this fs isn't mixed. 
6856 */ 6857 if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) { 6858 ret = __exclude_logged_extent(root, ins->objectid, ins->offset); 6859 if (ret) 6860 return ret; 6861 } 6862 6863 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); 6864 if (!block_group) 6865 return -EINVAL; 6866 6867 ret = btrfs_update_reserved_bytes(block_group, ins->offset, 6868 RESERVE_ALLOC_NO_ACCOUNT); 6869 BUG_ON(ret); /* logic error */ 6870 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 6871 0, owner, offset, ins, 1); 6872 btrfs_put_block_group(block_group); 6873 return ret; 6874 } 6875 6876 static struct extent_buffer * 6877 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, 6878 u64 bytenr, u32 blocksize, int level) 6879 { 6880 struct extent_buffer *buf; 6881 6882 buf = btrfs_find_create_tree_block(root, bytenr, blocksize); 6883 if (!buf) 6884 return ERR_PTR(-ENOMEM); 6885 btrfs_set_header_generation(buf, trans->transid); 6886 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level); 6887 btrfs_tree_lock(buf); 6888 clean_tree_block(trans, root, buf); 6889 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); 6890 6891 btrfs_set_lock_blocking(buf); 6892 btrfs_set_buffer_uptodate(buf); 6893 6894 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 6895 /* 6896 * we allow two log transactions at a time, use different 6897 * EXENT bit to differentiate dirty pages. 6898 */ 6899 if (root->log_transid % 2 == 0) 6900 set_extent_dirty(&root->dirty_log_pages, buf->start, 6901 buf->start + buf->len - 1, GFP_NOFS); 6902 else 6903 set_extent_new(&root->dirty_log_pages, buf->start, 6904 buf->start + buf->len - 1, GFP_NOFS); 6905 } else { 6906 set_extent_dirty(&trans->transaction->dirty_pages, buf->start, 6907 buf->start + buf->len - 1, GFP_NOFS); 6908 } 6909 trans->blocks_used++; 6910 /* this returns a buffer locked for blocking */ 6911 return buf; 6912 } 6913 6914 static struct btrfs_block_rsv * 6915 use_block_rsv(struct btrfs_trans_handle *trans, 6916 struct btrfs_root *root, u32 blocksize) 6917 { 6918 struct btrfs_block_rsv *block_rsv; 6919 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 6920 int ret; 6921 bool global_updated = false; 6922 6923 block_rsv = get_block_rsv(trans, root); 6924 6925 if (unlikely(block_rsv->size == 0)) 6926 goto try_reserve; 6927 again: 6928 ret = block_rsv_use_bytes(block_rsv, blocksize); 6929 if (!ret) 6930 return block_rsv; 6931 6932 if (block_rsv->failfast) 6933 return ERR_PTR(ret); 6934 6935 if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) { 6936 global_updated = true; 6937 update_global_block_rsv(root->fs_info); 6938 goto again; 6939 } 6940 6941 if (btrfs_test_opt(root, ENOSPC_DEBUG)) { 6942 static DEFINE_RATELIMIT_STATE(_rs, 6943 DEFAULT_RATELIMIT_INTERVAL * 10, 6944 /*DEFAULT_RATELIMIT_BURST*/ 1); 6945 if (__ratelimit(&_rs)) 6946 WARN(1, KERN_DEBUG 6947 "BTRFS: block rsv returned %d\n", ret); 6948 } 6949 try_reserve: 6950 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 6951 BTRFS_RESERVE_NO_FLUSH); 6952 if (!ret) 6953 return block_rsv; 6954 /* 6955 * If we couldn't reserve metadata bytes try and use some from 6956 * the global reserve if its space type is the same as the global 6957 * reservation. 
6958 */ 6959 if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL && 6960 block_rsv->space_info == global_rsv->space_info) { 6961 ret = block_rsv_use_bytes(global_rsv, blocksize); 6962 if (!ret) 6963 return global_rsv; 6964 } 6965 return ERR_PTR(ret); 6966 } 6967 6968 static void unuse_block_rsv(struct btrfs_fs_info *fs_info, 6969 struct btrfs_block_rsv *block_rsv, u32 blocksize) 6970 { 6971 block_rsv_add_bytes(block_rsv, blocksize, 0); 6972 block_rsv_release_bytes(fs_info, block_rsv, NULL, 0); 6973 } 6974 6975 /* 6976 * finds a free extent and does all the dirty work required for allocation 6977 * returns the key for the extent through ins, and a tree buffer for 6978 * the first block of the extent through buf. 6979 * 6980 * returns the tree buffer or NULL. 6981 */ 6982 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, 6983 struct btrfs_root *root, u32 blocksize, 6984 u64 parent, u64 root_objectid, 6985 struct btrfs_disk_key *key, int level, 6986 u64 hint, u64 empty_size) 6987 { 6988 struct btrfs_key ins; 6989 struct btrfs_block_rsv *block_rsv; 6990 struct extent_buffer *buf; 6991 u64 flags = 0; 6992 int ret; 6993 bool skinny_metadata = btrfs_fs_incompat(root->fs_info, 6994 SKINNY_METADATA); 6995 6996 block_rsv = use_block_rsv(trans, root, blocksize); 6997 if (IS_ERR(block_rsv)) 6998 return ERR_CAST(block_rsv); 6999 7000 ret = btrfs_reserve_extent(root, blocksize, blocksize, 7001 empty_size, hint, &ins, 0); 7002 if (ret) { 7003 unuse_block_rsv(root->fs_info, block_rsv, blocksize); 7004 return ERR_PTR(ret); 7005 } 7006 7007 buf = btrfs_init_new_buffer(trans, root, ins.objectid, 7008 blocksize, level); 7009 BUG_ON(IS_ERR(buf)); /* -ENOMEM */ 7010 7011 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 7012 if (parent == 0) 7013 parent = ins.objectid; 7014 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 7015 } else 7016 BUG_ON(parent > 0); 7017 7018 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 7019 struct btrfs_delayed_extent_op *extent_op; 7020 extent_op = btrfs_alloc_delayed_extent_op(); 7021 BUG_ON(!extent_op); /* -ENOMEM */ 7022 if (key) 7023 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 7024 else 7025 memset(&extent_op->key, 0, sizeof(extent_op->key)); 7026 extent_op->flags_to_set = flags; 7027 if (skinny_metadata) 7028 extent_op->update_key = 0; 7029 else 7030 extent_op->update_key = 1; 7031 extent_op->update_flags = 1; 7032 extent_op->is_data = 0; 7033 extent_op->level = level; 7034 7035 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, 7036 ins.objectid, 7037 ins.offset, parent, root_objectid, 7038 level, BTRFS_ADD_DELAYED_EXTENT, 7039 extent_op, 0); 7040 BUG_ON(ret); /* -ENOMEM */ 7041 } 7042 return buf; 7043 } 7044 7045 struct walk_control { 7046 u64 refs[BTRFS_MAX_LEVEL]; 7047 u64 flags[BTRFS_MAX_LEVEL]; 7048 struct btrfs_key update_progress; 7049 int stage; 7050 int level; 7051 int shared_level; 7052 int update_ref; 7053 int keep_locks; 7054 int reada_slot; 7055 int reada_count; 7056 int for_reloc; 7057 }; 7058 7059 #define DROP_REFERENCE 1 7060 #define UPDATE_BACKREF 2 7061 7062 static noinline void reada_walk_down(struct btrfs_trans_handle *trans, 7063 struct btrfs_root *root, 7064 struct walk_control *wc, 7065 struct btrfs_path *path) 7066 { 7067 u64 bytenr; 7068 u64 generation; 7069 u64 refs; 7070 u64 flags; 7071 u32 nritems; 7072 u32 blocksize; 7073 struct btrfs_key key; 7074 struct extent_buffer *eb; 7075 int ret; 7076 int slot; 7077 int nread = 0; 7078 7079 if (path->slots[wc->level] < wc->reada_slot) { 7080 wc->reada_count = wc->reada_count * 
2 / 3; 7081 wc->reada_count = max(wc->reada_count, 2); 7082 } else { 7083 wc->reada_count = wc->reada_count * 3 / 2; 7084 wc->reada_count = min_t(int, wc->reada_count, 7085 BTRFS_NODEPTRS_PER_BLOCK(root)); 7086 } 7087 7088 eb = path->nodes[wc->level]; 7089 nritems = btrfs_header_nritems(eb); 7090 blocksize = btrfs_level_size(root, wc->level - 1); 7091 7092 for (slot = path->slots[wc->level]; slot < nritems; slot++) { 7093 if (nread >= wc->reada_count) 7094 break; 7095 7096 cond_resched(); 7097 bytenr = btrfs_node_blockptr(eb, slot); 7098 generation = btrfs_node_ptr_generation(eb, slot); 7099 7100 if (slot == path->slots[wc->level]) 7101 goto reada; 7102 7103 if (wc->stage == UPDATE_BACKREF && 7104 generation <= root->root_key.offset) 7105 continue; 7106 7107 /* We don't lock the tree block, it's OK to be racy here */ 7108 ret = btrfs_lookup_extent_info(trans, root, bytenr, 7109 wc->level - 1, 1, &refs, 7110 &flags); 7111 /* We don't care about errors in readahead. */ 7112 if (ret < 0) 7113 continue; 7114 BUG_ON(refs == 0); 7115 7116 if (wc->stage == DROP_REFERENCE) { 7117 if (refs == 1) 7118 goto reada; 7119 7120 if (wc->level == 1 && 7121 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 7122 continue; 7123 if (!wc->update_ref || 7124 generation <= root->root_key.offset) 7125 continue; 7126 btrfs_node_key_to_cpu(eb, &key, slot); 7127 ret = btrfs_comp_cpu_keys(&key, 7128 &wc->update_progress); 7129 if (ret < 0) 7130 continue; 7131 } else { 7132 if (wc->level == 1 && 7133 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 7134 continue; 7135 } 7136 reada: 7137 ret = readahead_tree_block(root, bytenr, blocksize, 7138 generation); 7139 if (ret) 7140 break; 7141 nread++; 7142 } 7143 wc->reada_slot = slot; 7144 } 7145 7146 /* 7147 * helper to process tree block while walking down the tree. 7148 * 7149 * when wc->stage == UPDATE_BACKREF, this function updates 7150 * back refs for pointers in the block. 7151 * 7152 * NOTE: return value 1 means we should stop walking down. 7153 */ 7154 static noinline int walk_down_proc(struct btrfs_trans_handle *trans, 7155 struct btrfs_root *root, 7156 struct btrfs_path *path, 7157 struct walk_control *wc, int lookup_info) 7158 { 7159 int level = wc->level; 7160 struct extent_buffer *eb = path->nodes[level]; 7161 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; 7162 int ret; 7163 7164 if (wc->stage == UPDATE_BACKREF && 7165 btrfs_header_owner(eb) != root->root_key.objectid) 7166 return 1; 7167 7168 /* 7169 * when reference count of tree block is 1, it won't increase 7170 * again. once full backref flag is set, we never clear it. 
7171 */ 7172 if (lookup_info && 7173 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || 7174 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { 7175 BUG_ON(!path->locks[level]); 7176 ret = btrfs_lookup_extent_info(trans, root, 7177 eb->start, level, 1, 7178 &wc->refs[level], 7179 &wc->flags[level]); 7180 BUG_ON(ret == -ENOMEM); 7181 if (ret) 7182 return ret; 7183 BUG_ON(wc->refs[level] == 0); 7184 } 7185 7186 if (wc->stage == DROP_REFERENCE) { 7187 if (wc->refs[level] > 1) 7188 return 1; 7189 7190 if (path->locks[level] && !wc->keep_locks) { 7191 btrfs_tree_unlock_rw(eb, path->locks[level]); 7192 path->locks[level] = 0; 7193 } 7194 return 0; 7195 } 7196 7197 /* wc->stage == UPDATE_BACKREF */ 7198 if (!(wc->flags[level] & flag)) { 7199 BUG_ON(!path->locks[level]); 7200 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc); 7201 BUG_ON(ret); /* -ENOMEM */ 7202 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc); 7203 BUG_ON(ret); /* -ENOMEM */ 7204 ret = btrfs_set_disk_extent_flags(trans, root, eb->start, 7205 eb->len, flag, 7206 btrfs_header_level(eb), 0); 7207 BUG_ON(ret); /* -ENOMEM */ 7208 wc->flags[level] |= flag; 7209 } 7210 7211 /* 7212 * the block is shared by multiple trees, so it's not good to 7213 * keep the tree lock 7214 */ 7215 if (path->locks[level] && level > 0) { 7216 btrfs_tree_unlock_rw(eb, path->locks[level]); 7217 path->locks[level] = 0; 7218 } 7219 return 0; 7220 } 7221 7222 /* 7223 * helper to process tree block pointer. 7224 * 7225 * when wc->stage == DROP_REFERENCE, this function checks 7226 * reference count of the block pointed to. if the block 7227 * is shared and we need update back refs for the subtree 7228 * rooted at the block, this function changes wc->stage to 7229 * UPDATE_BACKREF. if the block is shared and there is no 7230 * need to update back, this function drops the reference 7231 * to the block. 7232 * 7233 * NOTE: return value 1 means we should stop walking down. 
7234 */ 7235 static noinline int do_walk_down(struct btrfs_trans_handle *trans, 7236 struct btrfs_root *root, 7237 struct btrfs_path *path, 7238 struct walk_control *wc, int *lookup_info) 7239 { 7240 u64 bytenr; 7241 u64 generation; 7242 u64 parent; 7243 u32 blocksize; 7244 struct btrfs_key key; 7245 struct extent_buffer *next; 7246 int level = wc->level; 7247 int reada = 0; 7248 int ret = 0; 7249 7250 generation = btrfs_node_ptr_generation(path->nodes[level], 7251 path->slots[level]); 7252 /* 7253 * if the lower level block was created before the snapshot 7254 * was created, we know there is no need to update back refs 7255 * for the subtree 7256 */ 7257 if (wc->stage == UPDATE_BACKREF && 7258 generation <= root->root_key.offset) { 7259 *lookup_info = 1; 7260 return 1; 7261 } 7262 7263 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); 7264 blocksize = btrfs_level_size(root, level - 1); 7265 7266 next = btrfs_find_tree_block(root, bytenr, blocksize); 7267 if (!next) { 7268 next = btrfs_find_create_tree_block(root, bytenr, blocksize); 7269 if (!next) 7270 return -ENOMEM; 7271 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next, 7272 level - 1); 7273 reada = 1; 7274 } 7275 btrfs_tree_lock(next); 7276 btrfs_set_lock_blocking(next); 7277 7278 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1, 7279 &wc->refs[level - 1], 7280 &wc->flags[level - 1]); 7281 if (ret < 0) { 7282 btrfs_tree_unlock(next); 7283 return ret; 7284 } 7285 7286 if (unlikely(wc->refs[level - 1] == 0)) { 7287 btrfs_err(root->fs_info, "Missing references."); 7288 BUG(); 7289 } 7290 *lookup_info = 0; 7291 7292 if (wc->stage == DROP_REFERENCE) { 7293 if (wc->refs[level - 1] > 1) { 7294 if (level == 1 && 7295 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 7296 goto skip; 7297 7298 if (!wc->update_ref || 7299 generation <= root->root_key.offset) 7300 goto skip; 7301 7302 btrfs_node_key_to_cpu(path->nodes[level], &key, 7303 path->slots[level]); 7304 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); 7305 if (ret < 0) 7306 goto skip; 7307 7308 wc->stage = UPDATE_BACKREF; 7309 wc->shared_level = level - 1; 7310 } 7311 } else { 7312 if (level == 1 && 7313 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 7314 goto skip; 7315 } 7316 7317 if (!btrfs_buffer_uptodate(next, generation, 0)) { 7318 btrfs_tree_unlock(next); 7319 free_extent_buffer(next); 7320 next = NULL; 7321 *lookup_info = 1; 7322 } 7323 7324 if (!next) { 7325 if (reada && level == 1) 7326 reada_walk_down(trans, root, wc, path); 7327 next = read_tree_block(root, bytenr, blocksize, generation); 7328 if (!next || !extent_buffer_uptodate(next)) { 7329 free_extent_buffer(next); 7330 return -EIO; 7331 } 7332 btrfs_tree_lock(next); 7333 btrfs_set_lock_blocking(next); 7334 } 7335 7336 level--; 7337 BUG_ON(level != btrfs_header_level(next)); 7338 path->nodes[level] = next; 7339 path->slots[level] = 0; 7340 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 7341 wc->level = level; 7342 if (wc->level == 1) 7343 wc->reada_slot = 0; 7344 return 0; 7345 skip: 7346 wc->refs[level - 1] = 0; 7347 wc->flags[level - 1] = 0; 7348 if (wc->stage == DROP_REFERENCE) { 7349 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 7350 parent = path->nodes[level]->start; 7351 } else { 7352 BUG_ON(root->root_key.objectid != 7353 btrfs_header_owner(path->nodes[level])); 7354 parent = 0; 7355 } 7356 7357 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent, 7358 root->root_key.objectid, level - 1, 0, 0); 7359 BUG_ON(ret); /* -ENOMEM */ 7360 } 7361 
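	/*
	 * Skipping this subtree: in the DROP_REFERENCE stage we have already
	 * dropped our reference on the child block above, in the
	 * UPDATE_BACKREF stage it already carries a full backref.  Unlock
	 * and release the child and return 1 so the caller advances to the
	 * next slot.
	 */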
btrfs_tree_unlock(next); 7362 free_extent_buffer(next); 7363 *lookup_info = 1; 7364 return 1; 7365 } 7366 7367 /* 7368 * helper to process tree block while walking up the tree. 7369 * 7370 * when wc->stage == DROP_REFERENCE, this function drops 7371 * reference count on the block. 7372 * 7373 * when wc->stage == UPDATE_BACKREF, this function changes 7374 * wc->stage back to DROP_REFERENCE if we changed wc->stage 7375 * to UPDATE_BACKREF previously while processing the block. 7376 * 7377 * NOTE: return value 1 means we should stop walking up. 7378 */ 7379 static noinline int walk_up_proc(struct btrfs_trans_handle *trans, 7380 struct btrfs_root *root, 7381 struct btrfs_path *path, 7382 struct walk_control *wc) 7383 { 7384 int ret; 7385 int level = wc->level; 7386 struct extent_buffer *eb = path->nodes[level]; 7387 u64 parent = 0; 7388 7389 if (wc->stage == UPDATE_BACKREF) { 7390 BUG_ON(wc->shared_level < level); 7391 if (level < wc->shared_level) 7392 goto out; 7393 7394 ret = find_next_key(path, level + 1, &wc->update_progress); 7395 if (ret > 0) 7396 wc->update_ref = 0; 7397 7398 wc->stage = DROP_REFERENCE; 7399 wc->shared_level = -1; 7400 path->slots[level] = 0; 7401 7402 /* 7403 * check reference count again if the block isn't locked. 7404 * we should start walking down the tree again if reference 7405 * count is one. 7406 */ 7407 if (!path->locks[level]) { 7408 BUG_ON(level == 0); 7409 btrfs_tree_lock(eb); 7410 btrfs_set_lock_blocking(eb); 7411 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 7412 7413 ret = btrfs_lookup_extent_info(trans, root, 7414 eb->start, level, 1, 7415 &wc->refs[level], 7416 &wc->flags[level]); 7417 if (ret < 0) { 7418 btrfs_tree_unlock_rw(eb, path->locks[level]); 7419 path->locks[level] = 0; 7420 return ret; 7421 } 7422 BUG_ON(wc->refs[level] == 0); 7423 if (wc->refs[level] == 1) { 7424 btrfs_tree_unlock_rw(eb, path->locks[level]); 7425 path->locks[level] = 0; 7426 return 1; 7427 } 7428 } 7429 } 7430 7431 /* wc->stage == DROP_REFERENCE */ 7432 BUG_ON(wc->refs[level] > 1 && !path->locks[level]); 7433 7434 if (wc->refs[level] == 1) { 7435 if (level == 0) { 7436 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 7437 ret = btrfs_dec_ref(trans, root, eb, 1, 7438 wc->for_reloc); 7439 else 7440 ret = btrfs_dec_ref(trans, root, eb, 0, 7441 wc->for_reloc); 7442 BUG_ON(ret); /* -ENOMEM */ 7443 } 7444 /* make block locked assertion in clean_tree_block happy */ 7445 if (!path->locks[level] && 7446 btrfs_header_generation(eb) == trans->transid) { 7447 btrfs_tree_lock(eb); 7448 btrfs_set_lock_blocking(eb); 7449 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 7450 } 7451 clean_tree_block(trans, root, eb); 7452 } 7453 7454 if (eb == root->node) { 7455 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 7456 parent = eb->start; 7457 else 7458 BUG_ON(root->root_key.objectid != 7459 btrfs_header_owner(eb)); 7460 } else { 7461 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 7462 parent = path->nodes[level + 1]->start; 7463 else 7464 BUG_ON(root->root_key.objectid != 7465 btrfs_header_owner(path->nodes[level + 1])); 7466 } 7467 7468 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1); 7469 out: 7470 wc->refs[level] = 0; 7471 wc->flags[level] = 0; 7472 return 0; 7473 } 7474 7475 static noinline int walk_down_tree(struct btrfs_trans_handle *trans, 7476 struct btrfs_root *root, 7477 struct btrfs_path *path, 7478 struct walk_control *wc) 7479 { 7480 int level = wc->level; 7481 int lookup_info = 1; 7482 int ret; 7483 7484 while (level >= 0) { 7485 ret = 
walk_down_proc(trans, root, path, wc, lookup_info); 7486 if (ret > 0) 7487 break; 7488 7489 if (level == 0) 7490 break; 7491 7492 if (path->slots[level] >= 7493 btrfs_header_nritems(path->nodes[level])) 7494 break; 7495 7496 ret = do_walk_down(trans, root, path, wc, &lookup_info); 7497 if (ret > 0) { 7498 path->slots[level]++; 7499 continue; 7500 } else if (ret < 0) 7501 return ret; 7502 level = wc->level; 7503 } 7504 return 0; 7505 } 7506 7507 static noinline int walk_up_tree(struct btrfs_trans_handle *trans, 7508 struct btrfs_root *root, 7509 struct btrfs_path *path, 7510 struct walk_control *wc, int max_level) 7511 { 7512 int level = wc->level; 7513 int ret; 7514 7515 path->slots[level] = btrfs_header_nritems(path->nodes[level]); 7516 while (level < max_level && path->nodes[level]) { 7517 wc->level = level; 7518 if (path->slots[level] + 1 < 7519 btrfs_header_nritems(path->nodes[level])) { 7520 path->slots[level]++; 7521 return 0; 7522 } else { 7523 ret = walk_up_proc(trans, root, path, wc); 7524 if (ret > 0) 7525 return 0; 7526 7527 if (path->locks[level]) { 7528 btrfs_tree_unlock_rw(path->nodes[level], 7529 path->locks[level]); 7530 path->locks[level] = 0; 7531 } 7532 free_extent_buffer(path->nodes[level]); 7533 path->nodes[level] = NULL; 7534 level++; 7535 } 7536 } 7537 return 1; 7538 } 7539 7540 /* 7541 * drop a subvolume tree. 7542 * 7543 * this function traverses the tree freeing any blocks that only 7544 * referenced by the tree. 7545 * 7546 * when a shared tree block is found. this function decreases its 7547 * reference count by one. if update_ref is true, this function 7548 * also make sure backrefs for the shared block and all lower level 7549 * blocks are properly updated. 7550 * 7551 * If called with for_reloc == 0, may exit early with -EAGAIN 7552 */ 7553 int btrfs_drop_snapshot(struct btrfs_root *root, 7554 struct btrfs_block_rsv *block_rsv, int update_ref, 7555 int for_reloc) 7556 { 7557 struct btrfs_path *path; 7558 struct btrfs_trans_handle *trans; 7559 struct btrfs_root *tree_root = root->fs_info->tree_root; 7560 struct btrfs_root_item *root_item = &root->root_item; 7561 struct walk_control *wc; 7562 struct btrfs_key key; 7563 int err = 0; 7564 int ret; 7565 int level; 7566 bool root_dropped = false; 7567 7568 path = btrfs_alloc_path(); 7569 if (!path) { 7570 err = -ENOMEM; 7571 goto out; 7572 } 7573 7574 wc = kzalloc(sizeof(*wc), GFP_NOFS); 7575 if (!wc) { 7576 btrfs_free_path(path); 7577 err = -ENOMEM; 7578 goto out; 7579 } 7580 7581 trans = btrfs_start_transaction(tree_root, 0); 7582 if (IS_ERR(trans)) { 7583 err = PTR_ERR(trans); 7584 goto out_free; 7585 } 7586 7587 if (block_rsv) 7588 trans->block_rsv = block_rsv; 7589 7590 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 7591 level = btrfs_header_level(root->node); 7592 path->nodes[level] = btrfs_lock_root_node(root); 7593 btrfs_set_lock_blocking(path->nodes[level]); 7594 path->slots[level] = 0; 7595 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 7596 memset(&wc->update_progress, 0, 7597 sizeof(wc->update_progress)); 7598 } else { 7599 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 7600 memcpy(&wc->update_progress, &key, 7601 sizeof(wc->update_progress)); 7602 7603 level = root_item->drop_level; 7604 BUG_ON(level == 0); 7605 path->lowest_level = level; 7606 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 7607 path->lowest_level = 0; 7608 if (ret < 0) { 7609 err = ret; 7610 goto out_end_trans; 7611 } 7612 WARN_ON(ret > 0); 7613 7614 /* 7615 * unlock our path, this is safe because 
only this 7616 * function is allowed to delete this snapshot 7617 */ 7618 btrfs_unlock_up_safe(path, 0); 7619 7620 level = btrfs_header_level(root->node); 7621 while (1) { 7622 btrfs_tree_lock(path->nodes[level]); 7623 btrfs_set_lock_blocking(path->nodes[level]); 7624 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 7625 7626 ret = btrfs_lookup_extent_info(trans, root, 7627 path->nodes[level]->start, 7628 level, 1, &wc->refs[level], 7629 &wc->flags[level]); 7630 if (ret < 0) { 7631 err = ret; 7632 goto out_end_trans; 7633 } 7634 BUG_ON(wc->refs[level] == 0); 7635 7636 if (level == root_item->drop_level) 7637 break; 7638 7639 btrfs_tree_unlock(path->nodes[level]); 7640 path->locks[level] = 0; 7641 WARN_ON(wc->refs[level] != 1); 7642 level--; 7643 } 7644 } 7645 7646 wc->level = level; 7647 wc->shared_level = -1; 7648 wc->stage = DROP_REFERENCE; 7649 wc->update_ref = update_ref; 7650 wc->keep_locks = 0; 7651 wc->for_reloc = for_reloc; 7652 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root); 7653 7654 while (1) { 7655 7656 ret = walk_down_tree(trans, root, path, wc); 7657 if (ret < 0) { 7658 err = ret; 7659 break; 7660 } 7661 7662 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); 7663 if (ret < 0) { 7664 err = ret; 7665 break; 7666 } 7667 7668 if (ret > 0) { 7669 BUG_ON(wc->stage != DROP_REFERENCE); 7670 break; 7671 } 7672 7673 if (wc->stage == DROP_REFERENCE) { 7674 level = wc->level; 7675 btrfs_node_key(path->nodes[level], 7676 &root_item->drop_progress, 7677 path->slots[level]); 7678 root_item->drop_level = level; 7679 } 7680 7681 BUG_ON(wc->level == 0); 7682 if (btrfs_should_end_transaction(trans, tree_root) || 7683 (!for_reloc && btrfs_need_cleaner_sleep(root))) { 7684 ret = btrfs_update_root(trans, tree_root, 7685 &root->root_key, 7686 root_item); 7687 if (ret) { 7688 btrfs_abort_transaction(trans, tree_root, ret); 7689 err = ret; 7690 goto out_end_trans; 7691 } 7692 7693 btrfs_end_transaction_throttle(trans, tree_root); 7694 if (!for_reloc && btrfs_need_cleaner_sleep(root)) { 7695 pr_debug("BTRFS: drop snapshot early exit\n"); 7696 err = -EAGAIN; 7697 goto out_free; 7698 } 7699 7700 trans = btrfs_start_transaction(tree_root, 0); 7701 if (IS_ERR(trans)) { 7702 err = PTR_ERR(trans); 7703 goto out_free; 7704 } 7705 if (block_rsv) 7706 trans->block_rsv = block_rsv; 7707 } 7708 } 7709 btrfs_release_path(path); 7710 if (err) 7711 goto out_end_trans; 7712 7713 ret = btrfs_del_root(trans, tree_root, &root->root_key); 7714 if (ret) { 7715 btrfs_abort_transaction(trans, tree_root, ret); 7716 goto out_end_trans; 7717 } 7718 7719 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 7720 ret = btrfs_find_root(tree_root, &root->root_key, path, 7721 NULL, NULL); 7722 if (ret < 0) { 7723 btrfs_abort_transaction(trans, tree_root, ret); 7724 err = ret; 7725 goto out_end_trans; 7726 } else if (ret > 0) { 7727 /* if we fail to delete the orphan item this time 7728 * around, it'll get picked up the next time. 7729 * 7730 * The most common failure here is just -ENOENT. 
7731 */ 7732 btrfs_del_orphan_item(trans, tree_root, 7733 root->root_key.objectid); 7734 } 7735 } 7736 7737 if (root->in_radix) { 7738 btrfs_drop_and_free_fs_root(tree_root->fs_info, root); 7739 } else { 7740 free_extent_buffer(root->node); 7741 free_extent_buffer(root->commit_root); 7742 btrfs_put_fs_root(root); 7743 } 7744 root_dropped = true; 7745 out_end_trans: 7746 btrfs_end_transaction_throttle(trans, tree_root); 7747 out_free: 7748 kfree(wc); 7749 btrfs_free_path(path); 7750 out: 7751 /* 7752 * So if we need to stop dropping the snapshot for whatever reason we 7753 * need to make sure to add it back to the dead root list so that we 7754 * keep trying to do the work later. This also cleans up roots if we 7755 * don't have it in the radix (like when we recover after a power fail 7756 * or unmount) so we don't leak memory. 7757 */ 7758 if (!for_reloc && root_dropped == false) 7759 btrfs_add_dead_root(root); 7760 if (err && err != -EAGAIN) 7761 btrfs_std_error(root->fs_info, err); 7762 return err; 7763 } 7764 7765 /* 7766 * drop subtree rooted at tree block 'node'. 7767 * 7768 * NOTE: this function will unlock and release tree block 'node' 7769 * only used by relocation code 7770 */ 7771 int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 7772 struct btrfs_root *root, 7773 struct extent_buffer *node, 7774 struct extent_buffer *parent) 7775 { 7776 struct btrfs_path *path; 7777 struct walk_control *wc; 7778 int level; 7779 int parent_level; 7780 int ret = 0; 7781 int wret; 7782 7783 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 7784 7785 path = btrfs_alloc_path(); 7786 if (!path) 7787 return -ENOMEM; 7788 7789 wc = kzalloc(sizeof(*wc), GFP_NOFS); 7790 if (!wc) { 7791 btrfs_free_path(path); 7792 return -ENOMEM; 7793 } 7794 7795 btrfs_assert_tree_locked(parent); 7796 parent_level = btrfs_header_level(parent); 7797 extent_buffer_get(parent); 7798 path->nodes[parent_level] = parent; 7799 path->slots[parent_level] = btrfs_header_nritems(parent); 7800 7801 btrfs_assert_tree_locked(node); 7802 level = btrfs_header_level(node); 7803 path->nodes[level] = node; 7804 path->slots[level] = 0; 7805 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 7806 7807 wc->refs[parent_level] = 1; 7808 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; 7809 wc->level = level; 7810 wc->shared_level = -1; 7811 wc->stage = DROP_REFERENCE; 7812 wc->update_ref = 0; 7813 wc->keep_locks = 1; 7814 wc->for_reloc = 1; 7815 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root); 7816 7817 while (1) { 7818 wret = walk_down_tree(trans, root, path, wc); 7819 if (wret < 0) { 7820 ret = wret; 7821 break; 7822 } 7823 7824 wret = walk_up_tree(trans, root, path, wc, parent_level); 7825 if (wret < 0) 7826 ret = wret; 7827 if (wret != 0) 7828 break; 7829 } 7830 7831 kfree(wc); 7832 btrfs_free_path(path); 7833 return ret; 7834 } 7835 7836 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) 7837 { 7838 u64 num_devices; 7839 u64 stripped; 7840 7841 /* 7842 * if restripe for this chunk_type is on pick target profile and 7843 * return, otherwise do the usual balance 7844 */ 7845 stripped = get_restripe_target(root->fs_info, flags); 7846 if (stripped) 7847 return extended_to_chunk(stripped); 7848 7849 /* 7850 * we add in the count of missing devices because we want 7851 * to make sure that any RAID levels on a degraded FS 7852 * continue to be honored. 
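 * For example, a two-device RAID1 filesystem mounted degraded with one
 * device missing still sees num_devices == 2 here, so the RAID1 profile
 * is kept rather than being converted to DUP.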
7853 */ 7854 num_devices = root->fs_info->fs_devices->rw_devices + 7855 root->fs_info->fs_devices->missing_devices; 7856 7857 stripped = BTRFS_BLOCK_GROUP_RAID0 | 7858 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 | 7859 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10; 7860 7861 if (num_devices == 1) { 7862 stripped |= BTRFS_BLOCK_GROUP_DUP; 7863 stripped = flags & ~stripped; 7864 7865 /* turn raid0 into single device chunks */ 7866 if (flags & BTRFS_BLOCK_GROUP_RAID0) 7867 return stripped; 7868 7869 /* turn mirroring into duplication */ 7870 if (flags & (BTRFS_BLOCK_GROUP_RAID1 | 7871 BTRFS_BLOCK_GROUP_RAID10)) 7872 return stripped | BTRFS_BLOCK_GROUP_DUP; 7873 } else { 7874 /* they already had raid on here, just return */ 7875 if (flags & stripped) 7876 return flags; 7877 7878 stripped |= BTRFS_BLOCK_GROUP_DUP; 7879 stripped = flags & ~stripped; 7880 7881 /* switch duplicated blocks with raid1 */ 7882 if (flags & BTRFS_BLOCK_GROUP_DUP) 7883 return stripped | BTRFS_BLOCK_GROUP_RAID1; 7884 7885 /* this is drive concat, leave it alone */ 7886 } 7887 7888 return flags; 7889 } 7890 7891 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force) 7892 { 7893 struct btrfs_space_info *sinfo = cache->space_info; 7894 u64 num_bytes; 7895 u64 min_allocable_bytes; 7896 int ret = -ENOSPC; 7897 7898 7899 /* 7900 * We need some metadata space and system metadata space for 7901 * allocating chunks in some corner cases until we force to set 7902 * it to be readonly. 7903 */ 7904 if ((sinfo->flags & 7905 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) && 7906 !force) 7907 min_allocable_bytes = 1 * 1024 * 1024; 7908 else 7909 min_allocable_bytes = 0; 7910 7911 spin_lock(&sinfo->lock); 7912 spin_lock(&cache->lock); 7913 7914 if (cache->ro) { 7915 ret = 0; 7916 goto out; 7917 } 7918 7919 num_bytes = cache->key.offset - cache->reserved - cache->pinned - 7920 cache->bytes_super - btrfs_block_group_used(&cache->item); 7921 7922 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned + 7923 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes + 7924 min_allocable_bytes <= sinfo->total_bytes) { 7925 sinfo->bytes_readonly += num_bytes; 7926 cache->ro = 1; 7927 ret = 0; 7928 } 7929 out: 7930 spin_unlock(&cache->lock); 7931 spin_unlock(&sinfo->lock); 7932 return ret; 7933 } 7934 7935 int btrfs_set_block_group_ro(struct btrfs_root *root, 7936 struct btrfs_block_group_cache *cache) 7937 7938 { 7939 struct btrfs_trans_handle *trans; 7940 u64 alloc_flags; 7941 int ret; 7942 7943 BUG_ON(cache->ro); 7944 7945 trans = btrfs_join_transaction(root); 7946 if (IS_ERR(trans)) 7947 return PTR_ERR(trans); 7948 7949 alloc_flags = update_block_group_flags(root, cache->flags); 7950 if (alloc_flags != cache->flags) { 7951 ret = do_chunk_alloc(trans, root, alloc_flags, 7952 CHUNK_ALLOC_FORCE); 7953 if (ret < 0) 7954 goto out; 7955 } 7956 7957 ret = set_block_group_ro(cache, 0); 7958 if (!ret) 7959 goto out; 7960 alloc_flags = get_alloc_profile(root, cache->space_info->flags); 7961 ret = do_chunk_alloc(trans, root, alloc_flags, 7962 CHUNK_ALLOC_FORCE); 7963 if (ret < 0) 7964 goto out; 7965 ret = set_block_group_ro(cache, 0); 7966 out: 7967 btrfs_end_transaction(trans, root); 7968 return ret; 7969 } 7970 7971 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, 7972 struct btrfs_root *root, u64 type) 7973 { 7974 u64 alloc_flags = get_alloc_profile(root, type); 7975 return do_chunk_alloc(trans, root, alloc_flags, 7976 CHUNK_ALLOC_FORCE); 7977 } 7978 7979 /* 7980 * helper to 
account the unused space of all the readonly block group in the 7981 * list. takes mirrors into account. 7982 */ 7983 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list) 7984 { 7985 struct btrfs_block_group_cache *block_group; 7986 u64 free_bytes = 0; 7987 int factor; 7988 7989 list_for_each_entry(block_group, groups_list, list) { 7990 spin_lock(&block_group->lock); 7991 7992 if (!block_group->ro) { 7993 spin_unlock(&block_group->lock); 7994 continue; 7995 } 7996 7997 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 | 7998 BTRFS_BLOCK_GROUP_RAID10 | 7999 BTRFS_BLOCK_GROUP_DUP)) 8000 factor = 2; 8001 else 8002 factor = 1; 8003 8004 free_bytes += (block_group->key.offset - 8005 btrfs_block_group_used(&block_group->item)) * 8006 factor; 8007 8008 spin_unlock(&block_group->lock); 8009 } 8010 8011 return free_bytes; 8012 } 8013 8014 /* 8015 * helper to account the unused space of all the readonly block group in the 8016 * space_info. takes mirrors into account. 8017 */ 8018 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) 8019 { 8020 int i; 8021 u64 free_bytes = 0; 8022 8023 spin_lock(&sinfo->lock); 8024 8025 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 8026 if (!list_empty(&sinfo->block_groups[i])) 8027 free_bytes += __btrfs_get_ro_block_group_free_space( 8028 &sinfo->block_groups[i]); 8029 8030 spin_unlock(&sinfo->lock); 8031 8032 return free_bytes; 8033 } 8034 8035 void btrfs_set_block_group_rw(struct btrfs_root *root, 8036 struct btrfs_block_group_cache *cache) 8037 { 8038 struct btrfs_space_info *sinfo = cache->space_info; 8039 u64 num_bytes; 8040 8041 BUG_ON(!cache->ro); 8042 8043 spin_lock(&sinfo->lock); 8044 spin_lock(&cache->lock); 8045 num_bytes = cache->key.offset - cache->reserved - cache->pinned - 8046 cache->bytes_super - btrfs_block_group_used(&cache->item); 8047 sinfo->bytes_readonly -= num_bytes; 8048 cache->ro = 0; 8049 spin_unlock(&cache->lock); 8050 spin_unlock(&sinfo->lock); 8051 } 8052 8053 /* 8054 * checks to see if its even possible to relocate this block group. 8055 * 8056 * @return - -1 if it's not a good idea to relocate this block group, 0 if its 8057 * ok to go ahead and try. 8058 */ 8059 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) 8060 { 8061 struct btrfs_block_group_cache *block_group; 8062 struct btrfs_space_info *space_info; 8063 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 8064 struct btrfs_device *device; 8065 struct btrfs_trans_handle *trans; 8066 u64 min_free; 8067 u64 dev_min = 1; 8068 u64 dev_nr = 0; 8069 u64 target; 8070 int index; 8071 int full = 0; 8072 int ret = 0; 8073 8074 block_group = btrfs_lookup_block_group(root->fs_info, bytenr); 8075 8076 /* odd, couldn't find the block group, leave it alone */ 8077 if (!block_group) 8078 return -1; 8079 8080 min_free = btrfs_block_group_used(&block_group->item); 8081 8082 /* no bytes used, we're good */ 8083 if (!min_free) 8084 goto out; 8085 8086 space_info = block_group->space_info; 8087 spin_lock(&space_info->lock); 8088 8089 full = space_info->full; 8090 8091 /* 8092 * if this is the last block group we have in this space, we can't 8093 * relocate it unless we're able to allocate a new chunk below. 8094 * 8095 * Otherwise, we need to make sure we have room in the space to handle 8096 * all of the extents from this block group. 
If we can, we're good 8097 */ 8098 if ((space_info->total_bytes != block_group->key.offset) && 8099 (space_info->bytes_used + space_info->bytes_reserved + 8100 space_info->bytes_pinned + space_info->bytes_readonly + 8101 min_free < space_info->total_bytes)) { 8102 spin_unlock(&space_info->lock); 8103 goto out; 8104 } 8105 spin_unlock(&space_info->lock); 8106 8107 /* 8108 * ok we don't have enough space, but maybe we have free space on our 8109 * devices to allocate new chunks for relocation, so loop through our 8110 * alloc devices and guess if we have enough space. if this block 8111 * group is going to be restriped, run checks against the target 8112 * profile instead of the current one. 8113 */ 8114 ret = -1; 8115 8116 /* 8117 * index: 8118 * 0: raid10 8119 * 1: raid1 8120 * 2: dup 8121 * 3: raid0 8122 * 4: single 8123 */ 8124 target = get_restripe_target(root->fs_info, block_group->flags); 8125 if (target) { 8126 index = __get_raid_index(extended_to_chunk(target)); 8127 } else { 8128 /* 8129 * this is just a balance, so if we were marked as full 8130 * we know there is no space for a new chunk 8131 */ 8132 if (full) 8133 goto out; 8134 8135 index = get_block_group_index(block_group); 8136 } 8137 8138 if (index == BTRFS_RAID_RAID10) { 8139 dev_min = 4; 8140 /* Divide by 2 */ 8141 min_free >>= 1; 8142 } else if (index == BTRFS_RAID_RAID1) { 8143 dev_min = 2; 8144 } else if (index == BTRFS_RAID_DUP) { 8145 /* Multiply by 2 */ 8146 min_free <<= 1; 8147 } else if (index == BTRFS_RAID_RAID0) { 8148 dev_min = fs_devices->rw_devices; 8149 do_div(min_free, dev_min); 8150 } 8151 8152 /* We need to do this so that we can look at pending chunks */ 8153 trans = btrfs_join_transaction(root); 8154 if (IS_ERR(trans)) { 8155 ret = PTR_ERR(trans); 8156 goto out; 8157 } 8158 8159 mutex_lock(&root->fs_info->chunk_mutex); 8160 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 8161 u64 dev_offset; 8162 8163 /* 8164 * check to make sure we can actually find a chunk with enough 8165 * space to fit our block group in. 
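 * A device is only considered if it has more than min_free bytes
 * unallocated and is not a dev-replace target, and it only counts
 * towards dev_min if find_free_dev_extent() can actually find a hole
 * that large; we stop as soon as dev_min suitable devices are found.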
8166 */ 8167 if (device->total_bytes > device->bytes_used + min_free && 8168 !device->is_tgtdev_for_dev_replace) { 8169 ret = find_free_dev_extent(trans, device, min_free, 8170 &dev_offset, NULL); 8171 if (!ret) 8172 dev_nr++; 8173 8174 if (dev_nr >= dev_min) 8175 break; 8176 8177 ret = -1; 8178 } 8179 } 8180 mutex_unlock(&root->fs_info->chunk_mutex); 8181 btrfs_end_transaction(trans, root); 8182 out: 8183 btrfs_put_block_group(block_group); 8184 return ret; 8185 } 8186 8187 static int find_first_block_group(struct btrfs_root *root, 8188 struct btrfs_path *path, struct btrfs_key *key) 8189 { 8190 int ret = 0; 8191 struct btrfs_key found_key; 8192 struct extent_buffer *leaf; 8193 int slot; 8194 8195 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 8196 if (ret < 0) 8197 goto out; 8198 8199 while (1) { 8200 slot = path->slots[0]; 8201 leaf = path->nodes[0]; 8202 if (slot >= btrfs_header_nritems(leaf)) { 8203 ret = btrfs_next_leaf(root, path); 8204 if (ret == 0) 8205 continue; 8206 if (ret < 0) 8207 goto out; 8208 break; 8209 } 8210 btrfs_item_key_to_cpu(leaf, &found_key, slot); 8211 8212 if (found_key.objectid >= key->objectid && 8213 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 8214 ret = 0; 8215 goto out; 8216 } 8217 path->slots[0]++; 8218 } 8219 out: 8220 return ret; 8221 } 8222 8223 void btrfs_put_block_group_cache(struct btrfs_fs_info *info) 8224 { 8225 struct btrfs_block_group_cache *block_group; 8226 u64 last = 0; 8227 8228 while (1) { 8229 struct inode *inode; 8230 8231 block_group = btrfs_lookup_first_block_group(info, last); 8232 while (block_group) { 8233 spin_lock(&block_group->lock); 8234 if (block_group->iref) 8235 break; 8236 spin_unlock(&block_group->lock); 8237 block_group = next_block_group(info->tree_root, 8238 block_group); 8239 } 8240 if (!block_group) { 8241 if (last == 0) 8242 break; 8243 last = 0; 8244 continue; 8245 } 8246 8247 inode = block_group->inode; 8248 block_group->iref = 0; 8249 block_group->inode = NULL; 8250 spin_unlock(&block_group->lock); 8251 iput(inode); 8252 last = block_group->key.objectid + block_group->key.offset; 8253 btrfs_put_block_group(block_group); 8254 } 8255 } 8256 8257 int btrfs_free_block_groups(struct btrfs_fs_info *info) 8258 { 8259 struct btrfs_block_group_cache *block_group; 8260 struct btrfs_space_info *space_info; 8261 struct btrfs_caching_control *caching_ctl; 8262 struct rb_node *n; 8263 8264 down_write(&info->extent_commit_sem); 8265 while (!list_empty(&info->caching_block_groups)) { 8266 caching_ctl = list_entry(info->caching_block_groups.next, 8267 struct btrfs_caching_control, list); 8268 list_del(&caching_ctl->list); 8269 put_caching_control(caching_ctl); 8270 } 8271 up_write(&info->extent_commit_sem); 8272 8273 spin_lock(&info->block_group_cache_lock); 8274 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { 8275 block_group = rb_entry(n, struct btrfs_block_group_cache, 8276 cache_node); 8277 rb_erase(&block_group->cache_node, 8278 &info->block_group_cache_tree); 8279 spin_unlock(&info->block_group_cache_lock); 8280 8281 down_write(&block_group->space_info->groups_sem); 8282 list_del(&block_group->list); 8283 up_write(&block_group->space_info->groups_sem); 8284 8285 if (block_group->cached == BTRFS_CACHE_STARTED) 8286 wait_block_group_cache_done(block_group); 8287 8288 /* 8289 * We haven't cached this block group, which means we could 8290 * possibly have excluded extents on this block group. 
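 * Release those excluded ranges here for the CACHE_NO and CACHE_ERROR
 * states; fully cached block groups have already dropped them when
 * caching finished.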
8291 */ 8292 if (block_group->cached == BTRFS_CACHE_NO || 8293 block_group->cached == BTRFS_CACHE_ERROR) 8294 free_excluded_extents(info->extent_root, block_group); 8295 8296 btrfs_remove_free_space_cache(block_group); 8297 btrfs_put_block_group(block_group); 8298 8299 spin_lock(&info->block_group_cache_lock); 8300 } 8301 spin_unlock(&info->block_group_cache_lock); 8302 8303 /* now that all the block groups are freed, go through and 8304 * free all the space_info structs. This is only called during 8305 * the final stages of unmount, and so we know nobody is 8306 * using them. We call synchronize_rcu() once before we start, 8307 * just to be on the safe side. 8308 */ 8309 synchronize_rcu(); 8310 8311 release_global_block_rsv(info); 8312 8313 while (!list_empty(&info->space_info)) { 8314 int i; 8315 8316 space_info = list_entry(info->space_info.next, 8317 struct btrfs_space_info, 8318 list); 8319 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) { 8320 if (WARN_ON(space_info->bytes_pinned > 0 || 8321 space_info->bytes_reserved > 0 || 8322 space_info->bytes_may_use > 0)) { 8323 dump_space_info(space_info, 0, 0); 8324 } 8325 } 8326 list_del(&space_info->list); 8327 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 8328 struct kobject *kobj; 8329 kobj = &space_info->block_group_kobjs[i]; 8330 if (kobj->parent) { 8331 kobject_del(kobj); 8332 kobject_put(kobj); 8333 } 8334 } 8335 kobject_del(&space_info->kobj); 8336 kobject_put(&space_info->kobj); 8337 } 8338 return 0; 8339 } 8340 8341 static void __link_block_group(struct btrfs_space_info *space_info, 8342 struct btrfs_block_group_cache *cache) 8343 { 8344 int index = get_block_group_index(cache); 8345 8346 down_write(&space_info->groups_sem); 8347 if (list_empty(&space_info->block_groups[index])) { 8348 struct kobject *kobj = &space_info->block_group_kobjs[index]; 8349 int ret; 8350 8351 kobject_get(&space_info->kobj); /* put in release */ 8352 ret = kobject_add(kobj, &space_info->kobj, "%s", 8353 get_raid_name(index)); 8354 if (ret) { 8355 pr_warn("BTRFS: failed to add kobject for block cache. 
ignoring.\n"); 8356 kobject_put(&space_info->kobj); 8357 } 8358 } 8359 list_add_tail(&cache->list, &space_info->block_groups[index]); 8360 up_write(&space_info->groups_sem); 8361 } 8362 8363 static struct btrfs_block_group_cache * 8364 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size) 8365 { 8366 struct btrfs_block_group_cache *cache; 8367 8368 cache = kzalloc(sizeof(*cache), GFP_NOFS); 8369 if (!cache) 8370 return NULL; 8371 8372 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 8373 GFP_NOFS); 8374 if (!cache->free_space_ctl) { 8375 kfree(cache); 8376 return NULL; 8377 } 8378 8379 cache->key.objectid = start; 8380 cache->key.offset = size; 8381 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 8382 8383 cache->sectorsize = root->sectorsize; 8384 cache->fs_info = root->fs_info; 8385 cache->full_stripe_len = btrfs_full_stripe_len(root, 8386 &root->fs_info->mapping_tree, 8387 start); 8388 atomic_set(&cache->count, 1); 8389 spin_lock_init(&cache->lock); 8390 INIT_LIST_HEAD(&cache->list); 8391 INIT_LIST_HEAD(&cache->cluster_list); 8392 INIT_LIST_HEAD(&cache->new_bg_list); 8393 btrfs_init_free_space_ctl(cache); 8394 8395 return cache; 8396 } 8397 8398 int btrfs_read_block_groups(struct btrfs_root *root) 8399 { 8400 struct btrfs_path *path; 8401 int ret; 8402 struct btrfs_block_group_cache *cache; 8403 struct btrfs_fs_info *info = root->fs_info; 8404 struct btrfs_space_info *space_info; 8405 struct btrfs_key key; 8406 struct btrfs_key found_key; 8407 struct extent_buffer *leaf; 8408 int need_clear = 0; 8409 u64 cache_gen; 8410 8411 root = info->extent_root; 8412 key.objectid = 0; 8413 key.offset = 0; 8414 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY); 8415 path = btrfs_alloc_path(); 8416 if (!path) 8417 return -ENOMEM; 8418 path->reada = 1; 8419 8420 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy); 8421 if (btrfs_test_opt(root, SPACE_CACHE) && 8422 btrfs_super_generation(root->fs_info->super_copy) != cache_gen) 8423 need_clear = 1; 8424 if (btrfs_test_opt(root, CLEAR_CACHE)) 8425 need_clear = 1; 8426 8427 while (1) { 8428 ret = find_first_block_group(root, path, &key); 8429 if (ret > 0) 8430 break; 8431 if (ret != 0) 8432 goto error; 8433 8434 leaf = path->nodes[0]; 8435 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 8436 8437 cache = btrfs_create_block_group_cache(root, found_key.objectid, 8438 found_key.offset); 8439 if (!cache) { 8440 ret = -ENOMEM; 8441 goto error; 8442 } 8443 8444 if (need_clear) { 8445 /* 8446 * When we mount with old space cache, we need to 8447 * set BTRFS_DC_CLEAR and set dirty flag. 8448 * 8449 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 8450 * truncate the old free space cache inode and 8451 * setup a new one. 8452 * b) Setting 'dirty flag' makes sure that we flush 8453 * the new space cache info onto disk. 8454 */ 8455 cache->disk_cache_state = BTRFS_DC_CLEAR; 8456 if (btrfs_test_opt(root, SPACE_CACHE)) 8457 cache->dirty = 1; 8458 } 8459 8460 read_extent_buffer(leaf, &cache->item, 8461 btrfs_item_ptr_offset(leaf, path->slots[0]), 8462 sizeof(cache->item)); 8463 cache->flags = btrfs_block_group_flags(&cache->item); 8464 8465 key.objectid = found_key.objectid + found_key.offset; 8466 btrfs_release_path(path); 8467 8468 /* 8469 * We need to exclude the super stripes now so that the space 8470 * info has super bytes accounted for, otherwise we'll think 8471 * we have more space than we actually do. 
8472 */ 8473 ret = exclude_super_stripes(root, cache); 8474 if (ret) { 8475 /* 8476 * We may have excluded something, so call this just in 8477 * case. 8478 */ 8479 free_excluded_extents(root, cache); 8480 btrfs_put_block_group(cache); 8481 goto error; 8482 } 8483 8484 /* 8485 * check for two cases, either we are full, and therefore 8486 * don't need to bother with the caching work since we won't 8487 * find any space, or we are empty, and we can just add all 8488 * the space in and be done with it. This saves us _alot_ of 8489 * time, particularly in the full case. 8490 */ 8491 if (found_key.offset == btrfs_block_group_used(&cache->item)) { 8492 cache->last_byte_to_unpin = (u64)-1; 8493 cache->cached = BTRFS_CACHE_FINISHED; 8494 free_excluded_extents(root, cache); 8495 } else if (btrfs_block_group_used(&cache->item) == 0) { 8496 cache->last_byte_to_unpin = (u64)-1; 8497 cache->cached = BTRFS_CACHE_FINISHED; 8498 add_new_free_space(cache, root->fs_info, 8499 found_key.objectid, 8500 found_key.objectid + 8501 found_key.offset); 8502 free_excluded_extents(root, cache); 8503 } 8504 8505 ret = btrfs_add_block_group_cache(root->fs_info, cache); 8506 if (ret) { 8507 btrfs_remove_free_space_cache(cache); 8508 btrfs_put_block_group(cache); 8509 goto error; 8510 } 8511 8512 ret = update_space_info(info, cache->flags, found_key.offset, 8513 btrfs_block_group_used(&cache->item), 8514 &space_info); 8515 if (ret) { 8516 btrfs_remove_free_space_cache(cache); 8517 spin_lock(&info->block_group_cache_lock); 8518 rb_erase(&cache->cache_node, 8519 &info->block_group_cache_tree); 8520 spin_unlock(&info->block_group_cache_lock); 8521 btrfs_put_block_group(cache); 8522 goto error; 8523 } 8524 8525 cache->space_info = space_info; 8526 spin_lock(&cache->space_info->lock); 8527 cache->space_info->bytes_readonly += cache->bytes_super; 8528 spin_unlock(&cache->space_info->lock); 8529 8530 __link_block_group(space_info, cache); 8531 8532 set_avail_alloc_bits(root->fs_info, cache->flags); 8533 if (btrfs_chunk_readonly(root, cache->key.objectid)) 8534 set_block_group_ro(cache, 1); 8535 } 8536 8537 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) { 8538 if (!(get_alloc_profile(root, space_info->flags) & 8539 (BTRFS_BLOCK_GROUP_RAID10 | 8540 BTRFS_BLOCK_GROUP_RAID1 | 8541 BTRFS_BLOCK_GROUP_RAID5 | 8542 BTRFS_BLOCK_GROUP_RAID6 | 8543 BTRFS_BLOCK_GROUP_DUP))) 8544 continue; 8545 /* 8546 * avoid allocating from un-mirrored block group if there are 8547 * mirrored block groups. 
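 * Any RAID0 and SINGLE block groups in such a space_info are forced
 * read-only below so that new allocations go to the mirrored block
 * groups instead.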
8548 */ 8549 list_for_each_entry(cache, 8550 &space_info->block_groups[BTRFS_RAID_RAID0], 8551 list) 8552 set_block_group_ro(cache, 1); 8553 list_for_each_entry(cache, 8554 &space_info->block_groups[BTRFS_RAID_SINGLE], 8555 list) 8556 set_block_group_ro(cache, 1); 8557 } 8558 8559 init_global_block_rsv(info); 8560 ret = 0; 8561 error: 8562 btrfs_free_path(path); 8563 return ret; 8564 } 8565 8566 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, 8567 struct btrfs_root *root) 8568 { 8569 struct btrfs_block_group_cache *block_group, *tmp; 8570 struct btrfs_root *extent_root = root->fs_info->extent_root; 8571 struct btrfs_block_group_item item; 8572 struct btrfs_key key; 8573 int ret = 0; 8574 8575 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, 8576 new_bg_list) { 8577 list_del_init(&block_group->new_bg_list); 8578 8579 if (ret) 8580 continue; 8581 8582 spin_lock(&block_group->lock); 8583 memcpy(&item, &block_group->item, sizeof(item)); 8584 memcpy(&key, &block_group->key, sizeof(key)); 8585 spin_unlock(&block_group->lock); 8586 8587 ret = btrfs_insert_item(trans, extent_root, &key, &item, 8588 sizeof(item)); 8589 if (ret) 8590 btrfs_abort_transaction(trans, extent_root, ret); 8591 ret = btrfs_finish_chunk_alloc(trans, extent_root, 8592 key.objectid, key.offset); 8593 if (ret) 8594 btrfs_abort_transaction(trans, extent_root, ret); 8595 } 8596 } 8597 8598 int btrfs_make_block_group(struct btrfs_trans_handle *trans, 8599 struct btrfs_root *root, u64 bytes_used, 8600 u64 type, u64 chunk_objectid, u64 chunk_offset, 8601 u64 size) 8602 { 8603 int ret; 8604 struct btrfs_root *extent_root; 8605 struct btrfs_block_group_cache *cache; 8606 8607 extent_root = root->fs_info->extent_root; 8608 8609 root->fs_info->last_trans_log_full_commit = trans->transid; 8610 8611 cache = btrfs_create_block_group_cache(root, chunk_offset, size); 8612 if (!cache) 8613 return -ENOMEM; 8614 8615 btrfs_set_block_group_used(&cache->item, bytes_used); 8616 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); 8617 btrfs_set_block_group_flags(&cache->item, type); 8618 8619 cache->flags = type; 8620 cache->last_byte_to_unpin = (u64)-1; 8621 cache->cached = BTRFS_CACHE_FINISHED; 8622 ret = exclude_super_stripes(root, cache); 8623 if (ret) { 8624 /* 8625 * We may have excluded something, so call this just in 8626 * case. 
8627 */ 8628 free_excluded_extents(root, cache); 8629 btrfs_put_block_group(cache); 8630 return ret; 8631 } 8632 8633 add_new_free_space(cache, root->fs_info, chunk_offset, 8634 chunk_offset + size); 8635 8636 free_excluded_extents(root, cache); 8637 8638 ret = btrfs_add_block_group_cache(root->fs_info, cache); 8639 if (ret) { 8640 btrfs_remove_free_space_cache(cache); 8641 btrfs_put_block_group(cache); 8642 return ret; 8643 } 8644 8645 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, 8646 &cache->space_info); 8647 if (ret) { 8648 btrfs_remove_free_space_cache(cache); 8649 spin_lock(&root->fs_info->block_group_cache_lock); 8650 rb_erase(&cache->cache_node, 8651 &root->fs_info->block_group_cache_tree); 8652 spin_unlock(&root->fs_info->block_group_cache_lock); 8653 btrfs_put_block_group(cache); 8654 return ret; 8655 } 8656 update_global_block_rsv(root->fs_info); 8657 8658 spin_lock(&cache->space_info->lock); 8659 cache->space_info->bytes_readonly += cache->bytes_super; 8660 spin_unlock(&cache->space_info->lock); 8661 8662 __link_block_group(cache->space_info, cache); 8663 8664 list_add_tail(&cache->new_bg_list, &trans->new_bgs); 8665 8666 set_avail_alloc_bits(extent_root->fs_info, type); 8667 8668 return 0; 8669 } 8670 8671 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 8672 { 8673 u64 extra_flags = chunk_to_extended(flags) & 8674 BTRFS_EXTENDED_PROFILE_MASK; 8675 8676 write_seqlock(&fs_info->profiles_lock); 8677 if (flags & BTRFS_BLOCK_GROUP_DATA) 8678 fs_info->avail_data_alloc_bits &= ~extra_flags; 8679 if (flags & BTRFS_BLOCK_GROUP_METADATA) 8680 fs_info->avail_metadata_alloc_bits &= ~extra_flags; 8681 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 8682 fs_info->avail_system_alloc_bits &= ~extra_flags; 8683 write_sequnlock(&fs_info->profiles_lock); 8684 } 8685 8686 int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 8687 struct btrfs_root *root, u64 group_start) 8688 { 8689 struct btrfs_path *path; 8690 struct btrfs_block_group_cache *block_group; 8691 struct btrfs_free_cluster *cluster; 8692 struct btrfs_root *tree_root = root->fs_info->tree_root; 8693 struct btrfs_key key; 8694 struct inode *inode; 8695 int ret; 8696 int index; 8697 int factor; 8698 8699 root = root->fs_info->extent_root; 8700 8701 block_group = btrfs_lookup_block_group(root->fs_info, group_start); 8702 BUG_ON(!block_group); 8703 BUG_ON(!block_group->ro); 8704 8705 /* 8706 * Free the reserved super bytes from this block group before 8707 * remove it. 
8708 */ 8709 free_excluded_extents(root, block_group); 8710 8711 memcpy(&key, &block_group->key, sizeof(key)); 8712 index = get_block_group_index(block_group); 8713 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP | 8714 BTRFS_BLOCK_GROUP_RAID1 | 8715 BTRFS_BLOCK_GROUP_RAID10)) 8716 factor = 2; 8717 else 8718 factor = 1; 8719 8720 /* make sure this block group isn't part of an allocation cluster */ 8721 cluster = &root->fs_info->data_alloc_cluster; 8722 spin_lock(&cluster->refill_lock); 8723 btrfs_return_cluster_to_free_space(block_group, cluster); 8724 spin_unlock(&cluster->refill_lock); 8725 8726 /* 8727 * make sure this block group isn't part of a metadata 8728 * allocation cluster 8729 */ 8730 cluster = &root->fs_info->meta_alloc_cluster; 8731 spin_lock(&cluster->refill_lock); 8732 btrfs_return_cluster_to_free_space(block_group, cluster); 8733 spin_unlock(&cluster->refill_lock); 8734 8735 path = btrfs_alloc_path(); 8736 if (!path) { 8737 ret = -ENOMEM; 8738 goto out; 8739 } 8740 8741 inode = lookup_free_space_inode(tree_root, block_group, path); 8742 if (!IS_ERR(inode)) { 8743 ret = btrfs_orphan_add(trans, inode); 8744 if (ret) { 8745 btrfs_add_delayed_iput(inode); 8746 goto out; 8747 } 8748 clear_nlink(inode); 8749 /* One for the block groups ref */ 8750 spin_lock(&block_group->lock); 8751 if (block_group->iref) { 8752 block_group->iref = 0; 8753 block_group->inode = NULL; 8754 spin_unlock(&block_group->lock); 8755 iput(inode); 8756 } else { 8757 spin_unlock(&block_group->lock); 8758 } 8759 /* One for our lookup ref */ 8760 btrfs_add_delayed_iput(inode); 8761 } 8762 8763 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 8764 key.offset = block_group->key.objectid; 8765 key.type = 0; 8766 8767 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); 8768 if (ret < 0) 8769 goto out; 8770 if (ret > 0) 8771 btrfs_release_path(path); 8772 if (ret == 0) { 8773 ret = btrfs_del_item(trans, tree_root, path); 8774 if (ret) 8775 goto out; 8776 btrfs_release_path(path); 8777 } 8778 8779 spin_lock(&root->fs_info->block_group_cache_lock); 8780 rb_erase(&block_group->cache_node, 8781 &root->fs_info->block_group_cache_tree); 8782 8783 if (root->fs_info->first_logical_byte == block_group->key.objectid) 8784 root->fs_info->first_logical_byte = (u64)-1; 8785 spin_unlock(&root->fs_info->block_group_cache_lock); 8786 8787 down_write(&block_group->space_info->groups_sem); 8788 /* 8789 * we must use list_del_init so people can check to see if they 8790 * are still on the list after taking the semaphore 8791 */ 8792 list_del_init(&block_group->list); 8793 if (list_empty(&block_group->space_info->block_groups[index])) { 8794 kobject_del(&block_group->space_info->block_group_kobjs[index]); 8795 kobject_put(&block_group->space_info->block_group_kobjs[index]); 8796 clear_avail_alloc_bits(root->fs_info, block_group->flags); 8797 } 8798 up_write(&block_group->space_info->groups_sem); 8799 8800 if (block_group->cached == BTRFS_CACHE_STARTED) 8801 wait_block_group_cache_done(block_group); 8802 8803 btrfs_remove_free_space_cache(block_group); 8804 8805 spin_lock(&block_group->space_info->lock); 8806 block_group->space_info->total_bytes -= block_group->key.offset; 8807 block_group->space_info->bytes_readonly -= block_group->key.offset; 8808 block_group->space_info->disk_total -= block_group->key.offset * factor; 8809 spin_unlock(&block_group->space_info->lock); 8810 8811 memcpy(&key, &block_group->key, sizeof(key)); 8812 8813 btrfs_clear_space_info_full(root->fs_info); 8814 8815 btrfs_put_block_group(block_group); 8816 
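/*
 * Drop the block group a second time: the put above released our lookup
 * reference from btrfs_lookup_block_group(), while the one below releases
 * the reference that was held by the rbtree entry erased earlier.
 */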
btrfs_put_block_group(block_group); 8817 8818 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 8819 if (ret > 0) 8820 ret = -EIO; 8821 if (ret < 0) 8822 goto out; 8823 8824 ret = btrfs_del_item(trans, root, path); 8825 out: 8826 btrfs_free_path(path); 8827 return ret; 8828 } 8829 8830 int btrfs_init_space_info(struct btrfs_fs_info *fs_info) 8831 { 8832 struct btrfs_space_info *space_info; 8833 struct btrfs_super_block *disk_super; 8834 u64 features; 8835 u64 flags; 8836 int mixed = 0; 8837 int ret; 8838 8839 disk_super = fs_info->super_copy; 8840 if (!btrfs_super_root(disk_super)) 8841 return 1; 8842 8843 features = btrfs_super_incompat_flags(disk_super); 8844 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 8845 mixed = 1; 8846 8847 flags = BTRFS_BLOCK_GROUP_SYSTEM; 8848 ret = update_space_info(fs_info, flags, 0, 0, &space_info); 8849 if (ret) 8850 goto out; 8851 8852 if (mixed) { 8853 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; 8854 ret = update_space_info(fs_info, flags, 0, 0, &space_info); 8855 } else { 8856 flags = BTRFS_BLOCK_GROUP_METADATA; 8857 ret = update_space_info(fs_info, flags, 0, 0, &space_info); 8858 if (ret) 8859 goto out; 8860 8861 flags = BTRFS_BLOCK_GROUP_DATA; 8862 ret = update_space_info(fs_info, flags, 0, 0, &space_info); 8863 } 8864 out: 8865 return ret; 8866 } 8867 8868 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) 8869 { 8870 return unpin_extent_range(root, start, end); 8871 } 8872 8873 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, 8874 u64 num_bytes, u64 *actual_bytes) 8875 { 8876 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes); 8877 } 8878 8879 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range) 8880 { 8881 struct btrfs_fs_info *fs_info = root->fs_info; 8882 struct btrfs_block_group_cache *cache = NULL; 8883 u64 group_trimmed; 8884 u64 start; 8885 u64 end; 8886 u64 trimmed = 0; 8887 u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy); 8888 int ret = 0; 8889 8890 /* 8891 * try to trim all FS space, our block group may start from non-zero. 8892 */ 8893 if (range->len == total_bytes) 8894 cache = btrfs_lookup_first_block_group(fs_info, range->start); 8895 else 8896 cache = btrfs_lookup_block_group(fs_info, range->start); 8897 8898 while (cache) { 8899 if (cache->key.objectid >= (range->start + range->len)) { 8900 btrfs_put_block_group(cache); 8901 break; 8902 } 8903 8904 start = max(range->start, cache->key.objectid); 8905 end = min(range->start + range->len, 8906 cache->key.objectid + cache->key.offset); 8907 8908 if (end - start >= range->minlen) { 8909 if (!block_group_cache_done(cache)) { 8910 ret = cache_block_group(cache, 0); 8911 if (ret) { 8912 btrfs_put_block_group(cache); 8913 break; 8914 } 8915 ret = wait_block_group_cache_done(cache); 8916 if (ret) { 8917 btrfs_put_block_group(cache); 8918 break; 8919 } 8920 } 8921 ret = btrfs_trim_block_group(cache, 8922 &group_trimmed, 8923 start, 8924 end, 8925 range->minlen); 8926 8927 trimmed += group_trimmed; 8928 if (ret) { 8929 btrfs_put_block_group(cache); 8930 break; 8931 } 8932 } 8933 8934 cache = next_block_group(fs_info->tree_root, cache); 8935 } 8936 8937 range->len = trimmed; 8938 return ret; 8939 } 8940
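/*
 * Illustrative sketch (not part of the kernel sources): btrfs_trim_fs()
 * above is reached from the FITRIM ioctl handler.  A minimal userspace
 * caller would look roughly like this; the mount point is a placeholder
 * and error handling is omitted:
 *
 *	#include <fcntl.h>
 *	#include <limits.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX,
 *				      .minlen = 0 };
 *	int fd = open("/mnt/btrfs", O_RDONLY);
 *	ioctl(fd, FITRIM, &range);
 *
 * On success range.len is updated to the number of bytes that were
 * actually trimmed, mirroring the "range->len = trimmed" assignment at
 * the end of btrfs_trim_fs().
 */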