// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "ctree.h"
#include "extent-tree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
#include "discard.h"
#include "rcu-string.h"
#include "zoned.h"
#include "dev-replace.h"
#include "fs.h"
#include "accessors.h"
#include "root-tree.h"
#include "file-item.h"
#include "orphan.h"
#include "tree-checker.h"

#undef SCRAMBLE_DELAYED_REFS

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);

static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
			      u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;

	set_extent_bits(&fs_info->excluded_extents, start, end,
			EXTENT_UPTODATE);
	return 0;
}

void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 start, end;

	start = cache->start;
	end = start + cache->length - 1;

	clear_extent_bits(&fs_info->excluded_extents, start, end,
			  EXTENT_UPTODATE);
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	struct btrfs_root *root = btrfs_extent_root(fs_info, start);
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. The head
 * node may also store the extent flags to set.
 * This way you can check what the reference count and extent flags will be
 * once all of the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_root *extent_root;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	extent_root = btrfs_extent_root(fs_info, bytenr);
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(fs_info, ret, NULL);

			goto out_free;
		}

		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.
 *
 * Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance. This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic, and can
 * be used in all cases where implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree block
 * gets COWed, we have to update the back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
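 *
 * For example (illustrative values, not from the original comment): a data
 * extent at bytenr 12582912 that root 5 references implicitly through inode
 * 257 at file offset 0 carries a back ref item keyed
 *
 *     (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0))
 *
 * while a full back ref to the same extent from a parent leaf at bytenr
 * 29360128 would be keyed
 *
 *     (12582912, BTRFS_SHARED_DATA_REF_KEY, 29360128)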
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for implicit back refs is the
 * objectid of the block's owner tree. The key offset for full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */

/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has a parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->sectorsize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has a parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->sectorsize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info,
		  "eb %llu iref 0x%lx invalid extent inline ref type %d",
		  eb->start, (unsigned long)iref, type);
	WARN_ON(1);

	return BTRFS_REF_TYPE_INVALID;
}

u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;

		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
		btrfs_print_v0_err(trans->fs_info);
		btrfs_abort_transaction(trans, -EINVAL);
		return -EINVAL;
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

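/*
 * Quick reference (an illustration derived from extent_ref_type() above, not
 * part of the original file): which back ref key type applies to a given
 * (parent, owner) pair.
 *
 *	tree blocks (owner < BTRFS_FIRST_FREE_OBJECTID):
 *		parent != 0: BTRFS_SHARED_BLOCK_REF_KEY (full back ref)
 *		parent == 0: BTRFS_TREE_BLOCK_REF_KEY   (implicit back ref)
 *	data extents (owner >= BTRFS_FIRST_FREE_OBJECTID):
 *		parent != 0: BTRFS_SHARED_DATA_REF_KEY  (full back ref)
 *		parent == 0: BTRFS_EXTENT_DATA_REF_KEY  (implicit back ref)
 */
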
/*
 * look for an inline back ref. if a back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if a back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->search_for_extension = 1;
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our level, so we can just add one to get the level for the
	 * block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
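	/*
	 * Two key shapes are possible at this point (an illustration, not
	 * part of the original comment): skinny metadata is keyed
	 * (bytenr, BTRFS_METADATA_ITEM_KEY, level), while the old fat form
	 * is keyed (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes); the fallback
	 * below retries with the latter.
	 */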
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			if (ptr > end) {
				err = -EUCLEAN;
				btrfs_print_leaf(path->nodes[0]);
				btrfs_crit(fs_info,
"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
					   path->slots[0], root_objectid, owner,
					   offset, parent);
			}
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;

			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		path->search_for_extension = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add a new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;

		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;

		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		/*
		 * We're adding refs to a tree block we already own, this
		 * should not happen at all.
		 */
		if (owner < BTRFS_FIRST_FREE_OBJECTID) {
			btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu",
				   bytenr, num_bytes, root_objectid);
			if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) {
				WARN_ON(1);
				btrfs_crit(trans->fs_info,
			"path->slots[0]=%d path->nodes[0]:", path->slots[0]);
				btrfs_print_leaf(path->nodes[0]);
			}
			return -EUCLEAN;
		}
		update_inline_extent_backref(path, iref, refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref)
		update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
	else if (is_data)
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	else
		ret = btrfs_del_item(trans, root, path);
	return ret;
}

static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range. Adjust start and
		 * try again.
		 */
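		/*
		 * Illustration (hypothetical numbers, not from the original
		 * comment): a discard starting at the 64M superblock mirror
		 * hits this case, so start is bumped past sb_end and the
		 * loop retries with the remainder of the range.
		 */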
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}

static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
{
	struct btrfs_device *dev = stripe->dev;
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 phys = stripe->physical;
	u64 len = stripe->length;
	u64 discarded = 0;
	int ret = 0;

	/* Zone reset on a zoned filesystem */
	if (btrfs_can_zone_reset(dev, phys, len)) {
		u64 src_disc;

		ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
		if (ret)
			goto out;

		if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
		    dev != dev_replace->srcdev)
			goto out;

		src_disc = discarded;

		/* Send to replace target as well */
		ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
					      &discarded);
		discarded += src_disc;
	} else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
		ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
	} else {
		ret = 0;
		*bytes = 0;
	}

out:
	*bytes = discarded;
	return ret;
}

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret = 0;
	u64 discarded_bytes = 0;
	u64 end = bytenr + num_bytes;
	u64 cur = bytenr;

	/*
	 * Avoid races with device replace and make sure the devices in the
	 * stripes don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	while (cur < end) {
		struct btrfs_discard_stripe *stripes;
		unsigned int num_stripes;
		int i;

		num_bytes = end - cur;
		stripes = btrfs_map_discard(fs_info, cur, &num_bytes,
					    &num_stripes);
		if (IS_ERR(stripes)) {
			ret = PTR_ERR(stripes);
			if (ret == -EOPNOTSUPP)
				ret = 0;
			break;
		}

		for (i = 0; i < num_stripes; i++) {
			struct btrfs_discard_stripe *stripe = stripes + i;
			u64 bytes;

			if (!stripe->dev->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}

			if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
				      &stripe->dev->dev_state))
				continue;

			ret = do_discard_extent(stripe, &bytes);
			if (ret) {
				/*
				 * Keep going if discard is not supported by
				 * the device.
				 */
				if (ret != -EOPNOTSUPP)
					break;
				ret = 0;
			} else {
				discarded_bytes += bytes;
			}
		}
		kfree(stripes);
		if (ret)
			break;
		cur += num_bytes;
	}
	btrfs_bio_counter_dec(fs_info);
	if (actual_bytes)
		*actual_bytes = discarded_bytes;
	return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
	       generic_ref->action);
	BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
	       generic_ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID);

	if (generic_ref->type == BTRFS_REF_METADATA)
		ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
	else
		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);

	btrfs_ref_tree_mod(fs_info, generic_ref);

	return ret;
}

/*
 * __btrfs_inc_extent_ref - insert a backreference for a given extent
 *
 * The counterpart is in __btrfs_free_extent(), with examples and more details
 * on how it works.
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for the
 *		    extent whose references are incremented.
 *
 * @parent:	    If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *		    BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *		    bytenr of the parent block. Since new extents are always
 *		    created with indirect references, this will only be the
 *		    case when relocating a shared extent. In that case,
 *		    root_objectid will be BTRFS_TREE_RELOC_OBJECTID. Otherwise,
 *		    parent must be 0.
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *		    this can be either one of the well-known metadata trees or
 *		    the subvolume id which references this extent.
 *
 * @owner:	    For data extents it is the inode number of the owning file.
 *		    For metadata extents this parameter holds the level in the
 *		    tree of the extent.
 *
 * @offset:	    For metadata extents the offset is ignored and is currently
 *		    always passed as 0. For data extents it is the file offset
 *		    this extent belongs to.
 *
 * @refs_to_add:    Number of references to add
 *
 * @extent_op:	    Pointer to a structure, holding information necessary when
 *		    updating a tree block's flags
 *
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   parent, root_objectid, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* now insert the actual backref */
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset,
					     refs_to_add);
	}
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, parent, ref_root,
						 flags, ref->objectid,
						 ref->offset, &ins,
						 node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->objectid, ref->offset,
					     node->ref_mod, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);

	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;

		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = 1;

	if (TRANS_ABORTED(trans))
		return 0;

	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

	root = btrfs_extent_root(fs_info, key.objectid);
again:
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);

	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	u64 parent = 0;
	u64 ref_root = 0;

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->ref_mod != 1) {
		btrfs_err(trans->fs_info,
	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
			  node->bytenr, node->ref_mod, node->action, ref_root,
			  parent);
		return -EIO;
	}
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, node, extent_op);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (TRANS_ABORTED(trans)) {
		if (insert_reserved)
			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, node, extent_op,
					   insert_reserved);
	else
		BUG();
	if (ret && insert_reserved)
		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
	if (ret < 0)
		btrfs_err(trans->fs_info,
"failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
			  node->bytenr, node->num_bytes, node->type,
			  node->action, node->ref_mod, ret);
	return ret;
}

static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return NULL;

	/*
	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * This is to prevent a ref count from going down to zero, which deletes
	 * the extent item from the extent tree, when there still are references
	 * to add, which would fail because they would not find the extent item.
	 */
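	/*
	 * For example (an illustration of the point above): with a queued -1
	 * and +1 on the same extent, playing the +1 first takes the count
	 * 1 -> 2 -> 1 instead of 1 -> 0, which would delete the extent item
	 * while a reference still remained to be added.
	 */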
	if (!list_empty(&head->ref_add_list))
		return list_first_entry(&head->ref_add_list,
					struct btrfs_delayed_ref_node,
					add_list);

	ref = rb_entry(rb_first_cached(&head->ref_tree),
		       struct btrfs_delayed_ref_node, ref_node);
	ASSERT(list_empty(&ref->add_list));
	return ref;
}

static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
				      struct btrfs_delayed_ref_head *head)
{
	spin_lock(&delayed_refs->lock);
	head->processing = 0;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
	btrfs_delayed_ref_unlock(head);
}

static struct btrfs_delayed_extent_op *cleanup_extent_op(
				struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;

	if (!extent_op)
		return NULL;

	if (head->must_insert_reserved) {
		head->extent_op = NULL;
		btrfs_free_delayed_extent_op(extent_op);
		return NULL;
	}
	return extent_op;
}

static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = cleanup_extent_op(head);
	if (!extent_op)
		return 0;
	head->extent_op = NULL;
	spin_unlock(&head->lock);
	ret = run_delayed_extent_op(trans, head, extent_op);
	btrfs_free_delayed_extent_op(extent_op);
	return ret ? ret : 1;
}

void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
				       struct btrfs_delayed_ref_root *delayed_refs,
				       struct btrfs_delayed_ref_head *head)
{
	int nr_items = 1;	/* Dropping this ref head update. */

	/*
	 * We had csum deletions accounted for in our delayed refs rsv, we need
	 * to drop the csum leaves for this update from our delayed_refs_rsv.
	 */
	if (head->total_ref_mod < 0 && head->is_data) {
		spin_lock(&delayed_refs->lock);
		delayed_refs->pending_csums -= head->num_bytes;
		spin_unlock(&delayed_refs->lock);
		nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
	}

	btrfs_delayed_refs_rsv_release(fs_info, nr_items);
}

static int cleanup_ref_head(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;

	ret = run_and_cleanup_extent_op(trans, head);
	if (ret < 0) {
		unselect_delayed_ref_head(delayed_refs, head);
		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
		return ret;
	} else if (ret) {
		return ret;
	}

	/*
	 * Need to drop our head ref lock and re-acquire the delayed ref lock
	 * and then re-check to make sure nobody got added.
	 */
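	/*
	 * Note (an observation, not from the original comment): when both
	 * locks are held, delayed_refs->lock is taken before head->lock, as
	 * in btrfs_lookup_extent_info(); hence the drop and re-take below.
	 */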
	spin_unlock(&head->lock);
	spin_lock(&delayed_refs->lock);
	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		return 1;
	}
	btrfs_delete_ref_head(delayed_refs, head);
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	if (head->must_insert_reserved) {
		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
		if (head->is_data) {
			struct btrfs_root *csum_root;

			csum_root = btrfs_csum_root(fs_info, head->bytenr);
			ret = btrfs_del_csums(trans, csum_root, head->bytenr,
					      head->num_bytes);
		}
	}

	btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);

	trace_run_delayed_ref_head(fs_info, head, 0);
	btrfs_delayed_ref_unlock(head);
	btrfs_put_delayed_ref_head(head);
	return ret;
}

static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
					struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_head *head = NULL;
	int ret;

	spin_lock(&delayed_refs->lock);
	head = btrfs_select_ref_head(delayed_refs);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return head;
	}

	/*
	 * Grab the lock that says we are going to process all the refs for
	 * this head
	 */
	ret = btrfs_delayed_ref_lock(delayed_refs, head);
	spin_unlock(&delayed_refs->lock);

	/*
	 * We may have dropped the spin lock to get the head mutex lock, and
	 * that might have given someone else time to free the head. If that's
	 * true, it has been removed from our list and we can move on.
	 */
	if (ret == -EAGAIN)
		head = ERR_PTR(-EAGAIN);

	return head;
}

static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
					   struct btrfs_delayed_ref_head *locked_ref,
					   unsigned long *run_refs)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_delayed_ref_node *ref;
	int must_insert_reserved = 0;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;

	lockdep_assert_held(&locked_ref->mutex);
	lockdep_assert_held(&locked_ref->lock);

	while ((ref = select_delayed_ref(locked_ref))) {
		if (ref->seq &&
		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
			spin_unlock(&locked_ref->lock);
			unselect_delayed_ref_head(delayed_refs, locked_ref);
			return -EAGAIN;
		}

		(*run_refs)++;
		ref->in_tree = 0;
		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
		RB_CLEAR_NODE(&ref->ref_node);
		if (!list_empty(&ref->add_list))
			list_del(&ref->add_list);
		/*
		 * When we play the delayed ref, also correct the ref_mod on
		 * the head
		 */
		switch (ref->action) {
		case BTRFS_ADD_DELAYED_REF:
		case BTRFS_ADD_DELAYED_EXTENT:
			locked_ref->ref_mod -= ref->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			locked_ref->ref_mod += ref->ref_mod;
			break;
		default:
			WARN_ON(1);
		}
		atomic_dec(&delayed_refs->num_entries);

		/*
		 * Record the must_insert_reserved flag before we drop the
		 * spin lock.
		 */
1946 */ 1947 must_insert_reserved = locked_ref->must_insert_reserved; 1948 locked_ref->must_insert_reserved = 0; 1949 1950 extent_op = locked_ref->extent_op; 1951 locked_ref->extent_op = NULL; 1952 spin_unlock(&locked_ref->lock); 1953 1954 ret = run_one_delayed_ref(trans, ref, extent_op, 1955 must_insert_reserved); 1956 1957 btrfs_free_delayed_extent_op(extent_op); 1958 if (ret) { 1959 unselect_delayed_ref_head(delayed_refs, locked_ref); 1960 btrfs_put_delayed_ref(ref); 1961 return ret; 1962 } 1963 1964 btrfs_put_delayed_ref(ref); 1965 cond_resched(); 1966 1967 spin_lock(&locked_ref->lock); 1968 btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); 1969 } 1970 1971 return 0; 1972 } 1973 1974 /* 1975 * Returns 0 on success or if called with an already aborted transaction. 1976 * Returns -ENOMEM or -EIO on failure and will abort the transaction. 1977 */ 1978 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 1979 unsigned long nr) 1980 { 1981 struct btrfs_fs_info *fs_info = trans->fs_info; 1982 struct btrfs_delayed_ref_root *delayed_refs; 1983 struct btrfs_delayed_ref_head *locked_ref = NULL; 1984 ktime_t start = ktime_get(); 1985 int ret; 1986 unsigned long count = 0; 1987 unsigned long actual_count = 0; 1988 1989 delayed_refs = &trans->transaction->delayed_refs; 1990 do { 1991 if (!locked_ref) { 1992 locked_ref = btrfs_obtain_ref_head(trans); 1993 if (IS_ERR_OR_NULL(locked_ref)) { 1994 if (PTR_ERR(locked_ref) == -EAGAIN) { 1995 continue; 1996 } else { 1997 break; 1998 } 1999 } 2000 count++; 2001 } 2002 /* 2003 * We need to try and merge add/drops of the same ref since we 2004 * can run into issues with relocate dropping the implicit ref 2005 * and then it being added back again before the drop can 2006 * finish. If we merged anything we need to re-loop so we can 2007 * get a good ref. 2008 * Or we can get node references of the same type that weren't 2009 * merged when created due to bumps in the tree mod seq, and 2010 * we need to merge them to prevent adding an inline extent 2011 * backref before dropping it (triggering a BUG_ON at 2012 * insert_inline_extent_backref()). 2013 */ 2014 spin_lock(&locked_ref->lock); 2015 btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); 2016 2017 ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, 2018 &actual_count); 2019 if (ret < 0 && ret != -EAGAIN) { 2020 /* 2021 * Error, btrfs_run_delayed_refs_for_head already 2022 * unlocked everything so just bail out 2023 */ 2024 return ret; 2025 } else if (!ret) { 2026 /* 2027 * Success, perform the usual cleanup of a processed 2028 * head 2029 */ 2030 ret = cleanup_ref_head(trans, locked_ref); 2031 if (ret > 0) { 2032 /* We dropped our lock, we need to loop. */ 2033 ret = 0; 2034 continue; 2035 } else if (ret) { 2036 return ret; 2037 } 2038 } 2039 2040 /* 2041 * Either success case or btrfs_run_delayed_refs_for_head 2042 * returned -EAGAIN, meaning we need to select another head 2043 */ 2044 2045 locked_ref = NULL; 2046 cond_resched(); 2047 } while ((nr != -1 && count < nr) || locked_ref); 2048 2049 /* 2050 * We don't want to include ref heads since we can have empty ref heads 2051 * and those will drastically skew our runtime down since we just do 2052 * accounting, no actual extent tree updates. 2053 */ 2054 if (actual_count > 0) { 2055 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start)); 2056 u64 avg; 2057 2058 /* 2059 * We weigh the current average higher than our current runtime 2060 * to avoid large swings in the average.
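 *
 * This is an exponentially weighted moving average; the update below
 * computes
 *
 *   avg = (avg * 3 + runtime) / 4
 *
 * i.e. a 3/4 weight on the old average, with the division done as a
 * right shift because the divisor is a power of two.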
2061 */ 2062 spin_lock(&delayed_refs->lock); 2063 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime; 2064 fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */ 2065 spin_unlock(&delayed_refs->lock); 2066 } 2067 return 0; 2068 } 2069 2070 #ifdef SCRAMBLE_DELAYED_REFS 2071 /* 2072 * Normally delayed refs get processed in ascending bytenr order. This 2073 * correlates in most cases to the order added. To expose dependencies on this 2074 * order, we start to process the tree in the middle instead of the beginning 2075 */ 2076 static u64 find_middle(struct rb_root *root) 2077 { 2078 struct rb_node *n = root->rb_node; 2079 struct btrfs_delayed_ref_node *entry; 2080 int alt = 1; 2081 u64 middle; 2082 u64 first = 0, last = 0; 2083 2084 n = rb_first(root); 2085 if (n) { 2086 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2087 first = entry->bytenr; 2088 } 2089 n = rb_last(root); 2090 if (n) { 2091 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2092 last = entry->bytenr; 2093 } 2094 n = root->rb_node; 2095 2096 while (n) { 2097 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2098 WARN_ON(!entry->in_tree); 2099 2100 middle = entry->bytenr; 2101 2102 if (alt) 2103 n = n->rb_left; 2104 else 2105 n = n->rb_right; 2106 2107 alt = 1 - alt; 2108 } 2109 return middle; 2110 } 2111 #endif 2112 2113 /* 2114 * this starts processing the delayed reference count updates and 2115 * extent insertions we have queued up so far. count can be 2116 * 0, which means to process everything in the tree at the start 2117 * of the run (but not newly added entries), or it can be some target 2118 * number you'd like to process. 2119 * 2120 * Returns 0 on success or if called with an aborted transaction 2121 * Returns <0 on error and aborts the transaction 2122 */ 2123 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2124 unsigned long count) 2125 { 2126 struct btrfs_fs_info *fs_info = trans->fs_info; 2127 struct rb_node *node; 2128 struct btrfs_delayed_ref_root *delayed_refs; 2129 struct btrfs_delayed_ref_head *head; 2130 int ret; 2131 int run_all = count == (unsigned long)-1; 2132 2133 /* We'll clean this up in btrfs_cleanup_transaction */ 2134 if (TRANS_ABORTED(trans)) 2135 return 0; 2136 2137 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) 2138 return 0; 2139 2140 delayed_refs = &trans->transaction->delayed_refs; 2141 if (count == 0) 2142 count = delayed_refs->num_heads_ready; 2143 2144 again: 2145 #ifdef SCRAMBLE_DELAYED_REFS 2146 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); 2147 #endif 2148 ret = __btrfs_run_delayed_refs(trans, count); 2149 if (ret < 0) { 2150 btrfs_abort_transaction(trans, ret); 2151 return ret; 2152 } 2153 2154 if (run_all) { 2155 btrfs_create_pending_block_groups(trans); 2156 2157 spin_lock(&delayed_refs->lock); 2158 node = rb_first_cached(&delayed_refs->href_root); 2159 if (!node) { 2160 spin_unlock(&delayed_refs->lock); 2161 goto out; 2162 } 2163 head = rb_entry(node, struct btrfs_delayed_ref_head, 2164 href_node); 2165 refcount_inc(&head->refs); 2166 spin_unlock(&delayed_refs->lock); 2167 2168 /* Mutex was contended, block until it's released and retry. 
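 * (Clarifying note: the mutex_lock()/mutex_unlock() pair below is used
 * purely as a wait primitive. By the time mutex_lock() returns, the
 * previous holder has finished running the head's refs; we drop the
 * mutex right away because waiting was all we wanted before retrying.)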
*/ 2169 mutex_lock(&head->mutex); 2170 mutex_unlock(&head->mutex); 2171 2172 btrfs_put_delayed_ref_head(head); 2173 cond_resched(); 2174 goto again; 2175 } 2176 out: 2177 return 0; 2178 } 2179 2180 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2181 struct extent_buffer *eb, u64 flags, 2182 int level) 2183 { 2184 struct btrfs_delayed_extent_op *extent_op; 2185 int ret; 2186 2187 extent_op = btrfs_alloc_delayed_extent_op(); 2188 if (!extent_op) 2189 return -ENOMEM; 2190 2191 extent_op->flags_to_set = flags; 2192 extent_op->update_flags = true; 2193 extent_op->update_key = false; 2194 extent_op->level = level; 2195 2196 ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op); 2197 if (ret) 2198 btrfs_free_delayed_extent_op(extent_op); 2199 return ret; 2200 } 2201 2202 static noinline int check_delayed_ref(struct btrfs_root *root, 2203 struct btrfs_path *path, 2204 u64 objectid, u64 offset, u64 bytenr) 2205 { 2206 struct btrfs_delayed_ref_head *head; 2207 struct btrfs_delayed_ref_node *ref; 2208 struct btrfs_delayed_data_ref *data_ref; 2209 struct btrfs_delayed_ref_root *delayed_refs; 2210 struct btrfs_transaction *cur_trans; 2211 struct rb_node *node; 2212 int ret = 0; 2213 2214 spin_lock(&root->fs_info->trans_lock); 2215 cur_trans = root->fs_info->running_transaction; 2216 if (cur_trans) 2217 refcount_inc(&cur_trans->use_count); 2218 spin_unlock(&root->fs_info->trans_lock); 2219 if (!cur_trans) 2220 return 0; 2221 2222 delayed_refs = &cur_trans->delayed_refs; 2223 spin_lock(&delayed_refs->lock); 2224 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); 2225 if (!head) { 2226 spin_unlock(&delayed_refs->lock); 2227 btrfs_put_transaction(cur_trans); 2228 return 0; 2229 } 2230 2231 if (!mutex_trylock(&head->mutex)) { 2232 if (path->nowait) { 2233 spin_unlock(&delayed_refs->lock); 2234 btrfs_put_transaction(cur_trans); 2235 return -EAGAIN; 2236 } 2237 2238 refcount_inc(&head->refs); 2239 spin_unlock(&delayed_refs->lock); 2240 2241 btrfs_release_path(path); 2242 2243 /* 2244 * Mutex was contended, block until it's released and let 2245 * caller try again 2246 */ 2247 mutex_lock(&head->mutex); 2248 mutex_unlock(&head->mutex); 2249 btrfs_put_delayed_ref_head(head); 2250 btrfs_put_transaction(cur_trans); 2251 return -EAGAIN; 2252 } 2253 spin_unlock(&delayed_refs->lock); 2254 2255 spin_lock(&head->lock); 2256 /* 2257 * XXX: We should replace this with a proper search function in the 2258 * future. 2259 */ 2260 for (node = rb_first_cached(&head->ref_tree); node; 2261 node = rb_next(node)) { 2262 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); 2263 /* If it's a shared ref we know a cross reference exists */ 2264 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { 2265 ret = 1; 2266 break; 2267 } 2268 2269 data_ref = btrfs_delayed_node_to_data_ref(ref); 2270 2271 /* 2272 * If our ref doesn't match the one we're currently looking at 2273 * then we have a cross reference. 
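 * (A "match" here is the full (root, objectid, offset) triple of the
 * ref being checked; any delayed data ref carrying a different triple
 * means some other root or inode also references this extent.)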
2274 */ 2275 if (data_ref->root != root->root_key.objectid || 2276 data_ref->objectid != objectid || 2277 data_ref->offset != offset) { 2278 ret = 1; 2279 break; 2280 } 2281 } 2282 spin_unlock(&head->lock); 2283 mutex_unlock(&head->mutex); 2284 btrfs_put_transaction(cur_trans); 2285 return ret; 2286 } 2287 2288 static noinline int check_committed_ref(struct btrfs_root *root, 2289 struct btrfs_path *path, 2290 u64 objectid, u64 offset, u64 bytenr, 2291 bool strict) 2292 { 2293 struct btrfs_fs_info *fs_info = root->fs_info; 2294 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); 2295 struct extent_buffer *leaf; 2296 struct btrfs_extent_data_ref *ref; 2297 struct btrfs_extent_inline_ref *iref; 2298 struct btrfs_extent_item *ei; 2299 struct btrfs_key key; 2300 u32 item_size; 2301 int type; 2302 int ret; 2303 2304 key.objectid = bytenr; 2305 key.offset = (u64)-1; 2306 key.type = BTRFS_EXTENT_ITEM_KEY; 2307 2308 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2309 if (ret < 0) 2310 goto out; 2311 BUG_ON(ret == 0); /* Corruption */ 2312 2313 ret = -ENOENT; 2314 if (path->slots[0] == 0) 2315 goto out; 2316 2317 path->slots[0]--; 2318 leaf = path->nodes[0]; 2319 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2320 2321 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) 2322 goto out; 2323 2324 ret = 1; 2325 item_size = btrfs_item_size(leaf, path->slots[0]); 2326 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2327 2328 /* If extent item has more than 1 inline ref then it's shared */ 2329 if (item_size != sizeof(*ei) + 2330 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) 2331 goto out; 2332 2333 /* 2334 * If extent created before last snapshot => it's shared unless the 2335 * snapshot has been deleted. Use the heuristic if strict is false. 
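 *
 * Restated as pseudo-code (illustrative only):
 *
 *   if (!strict && extent_generation <= last_snapshot)
 *           treat the extent as shared;
 *
 * because a snapshot taken after the extent was created may still
 * reference it. With strict == true only the actual backrefs decide.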
2336 */ 2337 if (!strict && 2338 (btrfs_extent_generation(leaf, ei) <= 2339 btrfs_root_last_snapshot(&root->root_item))) 2340 goto out; 2341 2342 iref = (struct btrfs_extent_inline_ref *)(ei + 1); 2343 2344 /* If this extent has SHARED_DATA_REF then it's shared */ 2345 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); 2346 if (type != BTRFS_EXTENT_DATA_REF_KEY) 2347 goto out; 2348 2349 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 2350 if (btrfs_extent_refs(leaf, ei) != 2351 btrfs_extent_data_ref_count(leaf, ref) || 2352 btrfs_extent_data_ref_root(leaf, ref) != 2353 root->root_key.objectid || 2354 btrfs_extent_data_ref_objectid(leaf, ref) != objectid || 2355 btrfs_extent_data_ref_offset(leaf, ref) != offset) 2356 goto out; 2357 2358 ret = 0; 2359 out: 2360 return ret; 2361 } 2362 2363 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, 2364 u64 bytenr, bool strict, struct btrfs_path *path) 2365 { 2366 int ret; 2367 2368 do { 2369 ret = check_committed_ref(root, path, objectid, 2370 offset, bytenr, strict); 2371 if (ret && ret != -ENOENT) 2372 goto out; 2373 2374 ret = check_delayed_ref(root, path, objectid, offset, bytenr); 2375 } while (ret == -EAGAIN); 2376 2377 out: 2378 btrfs_release_path(path); 2379 if (btrfs_is_data_reloc_root(root)) 2380 WARN_ON(ret > 0); 2381 return ret; 2382 } 2383 2384 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, 2385 struct btrfs_root *root, 2386 struct extent_buffer *buf, 2387 int full_backref, int inc) 2388 { 2389 struct btrfs_fs_info *fs_info = root->fs_info; 2390 u64 bytenr; 2391 u64 num_bytes; 2392 u64 parent; 2393 u64 ref_root; 2394 u32 nritems; 2395 struct btrfs_key key; 2396 struct btrfs_file_extent_item *fi; 2397 struct btrfs_ref generic_ref = { 0 }; 2398 bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC); 2399 int i; 2400 int action; 2401 int level; 2402 int ret = 0; 2403 2404 if (btrfs_is_testing(fs_info)) 2405 return 0; 2406 2407 ref_root = btrfs_header_owner(buf); 2408 nritems = btrfs_header_nritems(buf); 2409 level = btrfs_header_level(buf); 2410 2411 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) 2412 return 0; 2413 2414 if (full_backref) 2415 parent = buf->start; 2416 else 2417 parent = 0; 2418 if (inc) 2419 action = BTRFS_ADD_DELAYED_REF; 2420 else 2421 action = BTRFS_DROP_DELAYED_REF; 2422 2423 for (i = 0; i < nritems; i++) { 2424 if (level == 0) { 2425 btrfs_item_key_to_cpu(buf, &key, i); 2426 if (key.type != BTRFS_EXTENT_DATA_KEY) 2427 continue; 2428 fi = btrfs_item_ptr(buf, i, 2429 struct btrfs_file_extent_item); 2430 if (btrfs_file_extent_type(buf, fi) == 2431 BTRFS_FILE_EXTENT_INLINE) 2432 continue; 2433 bytenr = btrfs_file_extent_disk_bytenr(buf, fi); 2434 if (bytenr == 0) 2435 continue; 2436 2437 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); 2438 key.offset -= btrfs_file_extent_offset(buf, fi); 2439 btrfs_init_generic_ref(&generic_ref, action, bytenr, 2440 num_bytes, parent); 2441 btrfs_init_data_ref(&generic_ref, ref_root, key.objectid, 2442 key.offset, root->root_key.objectid, 2443 for_reloc); 2444 if (inc) 2445 ret = btrfs_inc_extent_ref(trans, &generic_ref); 2446 else 2447 ret = btrfs_free_extent(trans, &generic_ref); 2448 if (ret) 2449 goto fail; 2450 } else { 2451 bytenr = btrfs_node_blockptr(buf, i); 2452 num_bytes = fs_info->nodesize; 2453 btrfs_init_generic_ref(&generic_ref, action, bytenr, 2454 num_bytes, parent); 2455 btrfs_init_tree_ref(&generic_ref, level - 1, ref_root, 2456 root->root_key.objectid, for_reloc); 
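/*
 * Node case (comment added for clarity): add or drop exactly one tree
 * ref on the child block, mirroring the file extent branch above.
 */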
2457 if (inc) 2458 ret = btrfs_inc_extent_ref(trans, &generic_ref); 2459 else 2460 ret = btrfs_free_extent(trans, &generic_ref); 2461 if (ret) 2462 goto fail; 2463 } 2464 } 2465 return 0; 2466 fail: 2467 return ret; 2468 } 2469 2470 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2471 struct extent_buffer *buf, int full_backref) 2472 { 2473 return __btrfs_mod_ref(trans, root, buf, full_backref, 1); 2474 } 2475 2476 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2477 struct extent_buffer *buf, int full_backref) 2478 { 2479 return __btrfs_mod_ref(trans, root, buf, full_backref, 0); 2480 } 2481 2482 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) 2483 { 2484 struct btrfs_fs_info *fs_info = root->fs_info; 2485 u64 flags; 2486 u64 ret; 2487 2488 if (data) 2489 flags = BTRFS_BLOCK_GROUP_DATA; 2490 else if (root == fs_info->chunk_root) 2491 flags = BTRFS_BLOCK_GROUP_SYSTEM; 2492 else 2493 flags = BTRFS_BLOCK_GROUP_METADATA; 2494 2495 ret = btrfs_get_alloc_profile(fs_info, flags); 2496 return ret; 2497 } 2498 2499 static u64 first_logical_byte(struct btrfs_fs_info *fs_info) 2500 { 2501 struct rb_node *leftmost; 2502 u64 bytenr = 0; 2503 2504 read_lock(&fs_info->block_group_cache_lock); 2505 /* Get the block group with the lowest logical start address. */ 2506 leftmost = rb_first_cached(&fs_info->block_group_cache_tree); 2507 if (leftmost) { 2508 struct btrfs_block_group *bg; 2509 2510 bg = rb_entry(leftmost, struct btrfs_block_group, cache_node); 2511 bytenr = bg->start; 2512 } 2513 read_unlock(&fs_info->block_group_cache_lock); 2514 2515 return bytenr; 2516 } 2517 2518 static int pin_down_extent(struct btrfs_trans_handle *trans, 2519 struct btrfs_block_group *cache, 2520 u64 bytenr, u64 num_bytes, int reserved) 2521 { 2522 struct btrfs_fs_info *fs_info = cache->fs_info; 2523 2524 spin_lock(&cache->space_info->lock); 2525 spin_lock(&cache->lock); 2526 cache->pinned += num_bytes; 2527 btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info, 2528 num_bytes); 2529 if (reserved) { 2530 cache->reserved -= num_bytes; 2531 cache->space_info->bytes_reserved -= num_bytes; 2532 } 2533 spin_unlock(&cache->lock); 2534 spin_unlock(&cache->space_info->lock); 2535 2536 set_extent_dirty(&trans->transaction->pinned_extents, bytenr, 2537 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); 2538 return 0; 2539 } 2540 2541 int btrfs_pin_extent(struct btrfs_trans_handle *trans, 2542 u64 bytenr, u64 num_bytes, int reserved) 2543 { 2544 struct btrfs_block_group *cache; 2545 2546 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); 2547 BUG_ON(!cache); /* Logic error */ 2548 2549 pin_down_extent(trans, cache, bytenr, num_bytes, reserved); 2550 2551 btrfs_put_block_group(cache); 2552 return 0; 2553 } 2554 2555 /* 2556 * this function must be called within a transaction 2557 */ 2558 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, 2559 u64 bytenr, u64 num_bytes) 2560 { 2561 struct btrfs_block_group *cache; 2562 int ret; 2563 2564 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); 2565 if (!cache) 2566 return -EINVAL; 2567 2568 /* 2569 * Fully cache the free space first so that our pin removes the free space 2570 * from the cache.
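 *
 * (Added note: if caching were still in progress, the caching thread
 * could re-add this range as free space after we pin it; waiting for
 * the block group to be fully cached first closes that window.)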
2571 */ 2572 ret = btrfs_cache_block_group(cache, true); 2573 if (ret) 2574 goto out; 2575 2576 pin_down_extent(trans, cache, bytenr, num_bytes, 0); 2577 2578 /* remove us from the free space cache (if we're there at all) */ 2579 ret = btrfs_remove_free_space(cache, bytenr, num_bytes); 2580 out: 2581 btrfs_put_block_group(cache); 2582 return ret; 2583 } 2584 2585 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, 2586 u64 start, u64 num_bytes) 2587 { 2588 int ret; 2589 struct btrfs_block_group *block_group; 2590 2591 block_group = btrfs_lookup_block_group(fs_info, start); 2592 if (!block_group) 2593 return -EINVAL; 2594 2595 ret = btrfs_cache_block_group(block_group, true); 2596 if (ret) 2597 goto out; 2598 2599 ret = btrfs_remove_free_space(block_group, start, num_bytes); 2600 out: 2601 btrfs_put_block_group(block_group); 2602 return ret; 2603 } 2604 2605 int btrfs_exclude_logged_extents(struct extent_buffer *eb) 2606 { 2607 struct btrfs_fs_info *fs_info = eb->fs_info; 2608 struct btrfs_file_extent_item *item; 2609 struct btrfs_key key; 2610 int found_type; 2611 int i; 2612 int ret = 0; 2613 2614 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) 2615 return 0; 2616 2617 for (i = 0; i < btrfs_header_nritems(eb); i++) { 2618 btrfs_item_key_to_cpu(eb, &key, i); 2619 if (key.type != BTRFS_EXTENT_DATA_KEY) 2620 continue; 2621 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); 2622 found_type = btrfs_file_extent_type(eb, item); 2623 if (found_type == BTRFS_FILE_EXTENT_INLINE) 2624 continue; 2625 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 2626 continue; 2627 key.objectid = btrfs_file_extent_disk_bytenr(eb, item); 2628 key.offset = btrfs_file_extent_disk_num_bytes(eb, item); 2629 ret = __exclude_logged_extent(fs_info, key.objectid, key.offset); 2630 if (ret) 2631 break; 2632 } 2633 2634 return ret; 2635 } 2636 2637 static void 2638 btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) 2639 { 2640 atomic_inc(&bg->reservations); 2641 } 2642 2643 /* 2644 * Returns the free cluster for the given space info and sets empty_cluster to 2645 * what it should be based on the mount options. 
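 *
 * For reference, the cases handled below are:
 *
 *   mixed space info               -> no cluster, *empty_cluster = 0
 *   METADATA, mounted with ssd     -> meta_alloc_cluster, 2M
 *   METADATA otherwise             -> meta_alloc_cluster, 64K
 *   DATA with ssd_spread           -> data_alloc_cluster, 2M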
2646 */ 2647 static struct btrfs_free_cluster * 2648 fetch_cluster_info(struct btrfs_fs_info *fs_info, 2649 struct btrfs_space_info *space_info, u64 *empty_cluster) 2650 { 2651 struct btrfs_free_cluster *ret = NULL; 2652 2653 *empty_cluster = 0; 2654 if (btrfs_mixed_space_info(space_info)) 2655 return ret; 2656 2657 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 2658 ret = &fs_info->meta_alloc_cluster; 2659 if (btrfs_test_opt(fs_info, SSD)) 2660 *empty_cluster = SZ_2M; 2661 else 2662 *empty_cluster = SZ_64K; 2663 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && 2664 btrfs_test_opt(fs_info, SSD_SPREAD)) { 2665 *empty_cluster = SZ_2M; 2666 ret = &fs_info->data_alloc_cluster; 2667 } 2668 2669 return ret; 2670 } 2671 2672 static int unpin_extent_range(struct btrfs_fs_info *fs_info, 2673 u64 start, u64 end, 2674 const bool return_free_space) 2675 { 2676 struct btrfs_block_group *cache = NULL; 2677 struct btrfs_space_info *space_info; 2678 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 2679 struct btrfs_free_cluster *cluster = NULL; 2680 u64 len; 2681 u64 total_unpinned = 0; 2682 u64 empty_cluster = 0; 2683 bool readonly; 2684 2685 while (start <= end) { 2686 readonly = false; 2687 if (!cache || 2688 start >= cache->start + cache->length) { 2689 if (cache) 2690 btrfs_put_block_group(cache); 2691 total_unpinned = 0; 2692 cache = btrfs_lookup_block_group(fs_info, start); 2693 BUG_ON(!cache); /* Logic error */ 2694 2695 cluster = fetch_cluster_info(fs_info, 2696 cache->space_info, 2697 &empty_cluster); 2698 empty_cluster <<= 1; 2699 } 2700 2701 len = cache->start + cache->length - start; 2702 len = min(len, end + 1 - start); 2703 2704 if (return_free_space) 2705 btrfs_add_free_space(cache, start, len); 2706 2707 start += len; 2708 total_unpinned += len; 2709 space_info = cache->space_info; 2710 2711 /* 2712 * If this space cluster has been marked as fragmented and we've 2713 * unpinned enough in this block group to potentially allow a 2714 * cluster to be created inside of it, go ahead and clear the 2715 * fragmented check.
2716 */ 2717 if (cluster && cluster->fragmented && 2718 total_unpinned > empty_cluster) { 2719 spin_lock(&cluster->lock); 2720 cluster->fragmented = 0; 2721 spin_unlock(&cluster->lock); 2722 } 2723 2724 spin_lock(&space_info->lock); 2725 spin_lock(&cache->lock); 2726 cache->pinned -= len; 2727 btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len); 2728 space_info->max_extent_size = 0; 2729 if (cache->ro) { 2730 space_info->bytes_readonly += len; 2731 readonly = true; 2732 } else if (btrfs_is_zoned(fs_info)) { 2733 /* Need reset before reusing in a zoned block group */ 2734 space_info->bytes_zone_unusable += len; 2735 readonly = true; 2736 } 2737 spin_unlock(&cache->lock); 2738 if (!readonly && return_free_space && 2739 global_rsv->space_info == space_info) { 2740 spin_lock(&global_rsv->lock); 2741 if (!global_rsv->full) { 2742 u64 to_add = min(len, global_rsv->size - 2743 global_rsv->reserved); 2744 2745 global_rsv->reserved += to_add; 2746 btrfs_space_info_update_bytes_may_use(fs_info, 2747 space_info, to_add); 2748 if (global_rsv->reserved >= global_rsv->size) 2749 global_rsv->full = 1; 2750 len -= to_add; 2751 } 2752 spin_unlock(&global_rsv->lock); 2753 } 2754 /* Add to any tickets we may have */ 2755 if (!readonly && return_free_space && len) 2756 btrfs_try_granting_tickets(fs_info, space_info); 2757 spin_unlock(&space_info->lock); 2758 } 2759 2760 if (cache) 2761 btrfs_put_block_group(cache); 2762 return 0; 2763 } 2764 2765 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) 2766 { 2767 struct btrfs_fs_info *fs_info = trans->fs_info; 2768 struct btrfs_block_group *block_group, *tmp; 2769 struct list_head *deleted_bgs; 2770 struct extent_io_tree *unpin; 2771 u64 start; 2772 u64 end; 2773 int ret; 2774 2775 unpin = &trans->transaction->pinned_extents; 2776 2777 while (!TRANS_ABORTED(trans)) { 2778 struct extent_state *cached_state = NULL; 2779 2780 mutex_lock(&fs_info->unused_bg_unpin_mutex); 2781 ret = find_first_extent_bit(unpin, 0, &start, &end, 2782 EXTENT_DIRTY, &cached_state); 2783 if (ret) { 2784 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 2785 break; 2786 } 2787 2788 if (btrfs_test_opt(fs_info, DISCARD_SYNC)) 2789 ret = btrfs_discard_extent(fs_info, start, 2790 end + 1 - start, NULL); 2791 2792 clear_extent_dirty(unpin, start, end, &cached_state); 2793 unpin_extent_range(fs_info, start, end, true); 2794 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 2795 free_extent_state(cached_state); 2796 cond_resched(); 2797 } 2798 2799 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { 2800 btrfs_discard_calc_delay(&fs_info->discard_ctl); 2801 btrfs_discard_schedule_work(&fs_info->discard_ctl, true); 2802 } 2803 2804 /* 2805 * Transaction is finished. We don't need the lock anymore. We 2806 * do need to clean up the block groups in case of a transaction 2807 * abort. 
2808 */ 2809 deleted_bgs = &trans->transaction->deleted_bgs; 2810 list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) { 2811 u64 trimmed = 0; 2812 2813 ret = -EROFS; 2814 if (!TRANS_ABORTED(trans)) 2815 ret = btrfs_discard_extent(fs_info, 2816 block_group->start, 2817 block_group->length, 2818 &trimmed); 2819 2820 list_del_init(&block_group->bg_list); 2821 btrfs_unfreeze_block_group(block_group); 2822 btrfs_put_block_group(block_group); 2823 2824 if (ret) { 2825 const char *errstr = btrfs_decode_error(ret); 2826 btrfs_warn(fs_info, 2827 "discard failed while removing blockgroup: errno=%d %s", 2828 ret, errstr); 2829 } 2830 } 2831 2832 return 0; 2833 } 2834 2835 static int do_free_extent_accounting(struct btrfs_trans_handle *trans, 2836 u64 bytenr, u64 num_bytes, bool is_data) 2837 { 2838 int ret; 2839 2840 if (is_data) { 2841 struct btrfs_root *csum_root; 2842 2843 csum_root = btrfs_csum_root(trans->fs_info, bytenr); 2844 ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes); 2845 if (ret) { 2846 btrfs_abort_transaction(trans, ret); 2847 return ret; 2848 } 2849 } 2850 2851 ret = add_to_free_space_tree(trans, bytenr, num_bytes); 2852 if (ret) { 2853 btrfs_abort_transaction(trans, ret); 2854 return ret; 2855 } 2856 2857 ret = btrfs_update_block_group(trans, bytenr, num_bytes, false); 2858 if (ret) 2859 btrfs_abort_transaction(trans, ret); 2860 2861 return ret; 2862 } 2863 2864 /* 2865 * Drop one or more refs of @node. 2866 * 2867 * 1. Locate the extent refs. 2868 * It's either inline in EXTENT/METADATA_ITEM or in keyed SHARED_* item. 2869 * Locate it, then reduce the refs number or remove the ref line completely. 2870 * 2871 * 2. Update the refs count in EXTENT/METADATA_ITEM 2872 * 2873 * Inline backref case: 2874 * 2875 * in extent tree we have: 2876 * 2877 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82 2878 * refs 2 gen 6 flags DATA 2879 * extent data backref root FS_TREE objectid 258 offset 0 count 1 2880 * extent data backref root FS_TREE objectid 257 offset 0 count 1 2881 * 2882 * This function gets called with: 2883 * 2884 * node->bytenr = 13631488 2885 * node->num_bytes = 1048576 2886 * root_objectid = FS_TREE 2887 * owner_objectid = 257 2888 * owner_offset = 0 2889 * refs_to_drop = 1 2890 * 2891 * Then we should get something like: 2892 * 2893 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82 2894 * refs 1 gen 6 flags DATA 2895 * extent data backref root FS_TREE objectid 258 offset 0 count 1 2896 * 2897 * Keyed backref case: 2898 * 2899 * in extent tree we have: 2900 * 2901 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24 2902 * refs 754 gen 6 flags DATA 2903 * [...] 2904 * item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28 2905 * extent data backref root FS_TREE objectid 866 offset 0 count 1 2906 * 2907 * This function gets called with: 2908 * 2909 * node->bytenr = 13631488 2910 * node->num_bytes = 1048576 2911 * root_objectid = FS_TREE 2912 * owner_objectid = 866 2913 * owner_offset = 0 2914 * refs_to_drop = 1 2915 * 2916 * Then we should get something like: 2917 * 2918 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24 2919 * refs 753 gen 6 flags DATA 2920 * 2921 * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed.
2922 */ 2923 static int __btrfs_free_extent(struct btrfs_trans_handle *trans, 2924 struct btrfs_delayed_ref_node *node, u64 parent, 2925 u64 root_objectid, u64 owner_objectid, 2926 u64 owner_offset, int refs_to_drop, 2927 struct btrfs_delayed_extent_op *extent_op) 2928 { 2929 struct btrfs_fs_info *info = trans->fs_info; 2930 struct btrfs_key key; 2931 struct btrfs_path *path; 2932 struct btrfs_root *extent_root; 2933 struct extent_buffer *leaf; 2934 struct btrfs_extent_item *ei; 2935 struct btrfs_extent_inline_ref *iref; 2936 int ret; 2937 int is_data; 2938 int extent_slot = 0; 2939 int found_extent = 0; 2940 int num_to_del = 1; 2941 u32 item_size; 2942 u64 refs; 2943 u64 bytenr = node->bytenr; 2944 u64 num_bytes = node->num_bytes; 2945 bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA); 2946 2947 extent_root = btrfs_extent_root(info, bytenr); 2948 ASSERT(extent_root); 2949 2950 path = btrfs_alloc_path(); 2951 if (!path) 2952 return -ENOMEM; 2953 2954 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID; 2955 2956 if (!is_data && refs_to_drop != 1) { 2957 btrfs_crit(info, 2958 "invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u", 2959 node->bytenr, refs_to_drop); 2960 ret = -EINVAL; 2961 btrfs_abort_transaction(trans, ret); 2962 goto out; 2963 } 2964 2965 if (is_data) 2966 skinny_metadata = false; 2967 2968 ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes, 2969 parent, root_objectid, owner_objectid, 2970 owner_offset); 2971 if (ret == 0) { 2972 /* 2973 * Either the inline backref or the SHARED_DATA_REF/ 2974 * SHARED_BLOCK_REF is found 2975 * 2976 * Here is a quick path to locate EXTENT/METADATA_ITEM. 2977 * It's possible the EXTENT/METADATA_ITEM is near the current slot. 2978 */ 2979 extent_slot = path->slots[0]; 2980 while (extent_slot >= 0) { 2981 btrfs_item_key_to_cpu(path->nodes[0], &key, 2982 extent_slot); 2983 if (key.objectid != bytenr) 2984 break; 2985 if (key.type == BTRFS_EXTENT_ITEM_KEY && 2986 key.offset == num_bytes) { 2987 found_extent = 1; 2988 break; 2989 } 2990 if (key.type == BTRFS_METADATA_ITEM_KEY && 2991 key.offset == owner_objectid) { 2992 found_extent = 1; 2993 break; 2994 } 2995 2996 /* Quick path didn't find the EXTENT/METADATA_ITEM */ 2997 if (path->slots[0] - extent_slot > 5) 2998 break; 2999 extent_slot--; 3000 } 3001 3002 if (!found_extent) { 3003 if (iref) { 3004 btrfs_crit(info, 3005 "invalid iref, no EXTENT/METADATA_ITEM found but has inline extent ref"); 3006 btrfs_abort_transaction(trans, -EUCLEAN); 3007 goto err_dump; 3008 } 3009 /* Must be SHARED_* item, remove the backref first */ 3010 ret = remove_extent_backref(trans, extent_root, path, 3011 NULL, refs_to_drop, is_data); 3012 if (ret) { 3013 btrfs_abort_transaction(trans, ret); 3014 goto out; 3015 } 3016 btrfs_release_path(path); 3017 3018 /* Slow path to locate EXTENT/METADATA_ITEM */ 3019 key.objectid = bytenr; 3020 key.type = BTRFS_EXTENT_ITEM_KEY; 3021 key.offset = num_bytes; 3022 3023 if (!is_data && skinny_metadata) { 3024 key.type = BTRFS_METADATA_ITEM_KEY; 3025 key.offset = owner_objectid; 3026 } 3027 3028 ret = btrfs_search_slot(trans, extent_root, 3029 &key, path, -1, 1); 3030 if (ret > 0 && skinny_metadata && path->slots[0]) { 3031 /* 3032 * Couldn't find our skinny metadata item, 3033 * see if we have ye olde extent item.
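 * (Step back one slot and, if the previous key is an EXTENT_ITEM for
 * the same bytenr covering num_bytes, use that non-skinny form instead.)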
3034 */ 3035 path->slots[0]--; 3036 btrfs_item_key_to_cpu(path->nodes[0], &key, 3037 path->slots[0]); 3038 if (key.objectid == bytenr && 3039 key.type == BTRFS_EXTENT_ITEM_KEY && 3040 key.offset == num_bytes) 3041 ret = 0; 3042 } 3043 3044 if (ret > 0 && skinny_metadata) { 3045 skinny_metadata = false; 3046 key.objectid = bytenr; 3047 key.type = BTRFS_EXTENT_ITEM_KEY; 3048 key.offset = num_bytes; 3049 btrfs_release_path(path); 3050 ret = btrfs_search_slot(trans, extent_root, 3051 &key, path, -1, 1); 3052 } 3053 3054 if (ret) { 3055 btrfs_err(info, 3056 "umm, got %d back from search, was looking for %llu", 3057 ret, bytenr); 3058 if (ret > 0) 3059 btrfs_print_leaf(path->nodes[0]); 3060 } 3061 if (ret < 0) { 3062 btrfs_abort_transaction(trans, ret); 3063 goto out; 3064 } 3065 extent_slot = path->slots[0]; 3066 } 3067 } else if (WARN_ON(ret == -ENOENT)) { 3068 btrfs_print_leaf(path->nodes[0]); 3069 btrfs_err(info, 3070 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu", 3071 bytenr, parent, root_objectid, owner_objectid, 3072 owner_offset); 3073 btrfs_abort_transaction(trans, ret); 3074 goto out; 3075 } else { 3076 btrfs_abort_transaction(trans, ret); 3077 goto out; 3078 } 3079 3080 leaf = path->nodes[0]; 3081 item_size = btrfs_item_size(leaf, extent_slot); 3082 if (unlikely(item_size < sizeof(*ei))) { 3083 ret = -EINVAL; 3084 btrfs_print_v0_err(info); 3085 btrfs_abort_transaction(trans, ret); 3086 goto out; 3087 } 3088 ei = btrfs_item_ptr(leaf, extent_slot, 3089 struct btrfs_extent_item); 3090 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID && 3091 key.type == BTRFS_EXTENT_ITEM_KEY) { 3092 struct btrfs_tree_block_info *bi; 3093 if (item_size < sizeof(*ei) + sizeof(*bi)) { 3094 btrfs_crit(info, 3095 "invalid extent item size for key (%llu, %u, %llu) owner %llu, has %u expect >= %zu", 3096 key.objectid, key.type, key.offset, 3097 owner_objectid, item_size, 3098 sizeof(*ei) + sizeof(*bi)); 3099 btrfs_abort_transaction(trans, -EUCLEAN); 3100 goto err_dump; 3101 } 3102 bi = (struct btrfs_tree_block_info *)(ei + 1); 3103 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); 3104 } 3105 3106 refs = btrfs_extent_refs(leaf, ei); 3107 if (refs < refs_to_drop) { 3108 btrfs_crit(info, 3109 "trying to drop %d refs but we only have %llu for bytenr %llu", 3110 refs_to_drop, refs, bytenr); 3111 btrfs_abort_transaction(trans, -EUCLEAN); 3112 goto err_dump; 3113 } 3114 refs -= refs_to_drop; 3115 3116 if (refs > 0) { 3117 if (extent_op) 3118 __run_delayed_extent_op(extent_op, leaf, ei); 3119 /* 3120 * In the case of inline back ref, reference count will 3121 * be updated by remove_extent_backref 3122 */ 3123 if (iref) { 3124 if (!found_extent) { 3125 btrfs_crit(info, 3126 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found"); 3127 btrfs_abort_transaction(trans, -EUCLEAN); 3128 goto err_dump; 3129 } 3130 } else { 3131 btrfs_set_extent_refs(leaf, ei, refs); 3132 btrfs_mark_buffer_dirty(leaf); 3133 } 3134 if (found_extent) { 3135 ret = remove_extent_backref(trans, extent_root, path, 3136 iref, refs_to_drop, is_data); 3137 if (ret) { 3138 btrfs_abort_transaction(trans, ret); 3139 goto out; 3140 } 3141 } 3142 } else { 3143 /* In this branch refs == 1 */ 3144 if (found_extent) { 3145 if (is_data && refs_to_drop != 3146 extent_data_ref_count(path, iref)) { 3147 btrfs_crit(info, 3148 "invalid refs_to_drop, current refs %u refs_to_drop %u", 3149 extent_data_ref_count(path, iref), 3150 refs_to_drop); 3151 btrfs_abort_transaction(trans, -EUCLEAN); 3152 goto err_dump; 
3153 } 3154 if (iref) { 3155 if (path->slots[0] != extent_slot) { 3156 btrfs_crit(info, 3157 "invalid iref, extent item key (%llu %u %llu) doesn't have wanted iref", 3158 key.objectid, key.type, 3159 key.offset); 3160 btrfs_abort_transaction(trans, -EUCLEAN); 3161 goto err_dump; 3162 } 3163 } else { 3164 /* 3165 * No inline ref, we must be at a SHARED_* item, and since 3166 * it's the only ref the layout must be: 3167 * | extent_slot ||extent_slot + 1| 3168 * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ] 3169 */ 3170 if (path->slots[0] != extent_slot + 1) { 3171 btrfs_crit(info, 3172 "invalid SHARED_* item, previous item is not EXTENT/METADATA_ITEM"); 3173 btrfs_abort_transaction(trans, -EUCLEAN); 3174 goto err_dump; 3175 } 3176 path->slots[0] = extent_slot; 3177 num_to_del = 2; 3178 } 3179 } 3180 3181 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], 3182 num_to_del); 3183 if (ret) { 3184 btrfs_abort_transaction(trans, ret); 3185 goto out; 3186 } 3187 btrfs_release_path(path); 3188 3189 ret = do_free_extent_accounting(trans, bytenr, num_bytes, is_data); 3190 } 3191 btrfs_release_path(path); 3192 3193 out: 3194 btrfs_free_path(path); 3195 return ret; 3196 err_dump: 3197 /* 3198 * Leaf dump can take up a lot of log buffer, so we only do full leaf 3199 * dump for debug builds. 3200 */ 3201 if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) { 3202 btrfs_crit(info, "path->slots[0]=%d extent_slot=%d", 3203 path->slots[0], extent_slot); 3204 btrfs_print_leaf(path->nodes[0]); 3205 } 3206 3207 btrfs_free_path(path); 3208 return -EUCLEAN; 3209 } 3210 3211 /* 3212 * when we free a block, it is possible (and likely) that we free the last 3213 * delayed ref for that extent as well. This searches the delayed ref tree for 3214 * a given extent, and if there are no other delayed refs to be processed, it 3215 * removes it from the tree. 3216 */ 3217 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, 3218 u64 bytenr) 3219 { 3220 struct btrfs_delayed_ref_head *head; 3221 struct btrfs_delayed_ref_root *delayed_refs; 3222 int ret = 0; 3223 3224 delayed_refs = &trans->transaction->delayed_refs; 3225 spin_lock(&delayed_refs->lock); 3226 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); 3227 if (!head) 3228 goto out_delayed_unlock; 3229 3230 spin_lock(&head->lock); 3231 if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root)) 3232 goto out; 3233 3234 if (cleanup_extent_op(head) != NULL) 3235 goto out; 3236 3237 /* 3238 * waiting for the lock here would deadlock.
If someone else has it 3239 * locked they are already in the process of dropping it anyway 3240 */ 3241 if (!mutex_trylock(&head->mutex)) 3242 goto out; 3243 3244 btrfs_delete_ref_head(delayed_refs, head); 3245 head->processing = 0; 3246 3247 spin_unlock(&head->lock); 3248 spin_unlock(&delayed_refs->lock); 3249 3250 BUG_ON(head->extent_op); 3251 if (head->must_insert_reserved) 3252 ret = 1; 3253 3254 btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); 3255 mutex_unlock(&head->mutex); 3256 btrfs_put_delayed_ref_head(head); 3257 return ret; 3258 out: 3259 spin_unlock(&head->lock); 3260 3261 out_delayed_unlock: 3262 spin_unlock(&delayed_refs->lock); 3263 return 0; 3264 } 3265 3266 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 3267 u64 root_id, 3268 struct extent_buffer *buf, 3269 u64 parent, int last_ref) 3270 { 3271 struct btrfs_fs_info *fs_info = trans->fs_info; 3272 struct btrfs_ref generic_ref = { 0 }; 3273 int ret; 3274 3275 btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, 3276 buf->start, buf->len, parent); 3277 btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 3278 root_id, 0, false); 3279 3280 if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3281 btrfs_ref_tree_mod(fs_info, &generic_ref); 3282 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL); 3283 BUG_ON(ret); /* -ENOMEM */ 3284 } 3285 3286 if (last_ref && btrfs_header_generation(buf) == trans->transid) { 3287 struct btrfs_block_group *cache; 3288 bool must_pin = false; 3289 3290 if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3291 ret = check_ref_cleanup(trans, buf->start); 3292 if (!ret) { 3293 btrfs_redirty_list_add(trans->transaction, buf); 3294 goto out; 3295 } 3296 } 3297 3298 cache = btrfs_lookup_block_group(fs_info, buf->start); 3299 3300 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 3301 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3302 btrfs_put_block_group(cache); 3303 goto out; 3304 } 3305 3306 /* 3307 * If there are tree mod log users we may have recorded mod log 3308 * operations for this node. If we re-allocate this node we 3309 * could replay operations on this node that happened when it 3310 * existed in a completely different root. For example if it 3311 * was part of root A, then was reallocated to root B, and we 3312 * are doing a btrfs_old_search_slot(root b), we could replay 3313 * operations that happened when the block was part of root A, 3314 * giving us an inconsistent view of the btree. 3315 * 3316 * We are safe from races here because at this point no other 3317 * node or root points to this extent buffer, so if after this 3318 * check a new tree mod log user joins we will not have an 3319 * existing log of operations on this node that we have to 3320 * contend with. 3321 */ 3322 if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) 3323 must_pin = true; 3324 3325 if (must_pin || btrfs_is_zoned(fs_info)) { 3326 btrfs_redirty_list_add(trans->transaction, buf); 3327 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3328 btrfs_put_block_group(cache); 3329 goto out; 3330 } 3331 3332 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 3333 3334 btrfs_add_free_space(cache, buf->start, buf->len); 3335 btrfs_free_reserved_bytes(cache, buf->len, 0); 3336 btrfs_put_block_group(cache); 3337 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len); 3338 } 3339 out: 3340 if (last_ref) { 3341 /* 3342 * Deleting the buffer, clear the corrupt flag since it doesn't 3343 * matter anymore. 
3344 */ 3345 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); 3346 } 3347 } 3348 3349 /* Can return -ENOMEM */ 3350 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) 3351 { 3352 struct btrfs_fs_info *fs_info = trans->fs_info; 3353 int ret; 3354 3355 if (btrfs_is_testing(fs_info)) 3356 return 0; 3357 3358 /* 3359 * tree log blocks never actually go into the extent allocation 3360 * tree, just update pinning info and exit early. 3361 */ 3362 if ((ref->type == BTRFS_REF_METADATA && 3363 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || 3364 (ref->type == BTRFS_REF_DATA && 3365 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)) { 3366 /* unlocks the pinned mutex */ 3367 btrfs_pin_extent(trans, ref->bytenr, ref->len, 1); 3368 ret = 0; 3369 } else if (ref->type == BTRFS_REF_METADATA) { 3370 ret = btrfs_add_delayed_tree_ref(trans, ref, NULL); 3371 } else { 3372 ret = btrfs_add_delayed_data_ref(trans, ref, 0); 3373 } 3374 3375 if (!((ref->type == BTRFS_REF_METADATA && 3376 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || 3377 (ref->type == BTRFS_REF_DATA && 3378 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID))) 3379 btrfs_ref_tree_mod(fs_info, ref); 3380 3381 return ret; 3382 } 3383 3384 enum btrfs_loop_type { 3385 LOOP_CACHING_NOWAIT, 3386 LOOP_CACHING_WAIT, 3387 LOOP_UNSET_SIZE_CLASS, 3388 LOOP_ALLOC_CHUNK, 3389 LOOP_WRONG_SIZE_CLASS, 3390 LOOP_NO_EMPTY_SIZE, 3391 }; 3392 3393 static inline void 3394 btrfs_lock_block_group(struct btrfs_block_group *cache, 3395 int delalloc) 3396 { 3397 if (delalloc) 3398 down_read(&cache->data_rwsem); 3399 } 3400 3401 static inline void btrfs_grab_block_group(struct btrfs_block_group *cache, 3402 int delalloc) 3403 { 3404 btrfs_get_block_group(cache); 3405 if (delalloc) 3406 down_read(&cache->data_rwsem); 3407 } 3408 3409 static struct btrfs_block_group *btrfs_lock_cluster( 3410 struct btrfs_block_group *block_group, 3411 struct btrfs_free_cluster *cluster, 3412 int delalloc) 3413 __acquires(&cluster->refill_lock) 3414 { 3415 struct btrfs_block_group *used_bg = NULL; 3416 3417 spin_lock(&cluster->refill_lock); 3418 while (1) { 3419 used_bg = cluster->block_group; 3420 if (!used_bg) 3421 return NULL; 3422 3423 if (used_bg == block_group) 3424 return used_bg; 3425 3426 btrfs_get_block_group(used_bg); 3427 3428 if (!delalloc) 3429 return used_bg; 3430 3431 if (down_read_trylock(&used_bg->data_rwsem)) 3432 return used_bg; 3433 3434 spin_unlock(&cluster->refill_lock); 3435 3436 /* We should only ever see one level of nesting here. */ 3437 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING); 3438 3439 spin_lock(&cluster->refill_lock); 3440 if (used_bg == cluster->block_group) 3441 return used_bg; 3442 3443 up_read(&used_bg->data_rwsem); 3444 btrfs_put_block_group(used_bg); 3445 } 3446 } 3447 3448 static inline void 3449 btrfs_release_block_group(struct btrfs_block_group *cache, 3450 int delalloc) 3451 { 3452 if (delalloc) 3453 up_read(&cache->data_rwsem); 3454 btrfs_put_block_group(cache); 3455 } 3456 3457 /* 3458 * Helper function for find_free_extent(). 3459 * 3460 * Return -ENOENT to inform caller that we need to fall back to unclustered mode. 3461 * Return -EAGAIN to inform caller that we need to re-search this block group. 3462 * Return >0 to inform caller that we found nothing. 3463 * Return 0 means we have found a location and set ffe_ctl->found_offset.
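 *
 * Caller-side handling, schematically (this mirrors
 * do_allocation_clustered() further below):
 *
 *   ret = find_free_extent_clustered(bg, ffe_ctl, &cluster_bg);
 *   if (ret >= 0 || ret == -EAGAIN)
 *           return ret;
 *   (ret == -ENOENT: fall back to find_free_extent_unclustered())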
3464 */ 3465 static int find_free_extent_clustered(struct btrfs_block_group *bg, 3466 struct find_free_extent_ctl *ffe_ctl, 3467 struct btrfs_block_group **cluster_bg_ret) 3468 { 3469 struct btrfs_block_group *cluster_bg; 3470 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3471 u64 aligned_cluster; 3472 u64 offset; 3473 int ret; 3474 3475 cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc); 3476 if (!cluster_bg) 3477 goto refill_cluster; 3478 if (cluster_bg != bg && (cluster_bg->ro || 3479 !block_group_bits(cluster_bg, ffe_ctl->flags))) 3480 goto release_cluster; 3481 3482 offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr, 3483 ffe_ctl->num_bytes, cluster_bg->start, 3484 &ffe_ctl->max_extent_size); 3485 if (offset) { 3486 /* We have a block, we're done */ 3487 spin_unlock(&last_ptr->refill_lock); 3488 trace_btrfs_reserve_extent_cluster(cluster_bg, ffe_ctl); 3489 *cluster_bg_ret = cluster_bg; 3490 ffe_ctl->found_offset = offset; 3491 return 0; 3492 } 3493 WARN_ON(last_ptr->block_group != cluster_bg); 3494 3495 release_cluster: 3496 /* 3497 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so 3498 * let's just skip it and let the allocator find whatever block it can 3499 * find. If we reach this point, we will have tried the cluster 3500 * allocator plenty of times and not have found anything, so we are 3501 * likely way too fragmented for the clustering stuff to find anything. 3502 * 3503 * However, if the cluster is taken from the current block group, 3504 * release the cluster first, so that we stand a better chance of 3505 * succeeding in the unclustered allocation. 3506 */ 3507 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) { 3508 spin_unlock(&last_ptr->refill_lock); 3509 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); 3510 return -ENOENT; 3511 } 3512 3513 /* This cluster didn't work out, free it and start over */ 3514 btrfs_return_cluster_to_free_space(NULL, last_ptr); 3515 3516 if (cluster_bg != bg) 3517 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); 3518 3519 refill_cluster: 3520 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) { 3521 spin_unlock(&last_ptr->refill_lock); 3522 return -ENOENT; 3523 } 3524 3525 aligned_cluster = max_t(u64, 3526 ffe_ctl->empty_cluster + ffe_ctl->empty_size, 3527 bg->full_stripe_len); 3528 ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start, 3529 ffe_ctl->num_bytes, aligned_cluster); 3530 if (ret == 0) { 3531 /* Now pull our allocation out of this cluster */ 3532 offset = btrfs_alloc_from_cluster(bg, last_ptr, 3533 ffe_ctl->num_bytes, ffe_ctl->search_start, 3534 &ffe_ctl->max_extent_size); 3535 if (offset) { 3536 /* We found one, proceed */ 3537 spin_unlock(&last_ptr->refill_lock); 3538 ffe_ctl->found_offset = offset; 3539 trace_btrfs_reserve_extent_cluster(bg, ffe_ctl); 3540 return 0; 3541 } 3542 } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT && 3543 !ffe_ctl->retry_clustered) { 3544 spin_unlock(&last_ptr->refill_lock); 3545 3546 ffe_ctl->retry_clustered = true; 3547 btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + 3548 ffe_ctl->empty_cluster + ffe_ctl->empty_size); 3549 return -EAGAIN; 3550 } 3551 /* 3552 * At this point we either didn't find a cluster or we weren't able to 3553 * allocate a block from our cluster. Free the cluster we've been 3554 * trying to use, and go to the next block group.
3555 */ 3556 btrfs_return_cluster_to_free_space(NULL, last_ptr); 3557 spin_unlock(&last_ptr->refill_lock); 3558 return 1; 3559 } 3560 3561 /* 3562 * Return >0 to inform caller that we found nothing 3563 * Return 0 when we found a free extent and set ffe_ctl->found_offset 3564 * Return -EAGAIN to inform caller that we need to re-search this block group 3565 */ 3566 static int find_free_extent_unclustered(struct btrfs_block_group *bg, 3567 struct find_free_extent_ctl *ffe_ctl) 3568 { 3569 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3570 u64 offset; 3571 3572 /* 3573 * We are doing an unclustered allocation, set the fragmented flag so 3574 * we don't bother trying to set up a cluster again until we get more 3575 * space. 3576 */ 3577 if (unlikely(last_ptr)) { 3578 spin_lock(&last_ptr->lock); 3579 last_ptr->fragmented = 1; 3580 spin_unlock(&last_ptr->lock); 3581 } 3582 if (ffe_ctl->cached) { 3583 struct btrfs_free_space_ctl *free_space_ctl; 3584 3585 free_space_ctl = bg->free_space_ctl; 3586 spin_lock(&free_space_ctl->tree_lock); 3587 if (free_space_ctl->free_space < 3588 ffe_ctl->num_bytes + ffe_ctl->empty_cluster + 3589 ffe_ctl->empty_size) { 3590 ffe_ctl->total_free_space = max_t(u64, 3591 ffe_ctl->total_free_space, 3592 free_space_ctl->free_space); 3593 spin_unlock(&free_space_ctl->tree_lock); 3594 return 1; 3595 } 3596 spin_unlock(&free_space_ctl->tree_lock); 3597 } 3598 3599 offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start, 3600 ffe_ctl->num_bytes, ffe_ctl->empty_size, 3601 &ffe_ctl->max_extent_size); 3602 3603 /* 3604 * If we didn't find a chunk, and we haven't failed on this block group 3605 * before, and this block group is in the middle of caching and we are 3606 * ok with waiting, then go ahead and wait for progress to be made, and 3607 * set @retry_unclustered to true. 3608 * 3609 * If @retry_unclustered is true then we've already waited on this 3610 * block group once and should move on to the next block group. 3611 */ 3612 if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached && 3613 ffe_ctl->loop > LOOP_CACHING_NOWAIT) { 3614 btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + 3615 ffe_ctl->empty_size); 3616 ffe_ctl->retry_unclustered = true; 3617 return -EAGAIN; 3618 } else if (!offset) { 3619 return 1; 3620 } 3621 ffe_ctl->found_offset = offset; 3622 return 0; 3623 } 3624 3625 static int do_allocation_clustered(struct btrfs_block_group *block_group, 3626 struct find_free_extent_ctl *ffe_ctl, 3627 struct btrfs_block_group **bg_ret) 3628 { 3629 int ret; 3630 3631 /* We want to try and use the cluster allocator, so let's look there */ 3632 if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) { 3633 ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret); 3634 if (ret >= 0 || ret == -EAGAIN) 3635 return ret; 3636 /* ret == -ENOENT case falls through */ 3637 } 3638 3639 return find_free_extent_unclustered(block_group, ffe_ctl); 3640 } 3641 3642 /* 3643 * Tree-log block group locking 3644 * ============================ 3645 * 3646 * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which 3647 * indicates the starting address of a block group, which is reserved only 3648 * for tree-log metadata. 3649 * 3650 * Lock nesting 3651 * ============ 3652 * 3653 * space_info::lock 3654 * block_group::lock 3655 * fs_info::treelog_bg_lock 3656 */ 3657 3658 /* 3659 * Simple allocator for sequential-only block groups. It only allows sequential 3660 * allocation. No need to play with trees.
This function also reserves the 3661 * bytes as in btrfs_add_reserved_bytes. 3662 */ 3663 static int do_allocation_zoned(struct btrfs_block_group *block_group, 3664 struct find_free_extent_ctl *ffe_ctl, 3665 struct btrfs_block_group **bg_ret) 3666 { 3667 struct btrfs_fs_info *fs_info = block_group->fs_info; 3668 struct btrfs_space_info *space_info = block_group->space_info; 3669 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3670 u64 start = block_group->start; 3671 u64 num_bytes = ffe_ctl->num_bytes; 3672 u64 avail; 3673 u64 bytenr = block_group->start; 3674 u64 log_bytenr; 3675 u64 data_reloc_bytenr; 3676 int ret = 0; 3677 bool skip = false; 3678 3679 ASSERT(btrfs_is_zoned(block_group->fs_info)); 3680 3681 /* 3682 * Do not allow non-tree-log blocks in the dedicated tree-log block 3683 * group, and vice versa. 3684 */ 3685 spin_lock(&fs_info->treelog_bg_lock); 3686 log_bytenr = fs_info->treelog_bg; 3687 if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) || 3688 (!ffe_ctl->for_treelog && bytenr == log_bytenr))) 3689 skip = true; 3690 spin_unlock(&fs_info->treelog_bg_lock); 3691 if (skip) 3692 return 1; 3693 3694 /* 3695 * Do not allow non-relocation blocks in the dedicated relocation block 3696 * group, and vice versa. 3697 */ 3698 spin_lock(&fs_info->relocation_bg_lock); 3699 data_reloc_bytenr = fs_info->data_reloc_bg; 3700 if (data_reloc_bytenr && 3701 ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) || 3702 (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr))) 3703 skip = true; 3704 spin_unlock(&fs_info->relocation_bg_lock); 3705 if (skip) 3706 return 1; 3707 3708 /* Check RO and no space case before trying to activate it */ 3709 spin_lock(&block_group->lock); 3710 if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) { 3711 ret = 1; 3712 /* 3713 * May need to clear fs_info->{treelog,data_reloc}_bg. 3714 * Return the error after taking the locks. 3715 */ 3716 } 3717 spin_unlock(&block_group->lock); 3718 3719 if (!ret && !btrfs_zone_activate(block_group)) { 3720 ret = 1; 3721 /* 3722 * May need to clear fs_info->{treelog,data_reloc}_bg. 3723 * Return the error after taking the locks. 3724 */ 3725 } 3726 3727 spin_lock(&space_info->lock); 3728 spin_lock(&block_group->lock); 3729 spin_lock(&fs_info->treelog_bg_lock); 3730 spin_lock(&fs_info->relocation_bg_lock); 3731 3732 if (ret) 3733 goto out; 3734 3735 ASSERT(!ffe_ctl->for_treelog || 3736 block_group->start == fs_info->treelog_bg || 3737 fs_info->treelog_bg == 0); 3738 ASSERT(!ffe_ctl->for_data_reloc || 3739 block_group->start == fs_info->data_reloc_bg || 3740 fs_info->data_reloc_bg == 0); 3741 3742 if (block_group->ro || 3743 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) { 3744 ret = 1; 3745 goto out; 3746 } 3747 3748 /* 3749 * Do not allow a block group that is currently in use to become the 3750 * dedicated tree-log block group. 3751 */ 3752 if (ffe_ctl->for_treelog && !fs_info->treelog_bg && 3753 (block_group->used || block_group->reserved)) { 3754 ret = 1; 3755 goto out; 3756 } 3757 3758 /* 3759 * Similarly, do not allow a block group that is currently in use to 3760 * become the dedicated data relocation block group.
3761 */ 3762 if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg && 3763 (block_group->used || block_group->reserved)) { 3764 ret = 1; 3765 goto out; 3766 } 3767 3768 WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity); 3769 avail = block_group->zone_capacity - block_group->alloc_offset; 3770 if (avail < num_bytes) { 3771 if (ffe_ctl->max_extent_size < avail) { 3772 /* 3773 * With the sequential allocator, free space is always 3774 * contiguous 3775 */ 3776 ffe_ctl->max_extent_size = avail; 3777 ffe_ctl->total_free_space = avail; 3778 } 3779 ret = 1; 3780 goto out; 3781 } 3782 3783 if (ffe_ctl->for_treelog && !fs_info->treelog_bg) 3784 fs_info->treelog_bg = block_group->start; 3785 3786 if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg) 3787 fs_info->data_reloc_bg = block_group->start; 3788 3789 ffe_ctl->found_offset = start + block_group->alloc_offset; 3790 block_group->alloc_offset += num_bytes; 3791 spin_lock(&ctl->tree_lock); 3792 ctl->free_space -= num_bytes; 3793 spin_unlock(&ctl->tree_lock); 3794 3795 /* 3796 * We do not check if found_offset is aligned to stripesize. The 3797 * address is rewritten anyway when using zone append writing. 3798 */ 3799 3800 ffe_ctl->search_start = ffe_ctl->found_offset; 3801 3802 out: 3803 if (ret && ffe_ctl->for_treelog) 3804 fs_info->treelog_bg = 0; 3805 if (ret && ffe_ctl->for_data_reloc && 3806 fs_info->data_reloc_bg == block_group->start) { 3807 /* 3808 * Do not allow further allocations from this block group. 3809 * Compared to increasing the ->ro, setting the 3810 * ->zoned_data_reloc_ongoing flag still allows nocow 3811 * writers to come in. See btrfs_inc_nocow_writers(). 3812 * 3813 * We need to disable allocations here to avoid allocating a 3814 * regular (non-relocation) data extent. With a mix of relocation 3815 * extents and regular extents, we could dispatch WRITE commands 3816 * (for relocation extents) and ZONE APPEND commands (for 3817 * regular extents) to the same zone at the same time, which 3818 * easily breaks the write pointer.
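 *
 * (Background note, added for clarity: a zoned device accepts writes
 * only at each zone's write pointer, so mixing two independent write
 * streams in one zone makes the pointer position unpredictable; hence
 * the hard cut-off here.)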
3819 */ 3820 set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags); 3821 fs_info->data_reloc_bg = 0; 3822 } 3823 spin_unlock(&fs_info->relocation_bg_lock); 3824 spin_unlock(&fs_info->treelog_bg_lock); 3825 spin_unlock(&block_group->lock); 3826 spin_unlock(&space_info->lock); 3827 return ret; 3828 } 3829 3830 static int do_allocation(struct btrfs_block_group *block_group, 3831 struct find_free_extent_ctl *ffe_ctl, 3832 struct btrfs_block_group **bg_ret) 3833 { 3834 switch (ffe_ctl->policy) { 3835 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3836 return do_allocation_clustered(block_group, ffe_ctl, bg_ret); 3837 case BTRFS_EXTENT_ALLOC_ZONED: 3838 return do_allocation_zoned(block_group, ffe_ctl, bg_ret); 3839 default: 3840 BUG(); 3841 } 3842 } 3843 3844 static void release_block_group(struct btrfs_block_group *block_group, 3845 struct find_free_extent_ctl *ffe_ctl, 3846 int delalloc) 3847 { 3848 switch (ffe_ctl->policy) { 3849 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3850 ffe_ctl->retry_clustered = false; 3851 ffe_ctl->retry_unclustered = false; 3852 break; 3853 case BTRFS_EXTENT_ALLOC_ZONED: 3854 /* Nothing to do */ 3855 break; 3856 default: 3857 BUG(); 3858 } 3859 3860 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) != 3861 ffe_ctl->index); 3862 btrfs_release_block_group(block_group, delalloc); 3863 } 3864 3865 static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl, 3866 struct btrfs_key *ins) 3867 { 3868 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3869 3870 if (!ffe_ctl->use_cluster && last_ptr) { 3871 spin_lock(&last_ptr->lock); 3872 last_ptr->window_start = ins->objectid; 3873 spin_unlock(&last_ptr->lock); 3874 } 3875 } 3876 3877 static void found_extent(struct find_free_extent_ctl *ffe_ctl, 3878 struct btrfs_key *ins) 3879 { 3880 switch (ffe_ctl->policy) { 3881 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3882 found_extent_clustered(ffe_ctl, ins); 3883 break; 3884 case BTRFS_EXTENT_ALLOC_ZONED: 3885 /* Nothing to do */ 3886 break; 3887 default: 3888 BUG(); 3889 } 3890 } 3891 3892 static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info, 3893 struct find_free_extent_ctl *ffe_ctl) 3894 { 3895 /* If we can activate new zone, just allocate a chunk and use it */ 3896 if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags)) 3897 return 0; 3898 3899 /* 3900 * We already reached the max active zones. Try to finish one block 3901 * group to make a room for a new block group. This is only possible 3902 * for a data block group because btrfs_zone_finish() may need to wait 3903 * for a running transaction which can cause a deadlock for metadata 3904 * allocation. 3905 */ 3906 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { 3907 int ret = btrfs_zone_finish_one_bg(fs_info); 3908 3909 if (ret == 1) 3910 return 0; 3911 else if (ret < 0) 3912 return ret; 3913 } 3914 3915 /* 3916 * If we have enough free space left in an already active block group 3917 * and we can't activate any other zone now, do not allow allocating a 3918 * new chunk and let find_free_extent() retry with a smaller size. 3919 */ 3920 if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size) 3921 return -ENOSPC; 3922 3923 /* 3924 * Even min_alloc_size is not left in any block groups. Since we cannot 3925 * activate a new block group, allocating it may not help. Let's tell a 3926 * caller to try again and hope it progress something by writing some 3927 * parts of the region. That is only possible for data block groups, 3928 * where a part of the region can be written. 
3929 */ 3930 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) 3931 return -EAGAIN; 3932 3933 /* 3934 * We cannot activate a new block group and no enough space left in any 3935 * block groups. So, allocating a new block group may not help. But, 3936 * there is nothing to do anyway, so let's go with it. 3937 */ 3938 return 0; 3939 } 3940 3941 static int can_allocate_chunk(struct btrfs_fs_info *fs_info, 3942 struct find_free_extent_ctl *ffe_ctl) 3943 { 3944 switch (ffe_ctl->policy) { 3945 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3946 return 0; 3947 case BTRFS_EXTENT_ALLOC_ZONED: 3948 return can_allocate_chunk_zoned(fs_info, ffe_ctl); 3949 default: 3950 BUG(); 3951 } 3952 } 3953 3954 /* 3955 * Return >0 means caller needs to re-search for free extent 3956 * Return 0 means we have the needed free extent. 3957 * Return <0 means we failed to locate any free extent. 3958 */ 3959 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, 3960 struct btrfs_key *ins, 3961 struct find_free_extent_ctl *ffe_ctl, 3962 bool full_search) 3963 { 3964 struct btrfs_root *root = fs_info->chunk_root; 3965 int ret; 3966 3967 if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) && 3968 ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg) 3969 ffe_ctl->orig_have_caching_bg = true; 3970 3971 if (ins->objectid) { 3972 found_extent(ffe_ctl, ins); 3973 return 0; 3974 } 3975 3976 if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg) 3977 return 1; 3978 3979 ffe_ctl->index++; 3980 if (ffe_ctl->index < BTRFS_NR_RAID_TYPES) 3981 return 1; 3982 3983 /* 3984 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking 3985 * caching kthreads as we move along 3986 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching 3987 * LOOP_UNSET_SIZE_CLASS, allow unset size class 3988 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again 3989 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try 3990 * again 3991 */ 3992 if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) { 3993 ffe_ctl->index = 0; 3994 /* 3995 * We want to skip the LOOP_CACHING_WAIT step if we don't have 3996 * any uncached bgs and we've already done a full search 3997 * through. 3998 */ 3999 if (ffe_ctl->loop == LOOP_CACHING_NOWAIT && 4000 (!ffe_ctl->orig_have_caching_bg && full_search)) 4001 ffe_ctl->loop++; 4002 ffe_ctl->loop++; 4003 4004 if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) { 4005 struct btrfs_trans_handle *trans; 4006 int exist = 0; 4007 4008 /* Check if allocation policy allows to create a new chunk */ 4009 ret = can_allocate_chunk(fs_info, ffe_ctl); 4010 if (ret) 4011 return ret; 4012 4013 trans = current->journal_info; 4014 if (trans) 4015 exist = 1; 4016 else 4017 trans = btrfs_join_transaction(root); 4018 4019 if (IS_ERR(trans)) { 4020 ret = PTR_ERR(trans); 4021 return ret; 4022 } 4023 4024 ret = btrfs_chunk_alloc(trans, ffe_ctl->flags, 4025 CHUNK_ALLOC_FORCE_FOR_EXTENT); 4026 4027 /* Do not bail out on ENOSPC since we can do more. */ 4028 if (ret == -ENOSPC) { 4029 ret = 0; 4030 ffe_ctl->loop++; 4031 } 4032 else if (ret < 0) 4033 btrfs_abort_transaction(trans, ret); 4034 else 4035 ret = 0; 4036 if (!exist) 4037 btrfs_end_transaction(trans); 4038 if (ret) 4039 return ret; 4040 } 4041 4042 if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) { 4043 if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED) 4044 return -ENOSPC; 4045 4046 /* 4047 * Don't loop again if we already have no empty_size and 4048 * no empty_cluster. 
4049 */ 4050 if (ffe_ctl->empty_size == 0 && 4051 ffe_ctl->empty_cluster == 0) 4052 return -ENOSPC; 4053 ffe_ctl->empty_size = 0; 4054 ffe_ctl->empty_cluster = 0; 4055 } 4056 return 1; 4057 } 4058 return -ENOSPC; 4059 } 4060 4061 static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl, 4062 struct btrfs_block_group *bg) 4063 { 4064 if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) 4065 return true; 4066 if (!btrfs_block_group_should_use_size_class(bg)) 4067 return true; 4068 if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) 4069 return true; 4070 if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && 4071 bg->size_class == BTRFS_BG_SZ_NONE) 4072 return true; 4073 return ffe_ctl->size_class == bg->size_class; 4074 } 4075 4076 static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, 4077 struct find_free_extent_ctl *ffe_ctl, 4078 struct btrfs_space_info *space_info, 4079 struct btrfs_key *ins) 4080 { 4081 /* 4082 * If our free space is heavily fragmented we may not be able to make 4083 * big contiguous allocations, so instead of doing the expensive search 4084 * for free space, simply return ENOSPC with our max_extent_size so we 4085 * can go ahead and search for a more manageable chunk. 4086 * 4087 * If our max_extent_size is large enough for our allocation simply 4088 * disable clustering since we will likely not be able to find enough 4089 * space to create a cluster and induce latency trying. 4090 */ 4091 if (space_info->max_extent_size) { 4092 spin_lock(&space_info->lock); 4093 if (space_info->max_extent_size && 4094 ffe_ctl->num_bytes > space_info->max_extent_size) { 4095 ins->offset = space_info->max_extent_size; 4096 spin_unlock(&space_info->lock); 4097 return -ENOSPC; 4098 } else if (space_info->max_extent_size) { 4099 ffe_ctl->use_cluster = false; 4100 } 4101 spin_unlock(&space_info->lock); 4102 } 4103 4104 ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info, 4105 &ffe_ctl->empty_cluster); 4106 if (ffe_ctl->last_ptr) { 4107 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 4108 4109 spin_lock(&last_ptr->lock); 4110 if (last_ptr->block_group) 4111 ffe_ctl->hint_byte = last_ptr->window_start; 4112 if (last_ptr->fragmented) { 4113 /* 4114 * We still set window_start so we can keep track of the 4115 * last place we found an allocation to try and save 4116 * some time. 4117 */ 4118 ffe_ctl->hint_byte = last_ptr->window_start; 4119 ffe_ctl->use_cluster = false; 4120 } 4121 spin_unlock(&last_ptr->lock); 4122 } 4123 4124 return 0; 4125 } 4126 4127 static int prepare_allocation(struct btrfs_fs_info *fs_info, 4128 struct find_free_extent_ctl *ffe_ctl, 4129 struct btrfs_space_info *space_info, 4130 struct btrfs_key *ins) 4131 { 4132 switch (ffe_ctl->policy) { 4133 case BTRFS_EXTENT_ALLOC_CLUSTERED: 4134 return prepare_allocation_clustered(fs_info, ffe_ctl, 4135 space_info, ins); 4136 case BTRFS_EXTENT_ALLOC_ZONED: 4137 if (ffe_ctl->for_treelog) { 4138 spin_lock(&fs_info->treelog_bg_lock); 4139 if (fs_info->treelog_bg) 4140 ffe_ctl->hint_byte = fs_info->treelog_bg; 4141 spin_unlock(&fs_info->treelog_bg_lock); 4142 } 4143 if (ffe_ctl->for_data_reloc) { 4144 spin_lock(&fs_info->relocation_bg_lock); 4145 if (fs_info->data_reloc_bg) 4146 ffe_ctl->hint_byte = fs_info->data_reloc_bg; 4147 spin_unlock(&fs_info->relocation_bg_lock); 4148 } 4149 return 0; 4150 default: 4151 BUG(); 4152 } 4153 } 4154 4155 /* 4156 * walks the btree of allocated extents and find a hole of a given size. 
4157 * The key ins is changed to record the hole: 4158 * ins->objectid == start position 4159 * ins->flags = BTRFS_EXTENT_ITEM_KEY 4160 * ins->offset == the size of the hole. 4161 * Any available blocks before search_start are skipped. 4162 * 4163 * If there is no suitable free space, we will record the max size of 4164 * the free space extent currently. 4165 * 4166 * The overall logic and call chain: 4167 * 4168 * find_free_extent() 4169 * |- Iterate through all block groups 4170 * | |- Get a valid block group 4171 * | |- Try to do clustered allocation in that block group 4172 * | |- Try to do unclustered allocation in that block group 4173 * | |- Check if the result is valid 4174 * | | |- If valid, then exit 4175 * | |- Jump to next block group 4176 * | 4177 * |- Push harder to find free extents 4178 * |- If not found, re-iterate all block groups 4179 */ 4180 static noinline int find_free_extent(struct btrfs_root *root, 4181 struct btrfs_key *ins, 4182 struct find_free_extent_ctl *ffe_ctl) 4183 { 4184 struct btrfs_fs_info *fs_info = root->fs_info; 4185 int ret = 0; 4186 int cache_block_group_error = 0; 4187 struct btrfs_block_group *block_group = NULL; 4188 struct btrfs_space_info *space_info; 4189 bool full_search = false; 4190 4191 WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize); 4192 4193 ffe_ctl->search_start = 0; 4194 /* For clustered allocation */ 4195 ffe_ctl->empty_cluster = 0; 4196 ffe_ctl->last_ptr = NULL; 4197 ffe_ctl->use_cluster = true; 4198 ffe_ctl->have_caching_bg = false; 4199 ffe_ctl->orig_have_caching_bg = false; 4200 ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags); 4201 ffe_ctl->loop = 0; 4202 /* For clustered allocation */ 4203 ffe_ctl->retry_clustered = false; 4204 ffe_ctl->retry_unclustered = false; 4205 ffe_ctl->cached = 0; 4206 ffe_ctl->max_extent_size = 0; 4207 ffe_ctl->total_free_space = 0; 4208 ffe_ctl->found_offset = 0; 4209 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED; 4210 ffe_ctl->size_class = btrfs_calc_block_group_size_class(ffe_ctl->num_bytes); 4211 4212 if (btrfs_is_zoned(fs_info)) 4213 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED; 4214 4215 ins->type = BTRFS_EXTENT_ITEM_KEY; 4216 ins->objectid = 0; 4217 ins->offset = 0; 4218 4219 trace_find_free_extent(root, ffe_ctl); 4220 4221 space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags); 4222 if (!space_info) { 4223 btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags); 4224 return -ENOSPC; 4225 } 4226 4227 ret = prepare_allocation(fs_info, ffe_ctl, space_info, ins); 4228 if (ret < 0) 4229 return ret; 4230 4231 ffe_ctl->search_start = max(ffe_ctl->search_start, 4232 first_logical_byte(fs_info)); 4233 ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte); 4234 if (ffe_ctl->search_start == ffe_ctl->hint_byte) { 4235 block_group = btrfs_lookup_block_group(fs_info, 4236 ffe_ctl->search_start); 4237 /* 4238 * we don't want to use the block group if it doesn't match our 4239 * allocation bits, or if its not cached. 4240 * 4241 * However if we are re-searching with an ideal block group 4242 * picked out then we don't care that the block group is cached. 
4243 */ 4244 if (block_group && block_group_bits(block_group, ffe_ctl->flags) && 4245 block_group->cached != BTRFS_CACHE_NO) { 4246 down_read(&space_info->groups_sem); 4247 if (list_empty(&block_group->list) || 4248 block_group->ro) { 4249 /* 4250 * someone is removing this block group, 4251 * we can't jump into the have_block_group 4252 * target because our list pointers are not 4253 * valid 4254 */ 4255 btrfs_put_block_group(block_group); 4256 up_read(&space_info->groups_sem); 4257 } else { 4258 ffe_ctl->index = btrfs_bg_flags_to_raid_index( 4259 block_group->flags); 4260 btrfs_lock_block_group(block_group, 4261 ffe_ctl->delalloc); 4262 ffe_ctl->hinted = true; 4263 goto have_block_group; 4264 } 4265 } else if (block_group) { 4266 btrfs_put_block_group(block_group); 4267 } 4268 } 4269 search: 4270 trace_find_free_extent_search_loop(root, ffe_ctl); 4271 ffe_ctl->have_caching_bg = false; 4272 if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) || 4273 ffe_ctl->index == 0) 4274 full_search = true; 4275 down_read(&space_info->groups_sem); 4276 list_for_each_entry(block_group, 4277 &space_info->block_groups[ffe_ctl->index], list) { 4278 struct btrfs_block_group *bg_ret; 4279 4280 ffe_ctl->hinted = false; 4281 /* If the block group is read-only, we can skip it entirely. */ 4282 if (unlikely(block_group->ro)) { 4283 if (ffe_ctl->for_treelog) 4284 btrfs_clear_treelog_bg(block_group); 4285 if (ffe_ctl->for_data_reloc) 4286 btrfs_clear_data_reloc_bg(block_group); 4287 continue; 4288 } 4289 4290 btrfs_grab_block_group(block_group, ffe_ctl->delalloc); 4291 ffe_ctl->search_start = block_group->start; 4292 4293 /* 4294 * this can happen if we end up cycling through all the 4295 * raid types, but we want to make sure we only allocate 4296 * for the proper type. 4297 */ 4298 if (!block_group_bits(block_group, ffe_ctl->flags)) { 4299 u64 extra = BTRFS_BLOCK_GROUP_DUP | 4300 BTRFS_BLOCK_GROUP_RAID1_MASK | 4301 BTRFS_BLOCK_GROUP_RAID56_MASK | 4302 BTRFS_BLOCK_GROUP_RAID10; 4303 4304 /* 4305 * if they asked for extra copies and this block group 4306 * doesn't provide them, bail. This does allow us to 4307 * fill raid0 from raid1. 4308 */ 4309 if ((ffe_ctl->flags & extra) && !(block_group->flags & extra)) 4310 goto loop; 4311 4312 /* 4313 * This block group has different flags than we want. 4314 * It's possible that we have MIXED_GROUP flag but no 4315 * block group is mixed. Just skip such block group. 4316 */ 4317 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4318 continue; 4319 } 4320 4321 have_block_group: 4322 trace_find_free_extent_have_block_group(root, ffe_ctl, block_group); 4323 ffe_ctl->cached = btrfs_block_group_done(block_group); 4324 if (unlikely(!ffe_ctl->cached)) { 4325 ffe_ctl->have_caching_bg = true; 4326 ret = btrfs_cache_block_group(block_group, false); 4327 4328 /* 4329 * If we get ENOMEM here or something else we want to 4330 * try other block groups, because it may not be fatal. 4331 * However if we can't find anything else we need to 4332 * save our return here so that we return the actual 4333 * error that caused problems, not ENOSPC. 
4334 */ 4335 if (ret < 0) { 4336 if (!cache_block_group_error) 4337 cache_block_group_error = ret; 4338 ret = 0; 4339 goto loop; 4340 } 4341 ret = 0; 4342 } 4343 4344 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) 4345 goto loop; 4346 4347 if (!find_free_extent_check_size_class(ffe_ctl, block_group)) 4348 goto loop; 4349 4350 bg_ret = NULL; 4351 ret = do_allocation(block_group, ffe_ctl, &bg_ret); 4352 if (ret == 0) { 4353 if (bg_ret && bg_ret != block_group) { 4354 btrfs_release_block_group(block_group, 4355 ffe_ctl->delalloc); 4356 block_group = bg_ret; 4357 } 4358 } else if (ret == -EAGAIN) { 4359 goto have_block_group; 4360 } else if (ret > 0) { 4361 goto loop; 4362 } 4363 4364 /* Checks */ 4365 ffe_ctl->search_start = round_up(ffe_ctl->found_offset, 4366 fs_info->stripesize); 4367 4368 /* move on to the next group */ 4369 if (ffe_ctl->search_start + ffe_ctl->num_bytes > 4370 block_group->start + block_group->length) { 4371 btrfs_add_free_space_unused(block_group, 4372 ffe_ctl->found_offset, 4373 ffe_ctl->num_bytes); 4374 goto loop; 4375 } 4376 4377 if (ffe_ctl->found_offset < ffe_ctl->search_start) 4378 btrfs_add_free_space_unused(block_group, 4379 ffe_ctl->found_offset, 4380 ffe_ctl->search_start - ffe_ctl->found_offset); 4381 4382 ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes, 4383 ffe_ctl->num_bytes, 4384 ffe_ctl->delalloc, 4385 ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS); 4386 if (ret == -EAGAIN) { 4387 btrfs_add_free_space_unused(block_group, 4388 ffe_ctl->found_offset, 4389 ffe_ctl->num_bytes); 4390 goto loop; 4391 } 4392 btrfs_inc_block_group_reservations(block_group); 4393 4394 /* we are all good, lets return */ 4395 ins->objectid = ffe_ctl->search_start; 4396 ins->offset = ffe_ctl->num_bytes; 4397 4398 trace_btrfs_reserve_extent(block_group, ffe_ctl); 4399 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4400 break; 4401 loop: 4402 release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc); 4403 cond_resched(); 4404 } 4405 up_read(&space_info->groups_sem); 4406 4407 ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search); 4408 if (ret > 0) 4409 goto search; 4410 4411 if (ret == -ENOSPC && !cache_block_group_error) { 4412 /* 4413 * Use ffe_ctl->total_free_space as fallback if we can't find 4414 * any contiguous hole. 4415 */ 4416 if (!ffe_ctl->max_extent_size) 4417 ffe_ctl->max_extent_size = ffe_ctl->total_free_space; 4418 spin_lock(&space_info->lock); 4419 space_info->max_extent_size = ffe_ctl->max_extent_size; 4420 spin_unlock(&space_info->lock); 4421 ins->offset = ffe_ctl->max_extent_size; 4422 } else if (ret == -ENOSPC) { 4423 ret = cache_block_group_error; 4424 } 4425 return ret; 4426 } 4427 4428 /* 4429 * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a 4430 * hole that is at least as big as @num_bytes. 4431 * 4432 * @root - The root that will contain this extent 4433 * 4434 * @ram_bytes - The amount of space in ram that @num_bytes take. This 4435 * is used for accounting purposes. This value differs 4436 * from @num_bytes only in the case of compressed extents. 4437 * 4438 * @num_bytes - Number of bytes to allocate on-disk. 4439 * 4440 * @min_alloc_size - Indicates the minimum amount of space that the 4441 * allocator should try to satisfy. In some cases 4442 * @num_bytes may be larger than what is required and if 4443 * the filesystem is fragmented then allocation fails. 4444 * However, the presence of @min_alloc_size gives a 4445 * chance to try and satisfy the smaller allocation. 
4446 * 4447 * @empty_size - A hint that you plan on doing more COW. This is the 4448 * size in bytes the allocator should try to find free 4449 * next to the block it returns. This is just a hint and 4450 * may be ignored by the allocator. 4451 * 4452 * @hint_byte - Hint to the allocator to start searching above the byte 4453 * address passed. It might be ignored. 4454 * 4455 * @ins - This key is modified to record the found hole. It will 4456 * have the following values: 4457 * ins->objectid == start position 4458 * ins->flags = BTRFS_EXTENT_ITEM_KEY 4459 * ins->offset == the size of the hole. 4460 * 4461 * @is_data - Boolean flag indicating whether an extent is 4462 * allocated for data (true) or metadata (false) 4463 * 4464 * @delalloc - Boolean flag indicating whether this allocation is for 4465 * delalloc or not. If 'true' data_rwsem of block groups 4466 * is going to be acquired. 4467 * 4468 * 4469 * Returns 0 when an allocation succeeded or < 0 when an error occurred. In 4470 * case -ENOSPC is returned then @ins->offset will contain the size of the 4471 * largest available hole the allocator managed to find. 4472 */ 4473 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, 4474 u64 num_bytes, u64 min_alloc_size, 4475 u64 empty_size, u64 hint_byte, 4476 struct btrfs_key *ins, int is_data, int delalloc) 4477 { 4478 struct btrfs_fs_info *fs_info = root->fs_info; 4479 struct find_free_extent_ctl ffe_ctl = {}; 4480 bool final_tried = num_bytes == min_alloc_size; 4481 u64 flags; 4482 int ret; 4483 bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 4484 bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data); 4485 4486 flags = get_alloc_profile_by_root(root, is_data); 4487 again: 4488 WARN_ON(num_bytes < fs_info->sectorsize); 4489 4490 ffe_ctl.ram_bytes = ram_bytes; 4491 ffe_ctl.num_bytes = num_bytes; 4492 ffe_ctl.min_alloc_size = min_alloc_size; 4493 ffe_ctl.empty_size = empty_size; 4494 ffe_ctl.flags = flags; 4495 ffe_ctl.delalloc = delalloc; 4496 ffe_ctl.hint_byte = hint_byte; 4497 ffe_ctl.for_treelog = for_treelog; 4498 ffe_ctl.for_data_reloc = for_data_reloc; 4499 4500 ret = find_free_extent(root, ins, &ffe_ctl); 4501 if (!ret && !is_data) { 4502 btrfs_dec_block_group_reservations(fs_info, ins->objectid); 4503 } else if (ret == -ENOSPC) { 4504 if (!final_tried && ins->offset) { 4505 num_bytes = min(num_bytes >> 1, ins->offset); 4506 num_bytes = round_down(num_bytes, 4507 fs_info->sectorsize); 4508 num_bytes = max(num_bytes, min_alloc_size); 4509 ram_bytes = num_bytes; 4510 if (num_bytes == min_alloc_size) 4511 final_tried = true; 4512 goto again; 4513 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 4514 struct btrfs_space_info *sinfo; 4515 4516 sinfo = btrfs_find_space_info(fs_info, flags); 4517 btrfs_err(fs_info, 4518 "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d", 4519 flags, num_bytes, for_treelog, for_data_reloc); 4520 if (sinfo) 4521 btrfs_dump_space_info(fs_info, sinfo, 4522 num_bytes, 1); 4523 } 4524 } 4525 4526 return ret; 4527 } 4528 4529 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, 4530 u64 start, u64 len, int delalloc) 4531 { 4532 struct btrfs_block_group *cache; 4533 4534 cache = btrfs_lookup_block_group(fs_info, start); 4535 if (!cache) { 4536 btrfs_err(fs_info, "Unable to find block group for %llu", 4537 start); 4538 return -ENOSPC; 4539 } 4540 4541 btrfs_add_free_space(cache, start, len); 4542 btrfs_free_reserved_bytes(cache, len, delalloc); 4543 
trace_btrfs_reserved_extent_free(fs_info, start, len); 4544 4545 btrfs_put_block_group(cache); 4546 return 0; 4547 } 4548 4549 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, 4550 u64 len) 4551 { 4552 struct btrfs_block_group *cache; 4553 int ret = 0; 4554 4555 cache = btrfs_lookup_block_group(trans->fs_info, start); 4556 if (!cache) { 4557 btrfs_err(trans->fs_info, "unable to find block group for %llu", 4558 start); 4559 return -ENOSPC; 4560 } 4561 4562 ret = pin_down_extent(trans, cache, start, len, 1); 4563 btrfs_put_block_group(cache); 4564 return ret; 4565 } 4566 4567 static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr, 4568 u64 num_bytes) 4569 { 4570 struct btrfs_fs_info *fs_info = trans->fs_info; 4571 int ret; 4572 4573 ret = remove_from_free_space_tree(trans, bytenr, num_bytes); 4574 if (ret) 4575 return ret; 4576 4577 ret = btrfs_update_block_group(trans, bytenr, num_bytes, true); 4578 if (ret) { 4579 ASSERT(!ret); 4580 btrfs_err(fs_info, "update block group failed for %llu %llu", 4581 bytenr, num_bytes); 4582 return ret; 4583 } 4584 4585 trace_btrfs_reserved_extent_alloc(fs_info, bytenr, num_bytes); 4586 return 0; 4587 } 4588 4589 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 4590 u64 parent, u64 root_objectid, 4591 u64 flags, u64 owner, u64 offset, 4592 struct btrfs_key *ins, int ref_mod) 4593 { 4594 struct btrfs_fs_info *fs_info = trans->fs_info; 4595 struct btrfs_root *extent_root; 4596 int ret; 4597 struct btrfs_extent_item *extent_item; 4598 struct btrfs_extent_inline_ref *iref; 4599 struct btrfs_path *path; 4600 struct extent_buffer *leaf; 4601 int type; 4602 u32 size; 4603 4604 if (parent > 0) 4605 type = BTRFS_SHARED_DATA_REF_KEY; 4606 else 4607 type = BTRFS_EXTENT_DATA_REF_KEY; 4608 4609 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); 4610 4611 path = btrfs_alloc_path(); 4612 if (!path) 4613 return -ENOMEM; 4614 4615 extent_root = btrfs_extent_root(fs_info, ins->objectid); 4616 ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size); 4617 if (ret) { 4618 btrfs_free_path(path); 4619 return ret; 4620 } 4621 4622 leaf = path->nodes[0]; 4623 extent_item = btrfs_item_ptr(leaf, path->slots[0], 4624 struct btrfs_extent_item); 4625 btrfs_set_extent_refs(leaf, extent_item, ref_mod); 4626 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 4627 btrfs_set_extent_flags(leaf, extent_item, 4628 flags | BTRFS_EXTENT_FLAG_DATA); 4629 4630 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 4631 btrfs_set_extent_inline_ref_type(leaf, iref, type); 4632 if (parent > 0) { 4633 struct btrfs_shared_data_ref *ref; 4634 ref = (struct btrfs_shared_data_ref *)(iref + 1); 4635 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 4636 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); 4637 } else { 4638 struct btrfs_extent_data_ref *ref; 4639 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 4640 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); 4641 btrfs_set_extent_data_ref_objectid(leaf, ref, owner); 4642 btrfs_set_extent_data_ref_offset(leaf, ref, offset); 4643 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); 4644 } 4645 4646 btrfs_mark_buffer_dirty(path->nodes[0]); 4647 btrfs_free_path(path); 4648 4649 return alloc_reserved_extent(trans, ins->objectid, ins->offset); 4650 } 4651 4652 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, 4653 struct btrfs_delayed_ref_node *node, 4654 struct btrfs_delayed_extent_op *extent_op) 4655 
{ 4656 struct btrfs_fs_info *fs_info = trans->fs_info; 4657 struct btrfs_root *extent_root; 4658 int ret; 4659 struct btrfs_extent_item *extent_item; 4660 struct btrfs_key extent_key; 4661 struct btrfs_tree_block_info *block_info; 4662 struct btrfs_extent_inline_ref *iref; 4663 struct btrfs_path *path; 4664 struct extent_buffer *leaf; 4665 struct btrfs_delayed_tree_ref *ref; 4666 u32 size = sizeof(*extent_item) + sizeof(*iref); 4667 u64 flags = extent_op->flags_to_set; 4668 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 4669 4670 ref = btrfs_delayed_node_to_tree_ref(node); 4671 4672 extent_key.objectid = node->bytenr; 4673 if (skinny_metadata) { 4674 extent_key.offset = ref->level; 4675 extent_key.type = BTRFS_METADATA_ITEM_KEY; 4676 } else { 4677 extent_key.offset = node->num_bytes; 4678 extent_key.type = BTRFS_EXTENT_ITEM_KEY; 4679 size += sizeof(*block_info); 4680 } 4681 4682 path = btrfs_alloc_path(); 4683 if (!path) 4684 return -ENOMEM; 4685 4686 extent_root = btrfs_extent_root(fs_info, extent_key.objectid); 4687 ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key, 4688 size); 4689 if (ret) { 4690 btrfs_free_path(path); 4691 return ret; 4692 } 4693 4694 leaf = path->nodes[0]; 4695 extent_item = btrfs_item_ptr(leaf, path->slots[0], 4696 struct btrfs_extent_item); 4697 btrfs_set_extent_refs(leaf, extent_item, 1); 4698 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 4699 btrfs_set_extent_flags(leaf, extent_item, 4700 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); 4701 4702 if (skinny_metadata) { 4703 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 4704 } else { 4705 block_info = (struct btrfs_tree_block_info *)(extent_item + 1); 4706 btrfs_set_tree_block_key(leaf, block_info, &extent_op->key); 4707 btrfs_set_tree_block_level(leaf, block_info, ref->level); 4708 iref = (struct btrfs_extent_inline_ref *)(block_info + 1); 4709 } 4710 4711 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) { 4712 btrfs_set_extent_inline_ref_type(leaf, iref, 4713 BTRFS_SHARED_BLOCK_REF_KEY); 4714 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent); 4715 } else { 4716 btrfs_set_extent_inline_ref_type(leaf, iref, 4717 BTRFS_TREE_BLOCK_REF_KEY); 4718 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root); 4719 } 4720 4721 btrfs_mark_buffer_dirty(leaf); 4722 btrfs_free_path(path); 4723 4724 return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize); 4725 } 4726 4727 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 4728 struct btrfs_root *root, u64 owner, 4729 u64 offset, u64 ram_bytes, 4730 struct btrfs_key *ins) 4731 { 4732 struct btrfs_ref generic_ref = { 0 }; 4733 4734 BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 4735 4736 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, 4737 ins->objectid, ins->offset, 0); 4738 btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, 4739 offset, 0, false); 4740 btrfs_ref_tree_mod(root->fs_info, &generic_ref); 4741 4742 return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes); 4743 } 4744 4745 /* 4746 * this is used by the tree logging recovery code. 
It records that 4747 * an extent has been allocated and makes sure to clear the free 4748 * space cache bits as well 4749 */ 4750 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 4751 u64 root_objectid, u64 owner, u64 offset, 4752 struct btrfs_key *ins) 4753 { 4754 struct btrfs_fs_info *fs_info = trans->fs_info; 4755 int ret; 4756 struct btrfs_block_group *block_group; 4757 struct btrfs_space_info *space_info; 4758 4759 /* 4760 * Mixed block groups will exclude before processing the log so we only 4761 * need to do the exclude dance if this fs isn't mixed. 4762 */ 4763 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { 4764 ret = __exclude_logged_extent(fs_info, ins->objectid, 4765 ins->offset); 4766 if (ret) 4767 return ret; 4768 } 4769 4770 block_group = btrfs_lookup_block_group(fs_info, ins->objectid); 4771 if (!block_group) 4772 return -EINVAL; 4773 4774 space_info = block_group->space_info; 4775 spin_lock(&space_info->lock); 4776 spin_lock(&block_group->lock); 4777 space_info->bytes_reserved += ins->offset; 4778 block_group->reserved += ins->offset; 4779 spin_unlock(&block_group->lock); 4780 spin_unlock(&space_info->lock); 4781 4782 ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner, 4783 offset, ins, 1); 4784 if (ret) 4785 btrfs_pin_extent(trans, ins->objectid, ins->offset, 1); 4786 btrfs_put_block_group(block_group); 4787 return ret; 4788 } 4789 4790 static struct extent_buffer * 4791 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4792 u64 bytenr, int level, u64 owner, 4793 enum btrfs_lock_nesting nest) 4794 { 4795 struct btrfs_fs_info *fs_info = root->fs_info; 4796 struct extent_buffer *buf; 4797 u64 lockdep_owner = owner; 4798 4799 buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level); 4800 if (IS_ERR(buf)) 4801 return buf; 4802 4803 /* 4804 * Extra safety check in case the extent tree is corrupted and extent 4805 * allocator chooses to use a tree block which is already used and 4806 * locked. 4807 */ 4808 if (buf->lock_owner == current->pid) { 4809 btrfs_err_rl(fs_info, 4810 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected", 4811 buf->start, btrfs_header_owner(buf), current->pid); 4812 free_extent_buffer(buf); 4813 return ERR_PTR(-EUCLEAN); 4814 } 4815 4816 /* 4817 * The reloc trees are just snapshots, so we need them to appear to be 4818 * just like any other fs tree WRT lockdep. 4819 * 4820 * The exception however is in replace_path() in relocation, where we 4821 * hold the lock on the original fs root and then search for the reloc 4822 * root. At that point we need to make sure any reloc root buffers are 4823 * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make 4824 * lockdep happy. 4825 */ 4826 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID && 4827 !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state)) 4828 lockdep_owner = BTRFS_FS_TREE_OBJECTID; 4829 4830 /* btrfs_clean_tree_block() accesses generation field. */ 4831 btrfs_set_header_generation(buf, trans->transid); 4832 4833 /* 4834 * This needs to stay, because we could allocate a freed block from an 4835 * old tree into a new tree, so we need to make sure this new block is 4836 * set to the appropriate level and owner. 
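* (the lockdep class must be assigned before the buffer lock below is
* taken)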
4837 */ 4838 btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level); 4839 4840 __btrfs_tree_lock(buf, nest); 4841 btrfs_clear_buffer_dirty(trans, buf); 4842 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); 4843 clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags); 4844 4845 set_extent_buffer_uptodate(buf); 4846 4847 memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header)); 4848 btrfs_set_header_level(buf, level); 4849 btrfs_set_header_bytenr(buf, buf->start); 4850 btrfs_set_header_generation(buf, trans->transid); 4851 btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV); 4852 btrfs_set_header_owner(buf, owner); 4853 write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid); 4854 write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid); 4855 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 4856 buf->log_index = root->log_transid % 2; 4857 /* 4858 * we allow two log transactions at a time, use different 4859 * EXTENT bit to differentiate dirty pages. 4860 */ 4861 if (buf->log_index == 0) 4862 set_extent_dirty(&root->dirty_log_pages, buf->start, 4863 buf->start + buf->len - 1, GFP_NOFS); 4864 else 4865 set_extent_new(&root->dirty_log_pages, buf->start, 4866 buf->start + buf->len - 1); 4867 } else { 4868 buf->log_index = -1; 4869 set_extent_dirty(&trans->transaction->dirty_pages, buf->start, 4870 buf->start + buf->len - 1, GFP_NOFS); 4871 } 4872 /* this returns a buffer locked for blocking */ 4873 return buf; 4874 } 4875 4876 /* 4877 * finds a free extent and does all the dirty work required for allocation 4878 * returns the tree buffer or an ERR_PTR on error. 4879 */ 4880 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 4881 struct btrfs_root *root, 4882 u64 parent, u64 root_objectid, 4883 const struct btrfs_disk_key *key, 4884 int level, u64 hint, 4885 u64 empty_size, 4886 enum btrfs_lock_nesting nest) 4887 { 4888 struct btrfs_fs_info *fs_info = root->fs_info; 4889 struct btrfs_key ins; 4890 struct btrfs_block_rsv *block_rsv; 4891 struct extent_buffer *buf; 4892 struct btrfs_delayed_extent_op *extent_op; 4893 struct btrfs_ref generic_ref = { 0 }; 4894 u64 flags = 0; 4895 int ret; 4896 u32 blocksize = fs_info->nodesize; 4897 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 4898 4899 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4900 if (btrfs_is_testing(fs_info)) { 4901 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, 4902 level, root_objectid, nest); 4903 if (!IS_ERR(buf)) 4904 root->alloc_bytenr += blocksize; 4905 return buf; 4906 } 4907 #endif 4908 4909 block_rsv = btrfs_use_block_rsv(trans, root, blocksize); 4910 if (IS_ERR(block_rsv)) 4911 return ERR_CAST(block_rsv); 4912 4913 ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize, 4914 empty_size, hint, &ins, 0, 0); 4915 if (ret) 4916 goto out_unuse; 4917 4918 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level, 4919 root_objectid, nest); 4920 if (IS_ERR(buf)) { 4921 ret = PTR_ERR(buf); 4922 goto out_free_reserved; 4923 } 4924 4925 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 4926 if (parent == 0) 4927 parent = ins.objectid; 4928 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 4929 } else 4930 BUG_ON(parent > 0); 4931 4932 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 4933 extent_op = btrfs_alloc_delayed_extent_op(); 4934 if (!extent_op) { 4935 ret = -ENOMEM; 4936 goto out_free_buf; 4937 } 4938 if (key) 4939 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 4940 else 4941 memset(&extent_op->key, 0, sizeof(extent_op->key)); 4942 
extent_op->flags_to_set = flags; 4943 extent_op->update_key = skinny_metadata ? false : true; 4944 extent_op->update_flags = true; 4945 extent_op->level = level; 4946 4947 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, 4948 ins.objectid, ins.offset, parent); 4949 btrfs_init_tree_ref(&generic_ref, level, root_objectid, 4950 root->root_key.objectid, false); 4951 btrfs_ref_tree_mod(fs_info, &generic_ref); 4952 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op); 4953 if (ret) 4954 goto out_free_delayed; 4955 } 4956 return buf; 4957 4958 out_free_delayed: 4959 btrfs_free_delayed_extent_op(extent_op); 4960 out_free_buf: 4961 btrfs_tree_unlock(buf); 4962 free_extent_buffer(buf); 4963 out_free_reserved: 4964 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0); 4965 out_unuse: 4966 btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize); 4967 return ERR_PTR(ret); 4968 } 4969 4970 struct walk_control { 4971 u64 refs[BTRFS_MAX_LEVEL]; 4972 u64 flags[BTRFS_MAX_LEVEL]; 4973 struct btrfs_key update_progress; 4974 struct btrfs_key drop_progress; 4975 int drop_level; 4976 int stage; 4977 int level; 4978 int shared_level; 4979 int update_ref; 4980 int keep_locks; 4981 int reada_slot; 4982 int reada_count; 4983 int restarted; 4984 }; 4985 4986 #define DROP_REFERENCE 1 4987 #define UPDATE_BACKREF 2 4988 4989 static noinline void reada_walk_down(struct btrfs_trans_handle *trans, 4990 struct btrfs_root *root, 4991 struct walk_control *wc, 4992 struct btrfs_path *path) 4993 { 4994 struct btrfs_fs_info *fs_info = root->fs_info; 4995 u64 bytenr; 4996 u64 generation; 4997 u64 refs; 4998 u64 flags; 4999 u32 nritems; 5000 struct btrfs_key key; 5001 struct extent_buffer *eb; 5002 int ret; 5003 int slot; 5004 int nread = 0; 5005 5006 if (path->slots[wc->level] < wc->reada_slot) { 5007 wc->reada_count = wc->reada_count * 2 / 3; 5008 wc->reada_count = max(wc->reada_count, 2); 5009 } else { 5010 wc->reada_count = wc->reada_count * 3 / 2; 5011 wc->reada_count = min_t(int, wc->reada_count, 5012 BTRFS_NODEPTRS_PER_BLOCK(fs_info)); 5013 } 5014 5015 eb = path->nodes[wc->level]; 5016 nritems = btrfs_header_nritems(eb); 5017 5018 for (slot = path->slots[wc->level]; slot < nritems; slot++) { 5019 if (nread >= wc->reada_count) 5020 break; 5021 5022 cond_resched(); 5023 bytenr = btrfs_node_blockptr(eb, slot); 5024 generation = btrfs_node_ptr_generation(eb, slot); 5025 5026 if (slot == path->slots[wc->level]) 5027 goto reada; 5028 5029 if (wc->stage == UPDATE_BACKREF && 5030 generation <= root->root_key.offset) 5031 continue; 5032 5033 /* We don't lock the tree block, it's OK to be racy here */ 5034 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, 5035 wc->level - 1, 1, &refs, 5036 &flags); 5037 /* We don't care about errors in readahead. */ 5038 if (ret < 0) 5039 continue; 5040 BUG_ON(refs == 0); 5041 5042 if (wc->stage == DROP_REFERENCE) { 5043 if (refs == 1) 5044 goto reada; 5045 5046 if (wc->level == 1 && 5047 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5048 continue; 5049 if (!wc->update_ref || 5050 generation <= root->root_key.offset) 5051 continue; 5052 btrfs_node_key_to_cpu(eb, &key, slot); 5053 ret = btrfs_comp_cpu_keys(&key, 5054 &wc->update_progress); 5055 if (ret < 0) 5056 continue; 5057 } else { 5058 if (wc->level == 1 && 5059 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5060 continue; 5061 } 5062 reada: 5063 btrfs_readahead_node_child(eb, slot); 5064 nread++; 5065 } 5066 wc->reada_slot = slot; 5067 } 5068 5069 /* 5070 * helper to process tree block while walking down the tree. 
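* (called from walk_down_tree() once per level as the walk descends)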
5071 * 5072 * when wc->stage == UPDATE_BACKREF, this function updates 5073 * back refs for pointers in the block. 5074 * 5075 * NOTE: return value 1 means we should stop walking down. 5076 */ 5077 static noinline int walk_down_proc(struct btrfs_trans_handle *trans, 5078 struct btrfs_root *root, 5079 struct btrfs_path *path, 5080 struct walk_control *wc, int lookup_info) 5081 { 5082 struct btrfs_fs_info *fs_info = root->fs_info; 5083 int level = wc->level; 5084 struct extent_buffer *eb = path->nodes[level]; 5085 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; 5086 int ret; 5087 5088 if (wc->stage == UPDATE_BACKREF && 5089 btrfs_header_owner(eb) != root->root_key.objectid) 5090 return 1; 5091 5092 /* 5093 * when reference count of tree block is 1, it won't increase 5094 * again. once full backref flag is set, we never clear it. 5095 */ 5096 if (lookup_info && 5097 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || 5098 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { 5099 BUG_ON(!path->locks[level]); 5100 ret = btrfs_lookup_extent_info(trans, fs_info, 5101 eb->start, level, 1, 5102 &wc->refs[level], 5103 &wc->flags[level]); 5104 BUG_ON(ret == -ENOMEM); 5105 if (ret) 5106 return ret; 5107 BUG_ON(wc->refs[level] == 0); 5108 } 5109 5110 if (wc->stage == DROP_REFERENCE) { 5111 if (wc->refs[level] > 1) 5112 return 1; 5113 5114 if (path->locks[level] && !wc->keep_locks) { 5115 btrfs_tree_unlock_rw(eb, path->locks[level]); 5116 path->locks[level] = 0; 5117 } 5118 return 0; 5119 } 5120 5121 /* wc->stage == UPDATE_BACKREF */ 5122 if (!(wc->flags[level] & flag)) { 5123 BUG_ON(!path->locks[level]); 5124 ret = btrfs_inc_ref(trans, root, eb, 1); 5125 BUG_ON(ret); /* -ENOMEM */ 5126 ret = btrfs_dec_ref(trans, root, eb, 0); 5127 BUG_ON(ret); /* -ENOMEM */ 5128 ret = btrfs_set_disk_extent_flags(trans, eb, flag, 5129 btrfs_header_level(eb)); 5130 BUG_ON(ret); /* -ENOMEM */ 5131 wc->flags[level] |= flag; 5132 } 5133 5134 /* 5135 * the block is shared by multiple trees, so it's not good to 5136 * keep the tree lock 5137 */ 5138 if (path->locks[level] && level > 0) { 5139 btrfs_tree_unlock_rw(eb, path->locks[level]); 5140 path->locks[level] = 0; 5141 } 5142 return 0; 5143 } 5144 5145 /* 5146 * This is used to verify a ref exists for this root to deal with a bug where we 5147 * would have a drop_progress key that hadn't been updated properly. 5148 */ 5149 static int check_ref_exists(struct btrfs_trans_handle *trans, 5150 struct btrfs_root *root, u64 bytenr, u64 parent, 5151 int level) 5152 { 5153 struct btrfs_path *path; 5154 struct btrfs_extent_inline_ref *iref; 5155 int ret; 5156 5157 path = btrfs_alloc_path(); 5158 if (!path) 5159 return -ENOMEM; 5160 5161 ret = lookup_extent_backref(trans, path, &iref, bytenr, 5162 root->fs_info->nodesize, parent, 5163 root->root_key.objectid, level, 0); 5164 btrfs_free_path(path); 5165 if (ret == -ENOENT) 5166 return 0; 5167 if (ret < 0) 5168 return ret; 5169 return 1; 5170 } 5171 5172 /* 5173 * helper to process tree block pointer. 5174 * 5175 * when wc->stage == DROP_REFERENCE, this function checks 5176 * reference count of the block pointed to. if the block 5177 * is shared and we need update back refs for the subtree 5178 * rooted at the block, this function changes wc->stage to 5179 * UPDATE_BACKREF. if the block is shared and there is no 5180 * need to update back, this function drops the reference 5181 * to the block. 5182 * 5183 * NOTE: return value 1 means we should stop walking down. 
5184 */ 5185 static noinline int do_walk_down(struct btrfs_trans_handle *trans, 5186 struct btrfs_root *root, 5187 struct btrfs_path *path, 5188 struct walk_control *wc, int *lookup_info) 5189 { 5190 struct btrfs_fs_info *fs_info = root->fs_info; 5191 u64 bytenr; 5192 u64 generation; 5193 u64 parent; 5194 struct btrfs_tree_parent_check check = { 0 }; 5195 struct btrfs_key key; 5196 struct btrfs_ref ref = { 0 }; 5197 struct extent_buffer *next; 5198 int level = wc->level; 5199 int reada = 0; 5200 int ret = 0; 5201 bool need_account = false; 5202 5203 generation = btrfs_node_ptr_generation(path->nodes[level], 5204 path->slots[level]); 5205 /* 5206 * if the lower level block was created before the snapshot 5207 * was created, we know there is no need to update back refs 5208 * for the subtree 5209 */ 5210 if (wc->stage == UPDATE_BACKREF && 5211 generation <= root->root_key.offset) { 5212 *lookup_info = 1; 5213 return 1; 5214 } 5215 5216 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); 5217 5218 check.level = level - 1; 5219 check.transid = generation; 5220 check.owner_root = root->root_key.objectid; 5221 check.has_first_key = true; 5222 btrfs_node_key_to_cpu(path->nodes[level], &check.first_key, 5223 path->slots[level]); 5224 5225 next = find_extent_buffer(fs_info, bytenr); 5226 if (!next) { 5227 next = btrfs_find_create_tree_block(fs_info, bytenr, 5228 root->root_key.objectid, level - 1); 5229 if (IS_ERR(next)) 5230 return PTR_ERR(next); 5231 reada = 1; 5232 } 5233 btrfs_tree_lock(next); 5234 5235 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1, 5236 &wc->refs[level - 1], 5237 &wc->flags[level - 1]); 5238 if (ret < 0) 5239 goto out_unlock; 5240 5241 if (unlikely(wc->refs[level - 1] == 0)) { 5242 btrfs_err(fs_info, "Missing references."); 5243 ret = -EIO; 5244 goto out_unlock; 5245 } 5246 *lookup_info = 0; 5247 5248 if (wc->stage == DROP_REFERENCE) { 5249 if (wc->refs[level - 1] > 1) { 5250 need_account = true; 5251 if (level == 1 && 5252 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5253 goto skip; 5254 5255 if (!wc->update_ref || 5256 generation <= root->root_key.offset) 5257 goto skip; 5258 5259 btrfs_node_key_to_cpu(path->nodes[level], &key, 5260 path->slots[level]); 5261 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); 5262 if (ret < 0) 5263 goto skip; 5264 5265 wc->stage = UPDATE_BACKREF; 5266 wc->shared_level = level - 1; 5267 } 5268 } else { 5269 if (level == 1 && 5270 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5271 goto skip; 5272 } 5273 5274 if (!btrfs_buffer_uptodate(next, generation, 0)) { 5275 btrfs_tree_unlock(next); 5276 free_extent_buffer(next); 5277 next = NULL; 5278 *lookup_info = 1; 5279 } 5280 5281 if (!next) { 5282 if (reada && level == 1) 5283 reada_walk_down(trans, root, wc, path); 5284 next = read_tree_block(fs_info, bytenr, &check); 5285 if (IS_ERR(next)) { 5286 return PTR_ERR(next); 5287 } else if (!extent_buffer_uptodate(next)) { 5288 free_extent_buffer(next); 5289 return -EIO; 5290 } 5291 btrfs_tree_lock(next); 5292 } 5293 5294 level--; 5295 ASSERT(level == btrfs_header_level(next)); 5296 if (level != btrfs_header_level(next)) { 5297 btrfs_err(root->fs_info, "mismatched level"); 5298 ret = -EIO; 5299 goto out_unlock; 5300 } 5301 path->nodes[level] = next; 5302 path->slots[level] = 0; 5303 path->locks[level] = BTRFS_WRITE_LOCK; 5304 wc->level = level; 5305 if (wc->level == 1) 5306 wc->reada_slot = 0; 5307 return 0; 5308 skip: 5309 wc->refs[level - 1] = 0; 5310 wc->flags[level - 1] = 0; 5311 if (wc->stage == 
DROP_REFERENCE) { 5312 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 5313 parent = path->nodes[level]->start; 5314 } else { 5315 ASSERT(root->root_key.objectid == 5316 btrfs_header_owner(path->nodes[level])); 5317 if (root->root_key.objectid != 5318 btrfs_header_owner(path->nodes[level])) { 5319 btrfs_err(root->fs_info, 5320 "mismatched block owner"); 5321 ret = -EIO; 5322 goto out_unlock; 5323 } 5324 parent = 0; 5325 } 5326 5327 /* 5328 * If we had a drop_progress we need to verify the refs are set 5329 * as expected. If we find our ref then we know that from here 5330 * on out everything should be correct, and we can clear the 5331 * ->restarted flag. 5332 */ 5333 if (wc->restarted) { 5334 ret = check_ref_exists(trans, root, bytenr, parent, 5335 level - 1); 5336 if (ret < 0) 5337 goto out_unlock; 5338 if (ret == 0) 5339 goto no_delete; 5340 ret = 0; 5341 wc->restarted = 0; 5342 } 5343 5344 /* 5345 * Reloc tree doesn't contribute to qgroup numbers, and we have 5346 * already accounted them at merge time (replace_path), 5347 * thus we could skip expensive subtree trace here. 5348 */ 5349 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 5350 need_account) { 5351 ret = btrfs_qgroup_trace_subtree(trans, next, 5352 generation, level - 1); 5353 if (ret) { 5354 btrfs_err_rl(fs_info, 5355 "Error %d accounting shared subtree. Quota is out of sync, rescan required.", 5356 ret); 5357 } 5358 } 5359 5360 /* 5361 * We need to update the next key in our walk control so we can 5362 * update the drop_progress key accordingly. We don't care if 5363 * find_next_key doesn't find a key because that means we're at 5364 * the end and are going to clean up now. 5365 */ 5366 wc->drop_level = level; 5367 find_next_key(path, level, &wc->drop_progress); 5368 5369 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr, 5370 fs_info->nodesize, parent); 5371 btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid, 5372 0, false); 5373 ret = btrfs_free_extent(trans, &ref); 5374 if (ret) 5375 goto out_unlock; 5376 } 5377 no_delete: 5378 *lookup_info = 1; 5379 ret = 1; 5380 5381 out_unlock: 5382 btrfs_tree_unlock(next); 5383 free_extent_buffer(next); 5384 5385 return ret; 5386 } 5387 5388 /* 5389 * helper to process tree block while walking up the tree. 5390 * 5391 * when wc->stage == DROP_REFERENCE, this function drops 5392 * reference count on the block. 5393 * 5394 * when wc->stage == UPDATE_BACKREF, this function changes 5395 * wc->stage back to DROP_REFERENCE if we changed wc->stage 5396 * to UPDATE_BACKREF previously while processing the block. 5397 * 5398 * NOTE: return value 1 means we should stop walking up. 5399 */ 5400 static noinline int walk_up_proc(struct btrfs_trans_handle *trans, 5401 struct btrfs_root *root, 5402 struct btrfs_path *path, 5403 struct walk_control *wc) 5404 { 5405 struct btrfs_fs_info *fs_info = root->fs_info; 5406 int ret; 5407 int level = wc->level; 5408 struct extent_buffer *eb = path->nodes[level]; 5409 u64 parent = 0; 5410 5411 if (wc->stage == UPDATE_BACKREF) { 5412 BUG_ON(wc->shared_level < level); 5413 if (level < wc->shared_level) 5414 goto out; 5415 5416 ret = find_next_key(path, level + 1, &wc->update_progress); 5417 if (ret > 0) 5418 wc->update_ref = 0; 5419 5420 wc->stage = DROP_REFERENCE; 5421 wc->shared_level = -1; 5422 path->slots[level] = 0; 5423 5424 /* 5425 * check reference count again if the block isn't locked. 5426 * we should start walking down the tree again if reference 5427 * count is one. 
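* (that is the "return 1" below; walk_up_tree() then returns 0 so
* btrfs_drop_snapshot() can start another walk_down_tree() pass)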
5428 */ 5429 if (!path->locks[level]) { 5430 BUG_ON(level == 0); 5431 btrfs_tree_lock(eb); 5432 path->locks[level] = BTRFS_WRITE_LOCK; 5433 5434 ret = btrfs_lookup_extent_info(trans, fs_info, 5435 eb->start, level, 1, 5436 &wc->refs[level], 5437 &wc->flags[level]); 5438 if (ret < 0) { 5439 btrfs_tree_unlock_rw(eb, path->locks[level]); 5440 path->locks[level] = 0; 5441 return ret; 5442 } 5443 BUG_ON(wc->refs[level] == 0); 5444 if (wc->refs[level] == 1) { 5445 btrfs_tree_unlock_rw(eb, path->locks[level]); 5446 path->locks[level] = 0; 5447 return 1; 5448 } 5449 } 5450 } 5451 5452 /* wc->stage == DROP_REFERENCE */ 5453 BUG_ON(wc->refs[level] > 1 && !path->locks[level]); 5454 5455 if (wc->refs[level] == 1) { 5456 if (level == 0) { 5457 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5458 ret = btrfs_dec_ref(trans, root, eb, 1); 5459 else 5460 ret = btrfs_dec_ref(trans, root, eb, 0); 5461 BUG_ON(ret); /* -ENOMEM */ 5462 if (is_fstree(root->root_key.objectid)) { 5463 ret = btrfs_qgroup_trace_leaf_items(trans, eb); 5464 if (ret) { 5465 btrfs_err_rl(fs_info, 5466 "error %d accounting leaf items, quota is out of sync, rescan required", 5467 ret); 5468 } 5469 } 5470 } 5471 /* Make block locked assertion in btrfs_clear_buffer_dirty happy. */ 5472 if (!path->locks[level]) { 5473 btrfs_tree_lock(eb); 5474 path->locks[level] = BTRFS_WRITE_LOCK; 5475 } 5476 btrfs_clear_buffer_dirty(trans, eb); 5477 } 5478 5479 if (eb == root->node) { 5480 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5481 parent = eb->start; 5482 else if (root->root_key.objectid != btrfs_header_owner(eb)) 5483 goto owner_mismatch; 5484 } else { 5485 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5486 parent = path->nodes[level + 1]->start; 5487 else if (root->root_key.objectid != 5488 btrfs_header_owner(path->nodes[level + 1])) 5489 goto owner_mismatch; 5490 } 5491 5492 btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent, 5493 wc->refs[level] == 1); 5494 out: 5495 wc->refs[level] = 0; 5496 wc->flags[level] = 0; 5497 return 0; 5498 5499 owner_mismatch: 5500 btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu", 5501 btrfs_header_owner(eb), root->root_key.objectid); 5502 return -EUCLEAN; 5503 } 5504 5505 static noinline int walk_down_tree(struct btrfs_trans_handle *trans, 5506 struct btrfs_root *root, 5507 struct btrfs_path *path, 5508 struct walk_control *wc) 5509 { 5510 int level = wc->level; 5511 int lookup_info = 1; 5512 int ret; 5513 5514 while (level >= 0) { 5515 ret = walk_down_proc(trans, root, path, wc, lookup_info); 5516 if (ret > 0) 5517 break; 5518 5519 if (level == 0) 5520 break; 5521 5522 if (path->slots[level] >= 5523 btrfs_header_nritems(path->nodes[level])) 5524 break; 5525 5526 ret = do_walk_down(trans, root, path, wc, &lookup_info); 5527 if (ret > 0) { 5528 path->slots[level]++; 5529 continue; 5530 } else if (ret < 0) 5531 return ret; 5532 level = wc->level; 5533 } 5534 return 0; 5535 } 5536 5537 static noinline int walk_up_tree(struct btrfs_trans_handle *trans, 5538 struct btrfs_root *root, 5539 struct btrfs_path *path, 5540 struct walk_control *wc, int max_level) 5541 { 5542 int level = wc->level; 5543 int ret; 5544 5545 path->slots[level] = btrfs_header_nritems(path->nodes[level]); 5546 while (level < max_level && path->nodes[level]) { 5547 wc->level = level; 5548 if (path->slots[level] + 1 < 5549 btrfs_header_nritems(path->nodes[level])) { 5550 path->slots[level]++; 5551 return 0; 5552 } else { 5553 ret = walk_up_proc(trans, root, path, wc); 5554 if (ret > 0) 5555 
return 0; 5556 if (ret < 0) 5557 return ret; 5558 5559 if (path->locks[level]) { 5560 btrfs_tree_unlock_rw(path->nodes[level], 5561 path->locks[level]); 5562 path->locks[level] = 0; 5563 } 5564 free_extent_buffer(path->nodes[level]); 5565 path->nodes[level] = NULL; 5566 level++; 5567 } 5568 } 5569 return 1; 5570 } 5571 5572 /* 5573 * drop a subvolume tree. 5574 * 5575 * this function traverses the tree, freeing any blocks that are only 5576 * referenced by the tree. 5577 * 5578 * when a shared tree block is found, this function decreases its 5579 * reference count by one. if update_ref is true, this function 5580 * also makes sure backrefs for the shared block and all lower level 5581 * blocks are properly updated. 5582 * 5583 * If called with for_reloc == 0, this may exit early with -EAGAIN. 5584 */ 5585 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) 5586 { 5587 const bool is_reloc_root = (root->root_key.objectid == 5588 BTRFS_TREE_RELOC_OBJECTID); 5589 struct btrfs_fs_info *fs_info = root->fs_info; 5590 struct btrfs_path *path; 5591 struct btrfs_trans_handle *trans; 5592 struct btrfs_root *tree_root = fs_info->tree_root; 5593 struct btrfs_root_item *root_item = &root->root_item; 5594 struct walk_control *wc; 5595 struct btrfs_key key; 5596 int err = 0; 5597 int ret; 5598 int level; 5599 bool root_dropped = false; 5600 bool unfinished_drop = false; 5601 5602 btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid); 5603 5604 path = btrfs_alloc_path(); 5605 if (!path) { 5606 err = -ENOMEM; 5607 goto out; 5608 } 5609 5610 wc = kzalloc(sizeof(*wc), GFP_NOFS); 5611 if (!wc) { 5612 btrfs_free_path(path); 5613 err = -ENOMEM; 5614 goto out; 5615 } 5616 5617 /* 5618 * Use join to avoid potential EINTR from transaction start. See 5619 * wait_reserve_ticket and the whole reservation callchain. 5620 */ 5621 if (for_reloc) 5622 trans = btrfs_join_transaction(tree_root); 5623 else 5624 trans = btrfs_start_transaction(tree_root, 0); 5625 if (IS_ERR(trans)) { 5626 err = PTR_ERR(trans); 5627 goto out_free; 5628 } 5629 5630 err = btrfs_run_delayed_items(trans); 5631 if (err) 5632 goto out_end_trans; 5633 5634 /* 5635 * This will help us catch people modifying the fs tree while we're 5636 * dropping it. It is unsafe to mess with the fs tree while it's being 5637 * dropped as we unlock the root node and parent nodes as we walk down 5638 * the tree, assuming nothing will change. If something does change 5639 * then we'll have stale information and drop references to blocks we've 5640 * already dropped.
5641 */ 5642 set_bit(BTRFS_ROOT_DELETING, &root->state); 5643 unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state); 5644 5645 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 5646 level = btrfs_header_level(root->node); 5647 path->nodes[level] = btrfs_lock_root_node(root); 5648 path->slots[level] = 0; 5649 path->locks[level] = BTRFS_WRITE_LOCK; 5650 memset(&wc->update_progress, 0, 5651 sizeof(wc->update_progress)); 5652 } else { 5653 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 5654 memcpy(&wc->update_progress, &key, 5655 sizeof(wc->update_progress)); 5656 5657 level = btrfs_root_drop_level(root_item); 5658 BUG_ON(level == 0); 5659 path->lowest_level = level; 5660 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5661 path->lowest_level = 0; 5662 if (ret < 0) { 5663 err = ret; 5664 goto out_end_trans; 5665 } 5666 WARN_ON(ret > 0); 5667 5668 /* 5669 * unlock our path, this is safe because only this 5670 * function is allowed to delete this snapshot 5671 */ 5672 btrfs_unlock_up_safe(path, 0); 5673 5674 level = btrfs_header_level(root->node); 5675 while (1) { 5676 btrfs_tree_lock(path->nodes[level]); 5677 path->locks[level] = BTRFS_WRITE_LOCK; 5678 5679 ret = btrfs_lookup_extent_info(trans, fs_info, 5680 path->nodes[level]->start, 5681 level, 1, &wc->refs[level], 5682 &wc->flags[level]); 5683 if (ret < 0) { 5684 err = ret; 5685 goto out_end_trans; 5686 } 5687 BUG_ON(wc->refs[level] == 0); 5688 5689 if (level == btrfs_root_drop_level(root_item)) 5690 break; 5691 5692 btrfs_tree_unlock(path->nodes[level]); 5693 path->locks[level] = 0; 5694 WARN_ON(wc->refs[level] != 1); 5695 level--; 5696 } 5697 } 5698 5699 wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state); 5700 wc->level = level; 5701 wc->shared_level = -1; 5702 wc->stage = DROP_REFERENCE; 5703 wc->update_ref = update_ref; 5704 wc->keep_locks = 0; 5705 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); 5706 5707 while (1) { 5708 5709 ret = walk_down_tree(trans, root, path, wc); 5710 if (ret < 0) { 5711 err = ret; 5712 break; 5713 } 5714 5715 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); 5716 if (ret < 0) { 5717 err = ret; 5718 break; 5719 } 5720 5721 if (ret > 0) { 5722 BUG_ON(wc->stage != DROP_REFERENCE); 5723 break; 5724 } 5725 5726 if (wc->stage == DROP_REFERENCE) { 5727 wc->drop_level = wc->level; 5728 btrfs_node_key_to_cpu(path->nodes[wc->drop_level], 5729 &wc->drop_progress, 5730 path->slots[wc->drop_level]); 5731 } 5732 btrfs_cpu_key_to_disk(&root_item->drop_progress, 5733 &wc->drop_progress); 5734 btrfs_set_root_drop_level(root_item, wc->drop_level); 5735 5736 BUG_ON(wc->level == 0); 5737 if (btrfs_should_end_transaction(trans) || 5738 (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) { 5739 ret = btrfs_update_root(trans, tree_root, 5740 &root->root_key, 5741 root_item); 5742 if (ret) { 5743 btrfs_abort_transaction(trans, ret); 5744 err = ret; 5745 goto out_end_trans; 5746 } 5747 5748 if (!is_reloc_root) 5749 btrfs_set_last_root_drop_gen(fs_info, trans->transid); 5750 5751 btrfs_end_transaction_throttle(trans); 5752 if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) { 5753 btrfs_debug(fs_info, 5754 "drop snapshot early exit"); 5755 err = -EAGAIN; 5756 goto out_free; 5757 } 5758 5759 /* 5760 * Use join to avoid potential EINTR from transaction 5761 * start. See wait_reserve_ticket and the whole 5762 * reservation callchain. 
5763 */ 5764 if (for_reloc) 5765 trans = btrfs_join_transaction(tree_root); 5766 else 5767 trans = btrfs_start_transaction(tree_root, 0); 5768 if (IS_ERR(trans)) { 5769 err = PTR_ERR(trans); 5770 goto out_free; 5771 } 5772 } 5773 } 5774 btrfs_release_path(path); 5775 if (err) 5776 goto out_end_trans; 5777 5778 ret = btrfs_del_root(trans, &root->root_key); 5779 if (ret) { 5780 btrfs_abort_transaction(trans, ret); 5781 err = ret; 5782 goto out_end_trans; 5783 } 5784 5785 if (!is_reloc_root) { 5786 ret = btrfs_find_root(tree_root, &root->root_key, path, 5787 NULL, NULL); 5788 if (ret < 0) { 5789 btrfs_abort_transaction(trans, ret); 5790 err = ret; 5791 goto out_end_trans; 5792 } else if (ret > 0) { 5793 /* if we fail to delete the orphan item this time 5794 * around, it'll get picked up the next time. 5795 * 5796 * The most common failure here is just -ENOENT. 5797 */ 5798 btrfs_del_orphan_item(trans, tree_root, 5799 root->root_key.objectid); 5800 } 5801 } 5802 5803 /* 5804 * This subvolume is going to be completely dropped, and won't be 5805 * recorded as dirty roots, thus pertrans meta rsv will not be freed at 5806 * commit transaction time. So free it here manually. 5807 */ 5808 btrfs_qgroup_convert_reserved_meta(root, INT_MAX); 5809 btrfs_qgroup_free_meta_all_pertrans(root); 5810 5811 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) 5812 btrfs_add_dropped_root(trans, root); 5813 else 5814 btrfs_put_root(root); 5815 root_dropped = true; 5816 out_end_trans: 5817 if (!is_reloc_root) 5818 btrfs_set_last_root_drop_gen(fs_info, trans->transid); 5819 5820 btrfs_end_transaction_throttle(trans); 5821 out_free: 5822 kfree(wc); 5823 btrfs_free_path(path); 5824 out: 5825 /* 5826 * We were an unfinished drop root, check to see if there are any 5827 * pending, and if not clear and wake up any waiters. 5828 */ 5829 if (!err && unfinished_drop) 5830 btrfs_maybe_wake_unfinished_drop(fs_info); 5831 5832 /* 5833 * So if we need to stop dropping the snapshot for whatever reason we 5834 * need to make sure to add it back to the dead root list so that we 5835 * keep trying to do the work later. This also cleans up roots if we 5836 * don't have it in the radix (like when we recover after a power fail 5837 * or unmount) so we don't leak memory. 5838 */ 5839 if (!for_reloc && !root_dropped) 5840 btrfs_add_dead_root(root); 5841 return err; 5842 } 5843 5844 /* 5845 * drop subtree rooted at tree block 'node'. 

/*
 * Drop the subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * Only used by the relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_write_locked(parent);
	parent_level = btrfs_header_level(parent);
	atomic_inc(&parent->refs);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_write_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
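
/*
 * Illustrative sketch, not part of the original file: the locking contract
 * btrfs_drop_subtree() expects from its relocation-code caller.  Both the
 * parent and the child node must be write locked on entry; the helper takes
 * over the reference and lock on 'node' and releases both before returning,
 * while 'parent' remains locked by the caller.  The function name
 * example_drop_child_subtree() and its use of btrfs_read_node_slot() are
 * hypothetical.
 */
static int __maybe_unused example_drop_child_subtree(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *reloc_root,
					struct extent_buffer *parent, int slot)
{
	struct extent_buffer *node;

	node = btrfs_read_node_slot(parent, slot);
	if (IS_ERR(node))
		return PTR_ERR(node);

	btrfs_tree_lock(node);
	/* 'node' is unlocked and released inside btrfs_drop_subtree(). */
	return btrfs_drop_subtree(trans, reloc_root, node, parent);
}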

int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end)
{
	return unpin_extent_range(fs_info, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space.  Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time.  We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released or
 * allocated in the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses.  For that, we need to take a reference to the
 * transaction and hold the commit root sem.  We only need to hold
 * it while performing the free space search since we have already
 * held back allocations.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
	u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
	int ret;

	*trimmed = 0;

	/* Discard not supported = nothing to do. */
	if (!bdev_max_discard_sectors(device->bdev))
		return 0;

	/* Not writable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			break;

		find_first_clear_extent_bit(&device->alloc_state, start,
					    &start, &end,
					    CHUNK_TRIMMED | CHUNK_ALLOCATED);

		/* Check if there are any CHUNK_* bits left */
		if (start > device->total_bytes) {
			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
			btrfs_warn_in_rcu(fs_info,
					  "ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
					  start, end - start + 1,
					  btrfs_dev_name(device),
					  device->total_bytes);
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		/* Ensure we skip the reserved space on each device. */
		start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);

		/*
		 * If find_first_clear_extent_bit finds a range that spans the
		 * end of the device it will set end to -1, in this case it's
		 * up to the caller to trim the value to the size of the
		 * device.
		 */
		end = min(end, device->total_bytes - 1);

		len = end - start + 1;

		/* We didn't find any extents */
		if (!len) {
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len,
					  &bytes);
		if (!ret)
			set_extent_bits(&device->alloc_state, start,
					start + bytes - 1,
					CHUNK_TRIMMED);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
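
/*
 * Illustrative sketch, not part of the original file: the range clamping
 * done by the loop above, pulled out as a hypothetical helper.  A range
 * reported by find_first_clear_extent_bit() may start inside the reserved
 * area at the front of the device or run past the device end (end == -1),
 * so it has to be clamped before it can be discarded.
 */
static u64 __maybe_unused example_trimmable_len(struct btrfs_device *device,
						u64 start, u64 end)
{
	/* Never trim the reserved space at the start of each device. */
	start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
	/* Clamp a range that spans the end of the device. */
	end = min(end, device->total_bytes - 1);
	if (end < start)
		return 0;
	return end - start + 1;
}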
6062 */ 6063 if (range->len != U64_MAX && 6064 check_add_overflow(range->start, range->len, &range_end)) 6065 return -EINVAL; 6066 6067 cache = btrfs_lookup_first_block_group(fs_info, range->start); 6068 for (; cache; cache = btrfs_next_block_group(cache)) { 6069 if (cache->start >= range_end) { 6070 btrfs_put_block_group(cache); 6071 break; 6072 } 6073 6074 start = max(range->start, cache->start); 6075 end = min(range_end, cache->start + cache->length); 6076 6077 if (end - start >= range->minlen) { 6078 if (!btrfs_block_group_done(cache)) { 6079 ret = btrfs_cache_block_group(cache, true); 6080 if (ret) { 6081 bg_failed++; 6082 bg_ret = ret; 6083 continue; 6084 } 6085 } 6086 ret = btrfs_trim_block_group(cache, 6087 &group_trimmed, 6088 start, 6089 end, 6090 range->minlen); 6091 6092 trimmed += group_trimmed; 6093 if (ret) { 6094 bg_failed++; 6095 bg_ret = ret; 6096 continue; 6097 } 6098 } 6099 } 6100 6101 if (bg_failed) 6102 btrfs_warn(fs_info, 6103 "failed to trim %llu block group(s), last error %d", 6104 bg_failed, bg_ret); 6105 6106 mutex_lock(&fs_devices->device_list_mutex); 6107 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6108 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 6109 continue; 6110 6111 ret = btrfs_trim_free_extents(device, &group_trimmed); 6112 if (ret) { 6113 dev_failed++; 6114 dev_ret = ret; 6115 break; 6116 } 6117 6118 trimmed += group_trimmed; 6119 } 6120 mutex_unlock(&fs_devices->device_list_mutex); 6121 6122 if (dev_failed) 6123 btrfs_warn(fs_info, 6124 "failed to trim %llu device(s), last error %d", 6125 dev_failed, dev_ret); 6126 range->len = trimmed; 6127 if (bg_ret) 6128 return bg_ret; 6129 return dev_ret; 6130 } 6131