// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "misc.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "discard.h"
#include "rcu-string.h"
#include "zoned.h"
#include "dev-replace.h"

#undef SCRAMBLE_DELAYED_REFS

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);

static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
			      u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&fs_info->excluded_extents, start, end,
			EXTENT_UPTODATE);
	return 0;
}

void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 start, end;

	start = cache->start;
	end = start + cache->length - 1;

	clear_extent_bits(&fs_info->excluded_extents, start, end,
			  EXTENT_UPTODATE);
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	struct btrfs_root *root = btrfs_extent_root(fs_info, start);
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be once all of the
 * queued delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_root *extent_root;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	extent_root = btrfs_extent_root(fs_info, bytenr);
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(fs_info, ret, NULL);

			goto out_free;
		}

		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
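
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * to know whether a tree block is shared, including any pending delayed
 * ref updates, might do something like:
 *
 *	u64 refs, flags;
 *
 *	ret = btrfs_lookup_extent_info(trans, fs_info, eb->start,
 *				       btrfs_header_level(eb), 1,
 *				       &refs, &flags);
 *	if (!ret && refs > 1)
 *		;	// the block is referenced by more than one root
 */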

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized for
 * pointers in non-shared tree blocks.  For a given pointer in a block, back
 * refs of this kind provide information about the block's owner tree and
 * the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees; the location of the tree block is
 * recorded in the back ref.  Full back refs are actually generic and could
 * be used everywhere implicit back refs are used.  The major shortcoming of
 * full back refs is their overhead: every time a tree block gets COWed, we
 * have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for the
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back ref conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the block's
 * owner tree.  In this case, full back refs are used for the pointers in
 * the block.  Remove these full back refs and add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is the
 * block's owner tree.  In this case, implicit back refs are used for the
 * pointers in the block.  Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is not
 * the block's owner tree.  Add implicit back refs for every pointer in the
 * new block and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is used to differentiate between types of back refs.  The
 * meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first three
 * fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of the tree
 * leaf.
 *
 * When a file extent is allocated, the implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in the
 * tree block info structure.
 */
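
/*
 * Illustrative example (not part of the original file), assuming a file
 * extent at bytenr 136708096 referenced once by inode 257 at file offset 0
 * in subvolume 5.  Its implicit back ref would be keyed and stored as:
 *
 *	key:  (136708096, BTRFS_EXTENT_DATA_REF_KEY,
 *	       hash_extent_data_ref(5, 257, 0))
 *	item: btrfs_extent_data_ref { root: 5, objectid: 257,
 *	                              offset: 0, count: 1 }
 *
 * If the same extent were instead referenced through a shared parent leaf
 * at bytenr P (a full back ref), the key would be
 * (136708096, BTRFS_SHARED_DATA_REF_KEY, P) and the item would carry only
 * a reference count.
 */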

/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->sectorsize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->sectorsize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info,
		  "eb %llu iref 0x%lx invalid extent inline ref type %d",
		  eb->start, (unsigned long)iref, type);
	WARN_ON(1);

	return BTRFS_REF_TYPE_INVALID;
}
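
/*
 * Hash (root_objectid, owner, offset) into the 64-bit key offset used for
 * implicit data back refs: the crc32c of the root objectid forms the high
 * bits and the crc32c of owner and offset the low bits.  Note the high
 * half is shifted by 31, not 32; the resulting layout is part of the
 * on-disk format and cannot be changed.
 */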
388 */ 389 if (offset && 390 IS_ALIGNED(offset, eb->fs_info->sectorsize)) 391 return type; 392 } 393 } else { 394 ASSERT(is_data == BTRFS_REF_TYPE_ANY); 395 return type; 396 } 397 } 398 399 btrfs_print_leaf((struct extent_buffer *)eb); 400 btrfs_err(eb->fs_info, 401 "eb %llu iref 0x%lx invalid extent inline ref type %d", 402 eb->start, (unsigned long)iref, type); 403 WARN_ON(1); 404 405 return BTRFS_REF_TYPE_INVALID; 406 } 407 408 u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset) 409 { 410 u32 high_crc = ~(u32)0; 411 u32 low_crc = ~(u32)0; 412 __le64 lenum; 413 414 lenum = cpu_to_le64(root_objectid); 415 high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum)); 416 lenum = cpu_to_le64(owner); 417 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); 418 lenum = cpu_to_le64(offset); 419 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); 420 421 return ((u64)high_crc << 31) ^ (u64)low_crc; 422 } 423 424 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf, 425 struct btrfs_extent_data_ref *ref) 426 { 427 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref), 428 btrfs_extent_data_ref_objectid(leaf, ref), 429 btrfs_extent_data_ref_offset(leaf, ref)); 430 } 431 432 static int match_extent_data_ref(struct extent_buffer *leaf, 433 struct btrfs_extent_data_ref *ref, 434 u64 root_objectid, u64 owner, u64 offset) 435 { 436 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid || 437 btrfs_extent_data_ref_objectid(leaf, ref) != owner || 438 btrfs_extent_data_ref_offset(leaf, ref) != offset) 439 return 0; 440 return 1; 441 } 442 443 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans, 444 struct btrfs_path *path, 445 u64 bytenr, u64 parent, 446 u64 root_objectid, 447 u64 owner, u64 offset) 448 { 449 struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); 450 struct btrfs_key key; 451 struct btrfs_extent_data_ref *ref; 452 struct extent_buffer *leaf; 453 u32 nritems; 454 int ret; 455 int recow; 456 int err = -ENOENT; 457 458 key.objectid = bytenr; 459 if (parent) { 460 key.type = BTRFS_SHARED_DATA_REF_KEY; 461 key.offset = parent; 462 } else { 463 key.type = BTRFS_EXTENT_DATA_REF_KEY; 464 key.offset = hash_extent_data_ref(root_objectid, 465 owner, offset); 466 } 467 again: 468 recow = 0; 469 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 470 if (ret < 0) { 471 err = ret; 472 goto fail; 473 } 474 475 if (parent) { 476 if (!ret) 477 return 0; 478 goto fail; 479 } 480 481 leaf = path->nodes[0]; 482 nritems = btrfs_header_nritems(leaf); 483 while (1) { 484 if (path->slots[0] >= nritems) { 485 ret = btrfs_next_leaf(root, path); 486 if (ret < 0) 487 err = ret; 488 if (ret) 489 goto fail; 490 491 leaf = path->nodes[0]; 492 nritems = btrfs_header_nritems(leaf); 493 recow = 1; 494 } 495 496 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 497 if (key.objectid != bytenr || 498 key.type != BTRFS_EXTENT_DATA_REF_KEY) 499 goto fail; 500 501 ref = btrfs_item_ptr(leaf, path->slots[0], 502 struct btrfs_extent_data_ref); 503 504 if (match_extent_data_ref(leaf, ref, root_objectid, 505 owner, offset)) { 506 if (recow) { 507 btrfs_release_path(path); 508 goto again; 509 } 510 err = 0; 511 break; 512 } 513 path->slots[0]++; 514 } 515 fail: 516 return err; 517 } 518 519 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, 520 struct btrfs_path *path, 521 u64 bytenr, u64 parent, 522 u64 root_objectid, u64 owner, 523 u64 offset, int refs_to_add) 524 { 525 struct btrfs_root *root = 
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
		btrfs_print_v0_err(trans->fs_info);
		btrfs_abort_transaction(trans, -EINVAL);
		return -EINVAL;
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);

		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
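
/*
 * Return the reference count recorded in a data back ref, whether it is an
 * inline ref inside the extent item (iref != NULL) or the standalone back
 * ref item the path currently points at.
 */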
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
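
/*
 * Walk up the path from the given level and return, in @key, the key that
 * follows the current slot.  Returns 0 if a next key exists at some level,
 * 1 if the path points at the last item of the tree.
 */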
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->search_for_extension = 1;
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our level, so we can just add one to get the level for the
	 * block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		path->search_for_extension = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
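
/*
 * Try to insert a new inline back ref for the extent: if a matching inline
 * ref already exists its count is bumped, if there is room a new inline
 * ref is set up, and -EAGAIN from the lookup is passed back to the caller
 * so a separate back ref item can be inserted instead.
 */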
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		/*
		 * We're adding refs to a tree block we already own; this
		 * should not happen at all.
		 */
		if (owner < BTRFS_FIRST_FREE_OBJECTID) {
			btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu",
				   bytenr, num_bytes, root_objectid);
			if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) {
				WARN_ON(1);
				btrfs_crit(trans->fs_info,
			"path->slots[0]=%d path->nodes[0]:", path->slots[0]);
				btrfs_print_leaf(path->nodes[0]);
			}
			return -EUCLEAN;
		}
		update_inline_extent_backref(path, iref, refs_to_add,
					     extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(path, iref, -refs_to_drop, NULL,
					     last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
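
/*
 * Issue a discard for the given byte range of the device, rounding the
 * range to complete 512-byte sectors and carefully stepping over any
 * superblock mirrors that fall inside it.  The number of bytes actually
 * discarded is returned in @discarded_bytes.
 */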
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}
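
/*
 * Discard one stripe of an extent: on a zoned device this becomes a zone
 * reset (mirrored to the dev-replace target if one is running), otherwise
 * a regular block layer discard if the underlying queue supports it.
 */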
static int do_discard_extent(struct btrfs_io_stripe *stripe, u64 *bytes)
{
	struct btrfs_device *dev = stripe->dev;
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 phys = stripe->physical;
	u64 len = stripe->length;
	u64 discarded = 0;
	int ret = 0;

	/* Zone reset on a zoned filesystem */
	if (btrfs_can_zone_reset(dev, phys, len)) {
		u64 src_disc;

		ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
		if (ret)
			goto out;

		if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
		    dev != dev_replace->srcdev)
			goto out;

		src_disc = discarded;

		/* Send to replace target as well */
		ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
					      &discarded);
		discarded += src_disc;
	} else if (blk_queue_discard(bdev_get_queue(stripe->dev->bdev))) {
		ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
	} else {
		ret = 0;
		*bytes = 0;
	}

out:
	*bytes = discarded;
	return ret;
}

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret = 0;
	u64 discarded_bytes = 0;
	u64 end = bytenr + num_bytes;
	u64 cur = bytenr;
	struct btrfs_io_context *bioc = NULL;

	/*
	 * Avoid races with device replace and make sure our bioc has devices
	 * associated to its stripes that don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	while (cur < end) {
		struct btrfs_io_stripe *stripe;
		int i;

		num_bytes = end - cur;
		/* Tell the block device(s) that the sectors can be discarded */
		ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
				      &num_bytes, &bioc, 0);
		/*
		 * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
		 * -EOPNOTSUPP. For any such error, @num_bytes is not updated,
		 * thus we can't continue anyway.
		 */
		if (ret < 0)
			goto out;

		stripe = bioc->stripes;
		for (i = 0; i < bioc->num_stripes; i++, stripe++) {
			u64 bytes;
			struct btrfs_device *device = stripe->dev;

			if (!device->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}

			if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
				continue;

			ret = do_discard_extent(stripe, &bytes);
			if (!ret) {
				discarded_bytes += bytes;
			} else if (ret != -EOPNOTSUPP) {
				/*
				 * Logic errors or -ENOMEM, or -EIO, but
				 * unlikely to happen.
				 *
				 * And since there are two loops, explicitly
				 * go to out to avoid confusion.
				 */
				btrfs_put_bioc(bioc);
				goto out;
			}

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bioc(bioc);
		cur += num_bytes;
	}
out:
	btrfs_bio_counter_dec(fs_info);

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}
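
/*
 * Illustrative sketch (not part of the original file): callers describe
 * the reference in a struct btrfs_ref before queueing it, roughly:
 *
 *	struct btrfs_ref ref = { 0 };
 *
 *	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, buf->start,
 *			       buf->len, parent);
 *	btrfs_init_tree_ref(&ref, btrfs_header_level(buf),
 *			    root->root_key.objectid, root->root_key.objectid,
 *			    false);
 *	ret = btrfs_inc_extent_ref(trans, &ref);
 *
 * The exact helper signatures vary between kernel versions; see
 * delayed-ref.h for the ones matching this tree.
 */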

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
	       generic_ref->action);
	BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
	       generic_ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID);

	if (generic_ref->type == BTRFS_REF_METADATA)
		ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
	else
		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);

	btrfs_ref_tree_mod(fs_info, generic_ref);

	return ret;
}

/*
 * __btrfs_inc_extent_ref - insert backreference for a given extent
 *
 * The counterpart is in __btrfs_free_extent(), with examples and more details
 * how it works.
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for
 *		    extent whose references are incremented.
 *
 * @parent:	    If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *		    BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *		    bytenr of the parent block. Since new extents are always
 *		    created with indirect references, this will only be the case
 *		    when relocating a shared extent. In that case, root_objectid
 *		    will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
 *		    be 0
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *		    this can be either one of the well-known metadata trees or
 *		    the subvolume id which references this extent.
 *
 * @owner:	    For data extents it is the inode number of the owning file.
 *		    For metadata extents this parameter holds the level in the
 *		    tree of the extent.
 *
 * @offset:	    For metadata extents the offset is ignored and is currently
 *		    always passed as 0. For data extents it is the file offset
 *		    this extent belongs to.
 *
 * @refs_to_add:    Number of references to add
 *
 * @extent_op:	    Pointer to a structure, holding information necessary when
 *		    updating a tree block's flags
 *
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   parent, root_objectid, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* now insert the actual backref */
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset,
					     refs_to_add);
	}
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}
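
/*
 * Process one delayed ref node for a data extent: allocate the reserved
 * extent item on first insertion, or add/drop the back ref depending on
 * the node's action.
 */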
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, parent, ref_root,
						 flags, ref->objectid,
						 ref->offset, &ins,
						 node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->objectid, ref->offset,
					     node->ref_mod, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (TRANS_ABORTED(trans))
		return 0;

	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

	root = btrfs_extent_root(fs_info, key.objectid);
again:
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);

	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
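
/*
 * Process one delayed ref node for a tree block.  Tree block refs always
 * carry a ref_mod of 1; anything else indicates corruption of the delayed
 * ref tree.
 */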
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	u64 parent = 0;
	u64 ref_root = 0;

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->ref_mod != 1) {
		btrfs_err(trans->fs_info,
	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
			  node->bytenr, node->ref_mod, node->action, ref_root,
			  parent);
		return -EIO;
	}
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, node, extent_op);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (TRANS_ABORTED(trans)) {
		if (insert_reserved)
			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, node, extent_op,
					   insert_reserved);
	else
		BUG();
	if (ret && insert_reserved)
		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
	return ret;
}

static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return NULL;

	/*
	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * This is to prevent a ref count from going down to zero, which deletes
	 * the extent item from the extent tree, when there still are references
	 * to add, which would fail because they would not find the extent item.
	 */
	if (!list_empty(&head->ref_add_list))
		return list_first_entry(&head->ref_add_list,
					struct btrfs_delayed_ref_node, add_list);

	ref = rb_entry(rb_first_cached(&head->ref_tree),
		       struct btrfs_delayed_ref_node, ref_node);
	ASSERT(list_empty(&ref->add_list));
	return ref;
}

static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
				      struct btrfs_delayed_ref_head *head)
{
	spin_lock(&delayed_refs->lock);
	head->processing = 0;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
	btrfs_delayed_ref_unlock(head);
}
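
/*
 * Return the extent_op attached to the head, or NULL.  For heads that must
 * insert a reserved extent the flags/key update is folded into the
 * insertion itself, so a pending extent_op is freed rather than returned.
 */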
static struct btrfs_delayed_extent_op *cleanup_extent_op(
				struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;

	if (!extent_op)
		return NULL;

	if (head->must_insert_reserved) {
		head->extent_op = NULL;
		btrfs_free_delayed_extent_op(extent_op);
		return NULL;
	}
	return extent_op;
}

static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = cleanup_extent_op(head);
	if (!extent_op)
		return 0;
	head->extent_op = NULL;
	spin_unlock(&head->lock);
	ret = run_delayed_extent_op(trans, head, extent_op);
	btrfs_free_delayed_extent_op(extent_op);
	return ret ? ret : 1;
}

void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
				  struct btrfs_delayed_ref_root *delayed_refs,
				  struct btrfs_delayed_ref_head *head)
{
	int nr_items = 1;	/* Dropping this ref head update. */

	/*
	 * We had csum deletions accounted for in our delayed refs rsv, we need
	 * to drop the csum leaves for this update from our delayed_refs_rsv.
	 */
	if (head->total_ref_mod < 0 && head->is_data) {
		spin_lock(&delayed_refs->lock);
		delayed_refs->pending_csums -= head->num_bytes;
		spin_unlock(&delayed_refs->lock);
		nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
	}

	btrfs_delayed_refs_rsv_release(fs_info, nr_items);
}

static int cleanup_ref_head(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;

	ret = run_and_cleanup_extent_op(trans, head);
	if (ret < 0) {
		unselect_delayed_ref_head(delayed_refs, head);
		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
		return ret;
	} else if (ret) {
		return ret;
	}

	/*
	 * Need to drop our head ref lock and re-acquire the delayed ref lock
	 * and then re-check to make sure nobody got added.
	 */
	spin_unlock(&head->lock);
	spin_lock(&delayed_refs->lock);
	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		return 1;
	}
	btrfs_delete_ref_head(delayed_refs, head);
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	if (head->must_insert_reserved) {
		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
		if (head->is_data) {
			struct btrfs_root *csum_root;

			csum_root = btrfs_csum_root(fs_info, head->bytenr);
			ret = btrfs_del_csums(trans, csum_root, head->bytenr,
					      head->num_bytes);
		}
	}

	btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);

	trace_run_delayed_ref_head(fs_info, head, 0);
	btrfs_delayed_ref_unlock(head);
	btrfs_put_delayed_ref_head(head);
	return ret;
}

static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
					struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_head *head = NULL;
	int ret;

	spin_lock(&delayed_refs->lock);
	head = btrfs_select_ref_head(delayed_refs);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return head;
	}

	/*
	 * Grab the lock that says we are going to process all the refs for
	 * this head
	 */
	ret = btrfs_delayed_ref_lock(delayed_refs, head);
	spin_unlock(&delayed_refs->lock);

	/*
	 * We may have dropped the spin lock to get the head mutex lock, and
	 * that might have given someone else time to free the head. If that's
	 * true, it has been removed from our list and we can move on.
	 */
	if (ret == -EAGAIN)
		head = ERR_PTR(-EAGAIN);

	return head;
}
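
/*
 * Run all currently queued refs of the given (locked) head, correcting the
 * head's ref_mod as each ref is applied.  Returns -EAGAIN if a ref is
 * blocked on a tree mod log seq, 0 when the head has been drained, or a
 * negative errno on failure; on -EAGAIN or error the head has already been
 * unlocked.
 */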
1896 */ 1897 if (ret == -EAGAIN) 1898 head = ERR_PTR(-EAGAIN); 1899 1900 return head; 1901 } 1902 1903 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, 1904 struct btrfs_delayed_ref_head *locked_ref, 1905 unsigned long *run_refs) 1906 { 1907 struct btrfs_fs_info *fs_info = trans->fs_info; 1908 struct btrfs_delayed_ref_root *delayed_refs; 1909 struct btrfs_delayed_extent_op *extent_op; 1910 struct btrfs_delayed_ref_node *ref; 1911 int must_insert_reserved = 0; 1912 int ret; 1913 1914 delayed_refs = &trans->transaction->delayed_refs; 1915 1916 lockdep_assert_held(&locked_ref->mutex); 1917 lockdep_assert_held(&locked_ref->lock); 1918 1919 while ((ref = select_delayed_ref(locked_ref))) { 1920 if (ref->seq && 1921 btrfs_check_delayed_seq(fs_info, ref->seq)) { 1922 spin_unlock(&locked_ref->lock); 1923 unselect_delayed_ref_head(delayed_refs, locked_ref); 1924 return -EAGAIN; 1925 } 1926 1927 (*run_refs)++; 1928 ref->in_tree = 0; 1929 rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree); 1930 RB_CLEAR_NODE(&ref->ref_node); 1931 if (!list_empty(&ref->add_list)) 1932 list_del(&ref->add_list); 1933 /* 1934 * When we play the delayed ref, also correct the ref_mod on 1935 * head 1936 */ 1937 switch (ref->action) { 1938 case BTRFS_ADD_DELAYED_REF: 1939 case BTRFS_ADD_DELAYED_EXTENT: 1940 locked_ref->ref_mod -= ref->ref_mod; 1941 break; 1942 case BTRFS_DROP_DELAYED_REF: 1943 locked_ref->ref_mod += ref->ref_mod; 1944 break; 1945 default: 1946 WARN_ON(1); 1947 } 1948 atomic_dec(&delayed_refs->num_entries); 1949 1950 /* 1951 * Record the must_insert_reserved flag before we drop the 1952 * spin lock. 1953 */ 1954 must_insert_reserved = locked_ref->must_insert_reserved; 1955 locked_ref->must_insert_reserved = 0; 1956 1957 extent_op = locked_ref->extent_op; 1958 locked_ref->extent_op = NULL; 1959 spin_unlock(&locked_ref->lock); 1960 1961 ret = run_one_delayed_ref(trans, ref, extent_op, 1962 must_insert_reserved); 1963 1964 btrfs_free_delayed_extent_op(extent_op); 1965 if (ret) { 1966 unselect_delayed_ref_head(delayed_refs, locked_ref); 1967 btrfs_put_delayed_ref(ref); 1968 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", 1969 ret); 1970 return ret; 1971 } 1972 1973 btrfs_put_delayed_ref(ref); 1974 cond_resched(); 1975 1976 spin_lock(&locked_ref->lock); 1977 btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref); 1978 } 1979 1980 return 0; 1981 } 1982 1983 /* 1984 * Returns 0 on success or if called with an already aborted transaction. 1985 * Returns -ENOMEM or -EIO on failure and will abort the transaction. 1986 */ 1987 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 1988 unsigned long nr) 1989 { 1990 struct btrfs_fs_info *fs_info = trans->fs_info; 1991 struct btrfs_delayed_ref_root *delayed_refs; 1992 struct btrfs_delayed_ref_head *locked_ref = NULL; 1993 ktime_t start = ktime_get(); 1994 int ret; 1995 unsigned long count = 0; 1996 unsigned long actual_count = 0; 1997 1998 delayed_refs = &trans->transaction->delayed_refs; 1999 do { 2000 if (!locked_ref) { 2001 locked_ref = btrfs_obtain_ref_head(trans); 2002 if (IS_ERR_OR_NULL(locked_ref)) { 2003 if (PTR_ERR(locked_ref) == -EAGAIN) { 2004 continue; 2005 } else { 2006 break; 2007 } 2008 } 2009 count++; 2010 } 2011 /* 2012 * We need to try and merge add/drops of the same ref since we 2013 * can run into issues with relocate dropping the implicit ref 2014 * and then it being added back again before the drop can 2015 * finish. 
If we merged anything we need to re-loop so we can
		 * get a good ref.
		 * Or we can get node references of the same type that weren't
		 * merged when created due to bumps in the tree mod seq, and
		 * we need to merge them to prevent adding an inline extent
		 * backref before dropping it (triggering a BUG_ON at
		 * insert_inline_extent_backref()).
		 */
		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);

		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
						      &actual_count);
		if (ret < 0 && ret != -EAGAIN) {
			/*
			 * Error, btrfs_run_delayed_refs_for_head already
			 * unlocked everything so just bail out
			 */
			return ret;
		} else if (!ret) {
			/*
			 * Success, perform the usual cleanup of a processed
			 * head
			 */
			ret = cleanup_ref_head(trans, locked_ref);
			if (ret > 0) {
				/* We dropped our lock, we need to loop. */
				ret = 0;
				continue;
			} else if (ret) {
				return ret;
			}
		}

		/*
		 * Either success case or btrfs_run_delayed_refs_for_head
		 * returned -EAGAIN, meaning we need to select another head
		 */

		locked_ref = NULL;
		cond_resched();
	} while ((nr != -1 && count < nr) || locked_ref);

	/*
	 * We don't want to include ref heads since we can have empty ref heads
	 * and those will drastically skew our runtime down since we just do
	 * accounting, no actual extent tree updates.
	 */
	if (actual_count > 0) {
		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
		u64 avg;

		/*
		 * We weigh the current average higher than our current runtime
		 * to avoid large swings in the average.
		 */
		spin_lock(&delayed_refs->lock);
		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
		spin_unlock(&delayed_refs->lock);
	}
	return 0;
}

#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning.
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif

/*
 * This starts processing the delayed reference count updates and extent
 * insertions we have queued up so far. count can be 0, which means to process
 * everything in the tree at the start of the run (but not newly added
 * entries), or it can be some target number you'd like to process.
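 *
 * For example (hypothetical call sites, shown only to illustrate the count
 * semantics implemented below):
 *
 *	btrfs_run_delayed_refs(trans, 0);	(flush the current backlog)
 *	btrfs_run_delayed_refs(trans, 32);	(process at most 32 heads)
 *	btrfs_run_delayed_refs(trans, (unsigned long)-1);
 *						(run everything, looping until
 *						 no ref heads remain)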
2128 * 2129 * Returns 0 on success or if called with an aborted transaction 2130 * Returns <0 on error and aborts the transaction 2131 */ 2132 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2133 unsigned long count) 2134 { 2135 struct btrfs_fs_info *fs_info = trans->fs_info; 2136 struct rb_node *node; 2137 struct btrfs_delayed_ref_root *delayed_refs; 2138 struct btrfs_delayed_ref_head *head; 2139 int ret; 2140 int run_all = count == (unsigned long)-1; 2141 2142 /* We'll clean this up in btrfs_cleanup_transaction */ 2143 if (TRANS_ABORTED(trans)) 2144 return 0; 2145 2146 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) 2147 return 0; 2148 2149 delayed_refs = &trans->transaction->delayed_refs; 2150 if (count == 0) 2151 count = delayed_refs->num_heads_ready; 2152 2153 again: 2154 #ifdef SCRAMBLE_DELAYED_REFS 2155 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); 2156 #endif 2157 ret = __btrfs_run_delayed_refs(trans, count); 2158 if (ret < 0) { 2159 btrfs_abort_transaction(trans, ret); 2160 return ret; 2161 } 2162 2163 if (run_all) { 2164 btrfs_create_pending_block_groups(trans); 2165 2166 spin_lock(&delayed_refs->lock); 2167 node = rb_first_cached(&delayed_refs->href_root); 2168 if (!node) { 2169 spin_unlock(&delayed_refs->lock); 2170 goto out; 2171 } 2172 head = rb_entry(node, struct btrfs_delayed_ref_head, 2173 href_node); 2174 refcount_inc(&head->refs); 2175 spin_unlock(&delayed_refs->lock); 2176 2177 /* Mutex was contended, block until it's released and retry. */ 2178 mutex_lock(&head->mutex); 2179 mutex_unlock(&head->mutex); 2180 2181 btrfs_put_delayed_ref_head(head); 2182 cond_resched(); 2183 goto again; 2184 } 2185 out: 2186 return 0; 2187 } 2188 2189 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2190 struct extent_buffer *eb, u64 flags, 2191 int level, int is_data) 2192 { 2193 struct btrfs_delayed_extent_op *extent_op; 2194 int ret; 2195 2196 extent_op = btrfs_alloc_delayed_extent_op(); 2197 if (!extent_op) 2198 return -ENOMEM; 2199 2200 extent_op->flags_to_set = flags; 2201 extent_op->update_flags = true; 2202 extent_op->update_key = false; 2203 extent_op->is_data = is_data ? 
true : false; 2204 extent_op->level = level; 2205 2206 ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op); 2207 if (ret) 2208 btrfs_free_delayed_extent_op(extent_op); 2209 return ret; 2210 } 2211 2212 static noinline int check_delayed_ref(struct btrfs_root *root, 2213 struct btrfs_path *path, 2214 u64 objectid, u64 offset, u64 bytenr) 2215 { 2216 struct btrfs_delayed_ref_head *head; 2217 struct btrfs_delayed_ref_node *ref; 2218 struct btrfs_delayed_data_ref *data_ref; 2219 struct btrfs_delayed_ref_root *delayed_refs; 2220 struct btrfs_transaction *cur_trans; 2221 struct rb_node *node; 2222 int ret = 0; 2223 2224 spin_lock(&root->fs_info->trans_lock); 2225 cur_trans = root->fs_info->running_transaction; 2226 if (cur_trans) 2227 refcount_inc(&cur_trans->use_count); 2228 spin_unlock(&root->fs_info->trans_lock); 2229 if (!cur_trans) 2230 return 0; 2231 2232 delayed_refs = &cur_trans->delayed_refs; 2233 spin_lock(&delayed_refs->lock); 2234 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); 2235 if (!head) { 2236 spin_unlock(&delayed_refs->lock); 2237 btrfs_put_transaction(cur_trans); 2238 return 0; 2239 } 2240 2241 if (!mutex_trylock(&head->mutex)) { 2242 refcount_inc(&head->refs); 2243 spin_unlock(&delayed_refs->lock); 2244 2245 btrfs_release_path(path); 2246 2247 /* 2248 * Mutex was contended, block until it's released and let 2249 * caller try again 2250 */ 2251 mutex_lock(&head->mutex); 2252 mutex_unlock(&head->mutex); 2253 btrfs_put_delayed_ref_head(head); 2254 btrfs_put_transaction(cur_trans); 2255 return -EAGAIN; 2256 } 2257 spin_unlock(&delayed_refs->lock); 2258 2259 spin_lock(&head->lock); 2260 /* 2261 * XXX: We should replace this with a proper search function in the 2262 * future. 2263 */ 2264 for (node = rb_first_cached(&head->ref_tree); node; 2265 node = rb_next(node)) { 2266 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); 2267 /* If it's a shared ref we know a cross reference exists */ 2268 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { 2269 ret = 1; 2270 break; 2271 } 2272 2273 data_ref = btrfs_delayed_node_to_data_ref(ref); 2274 2275 /* 2276 * If our ref doesn't match the one we're currently looking at 2277 * then we have a cross reference. 
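		 *
		 * "Match" means the same (root, inode objectid, file offset)
		 * triple the caller passed in; a delayed ref recorded for,
		 * say, a snapshot of this root differs in data_ref->root and
		 * therefore counts as a cross reference.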
2278 */ 2279 if (data_ref->root != root->root_key.objectid || 2280 data_ref->objectid != objectid || 2281 data_ref->offset != offset) { 2282 ret = 1; 2283 break; 2284 } 2285 } 2286 spin_unlock(&head->lock); 2287 mutex_unlock(&head->mutex); 2288 btrfs_put_transaction(cur_trans); 2289 return ret; 2290 } 2291 2292 static noinline int check_committed_ref(struct btrfs_root *root, 2293 struct btrfs_path *path, 2294 u64 objectid, u64 offset, u64 bytenr, 2295 bool strict) 2296 { 2297 struct btrfs_fs_info *fs_info = root->fs_info; 2298 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); 2299 struct extent_buffer *leaf; 2300 struct btrfs_extent_data_ref *ref; 2301 struct btrfs_extent_inline_ref *iref; 2302 struct btrfs_extent_item *ei; 2303 struct btrfs_key key; 2304 u32 item_size; 2305 int type; 2306 int ret; 2307 2308 key.objectid = bytenr; 2309 key.offset = (u64)-1; 2310 key.type = BTRFS_EXTENT_ITEM_KEY; 2311 2312 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2313 if (ret < 0) 2314 goto out; 2315 BUG_ON(ret == 0); /* Corruption */ 2316 2317 ret = -ENOENT; 2318 if (path->slots[0] == 0) 2319 goto out; 2320 2321 path->slots[0]--; 2322 leaf = path->nodes[0]; 2323 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2324 2325 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) 2326 goto out; 2327 2328 ret = 1; 2329 item_size = btrfs_item_size(leaf, path->slots[0]); 2330 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2331 2332 /* If extent item has more than 1 inline ref then it's shared */ 2333 if (item_size != sizeof(*ei) + 2334 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) 2335 goto out; 2336 2337 /* 2338 * If extent created before last snapshot => it's shared unless the 2339 * snapshot has been deleted. Use the heuristic if strict is false. 
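	 *
	 * The reasoning behind the heuristic: an extent whose generation
	 * predates the last snapshot of this root already existed when that
	 * snapshot was taken, so the snapshot (if it still exists) also
	 * references it. Strict mode skips the shortcut and relies on the
	 * exact backref checks below.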
2340 */ 2341 if (!strict && 2342 (btrfs_extent_generation(leaf, ei) <= 2343 btrfs_root_last_snapshot(&root->root_item))) 2344 goto out; 2345 2346 iref = (struct btrfs_extent_inline_ref *)(ei + 1); 2347 2348 /* If this extent has SHARED_DATA_REF then it's shared */ 2349 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); 2350 if (type != BTRFS_EXTENT_DATA_REF_KEY) 2351 goto out; 2352 2353 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 2354 if (btrfs_extent_refs(leaf, ei) != 2355 btrfs_extent_data_ref_count(leaf, ref) || 2356 btrfs_extent_data_ref_root(leaf, ref) != 2357 root->root_key.objectid || 2358 btrfs_extent_data_ref_objectid(leaf, ref) != objectid || 2359 btrfs_extent_data_ref_offset(leaf, ref) != offset) 2360 goto out; 2361 2362 ret = 0; 2363 out: 2364 return ret; 2365 } 2366 2367 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, 2368 u64 bytenr, bool strict) 2369 { 2370 struct btrfs_path *path; 2371 int ret; 2372 2373 path = btrfs_alloc_path(); 2374 if (!path) 2375 return -ENOMEM; 2376 2377 do { 2378 ret = check_committed_ref(root, path, objectid, 2379 offset, bytenr, strict); 2380 if (ret && ret != -ENOENT) 2381 goto out; 2382 2383 ret = check_delayed_ref(root, path, objectid, offset, bytenr); 2384 } while (ret == -EAGAIN); 2385 2386 out: 2387 btrfs_free_path(path); 2388 if (btrfs_is_data_reloc_root(root)) 2389 WARN_ON(ret > 0); 2390 return ret; 2391 } 2392 2393 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, 2394 struct btrfs_root *root, 2395 struct extent_buffer *buf, 2396 int full_backref, int inc) 2397 { 2398 struct btrfs_fs_info *fs_info = root->fs_info; 2399 u64 bytenr; 2400 u64 num_bytes; 2401 u64 parent; 2402 u64 ref_root; 2403 u32 nritems; 2404 struct btrfs_key key; 2405 struct btrfs_file_extent_item *fi; 2406 struct btrfs_ref generic_ref = { 0 }; 2407 bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC); 2408 int i; 2409 int action; 2410 int level; 2411 int ret = 0; 2412 2413 if (btrfs_is_testing(fs_info)) 2414 return 0; 2415 2416 ref_root = btrfs_header_owner(buf); 2417 nritems = btrfs_header_nritems(buf); 2418 level = btrfs_header_level(buf); 2419 2420 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) 2421 return 0; 2422 2423 if (full_backref) 2424 parent = buf->start; 2425 else 2426 parent = 0; 2427 if (inc) 2428 action = BTRFS_ADD_DELAYED_REF; 2429 else 2430 action = BTRFS_DROP_DELAYED_REF; 2431 2432 for (i = 0; i < nritems; i++) { 2433 if (level == 0) { 2434 btrfs_item_key_to_cpu(buf, &key, i); 2435 if (key.type != BTRFS_EXTENT_DATA_KEY) 2436 continue; 2437 fi = btrfs_item_ptr(buf, i, 2438 struct btrfs_file_extent_item); 2439 if (btrfs_file_extent_type(buf, fi) == 2440 BTRFS_FILE_EXTENT_INLINE) 2441 continue; 2442 bytenr = btrfs_file_extent_disk_bytenr(buf, fi); 2443 if (bytenr == 0) 2444 continue; 2445 2446 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); 2447 key.offset -= btrfs_file_extent_offset(buf, fi); 2448 btrfs_init_generic_ref(&generic_ref, action, bytenr, 2449 num_bytes, parent); 2450 btrfs_init_data_ref(&generic_ref, ref_root, key.objectid, 2451 key.offset, root->root_key.objectid, 2452 for_reloc); 2453 if (inc) 2454 ret = btrfs_inc_extent_ref(trans, &generic_ref); 2455 else 2456 ret = btrfs_free_extent(trans, &generic_ref); 2457 if (ret) 2458 goto fail; 2459 } else { 2460 bytenr = btrfs_node_blockptr(buf, i); 2461 num_bytes = fs_info->nodesize; 2462 btrfs_init_generic_ref(&generic_ref, action, bytenr, 2463 num_bytes, parent); 2464 
btrfs_init_tree_ref(&generic_ref, level - 1, ref_root, 2465 root->root_key.objectid, for_reloc); 2466 if (inc) 2467 ret = btrfs_inc_extent_ref(trans, &generic_ref); 2468 else 2469 ret = btrfs_free_extent(trans, &generic_ref); 2470 if (ret) 2471 goto fail; 2472 } 2473 } 2474 return 0; 2475 fail: 2476 return ret; 2477 } 2478 2479 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2480 struct extent_buffer *buf, int full_backref) 2481 { 2482 return __btrfs_mod_ref(trans, root, buf, full_backref, 1); 2483 } 2484 2485 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2486 struct extent_buffer *buf, int full_backref) 2487 { 2488 return __btrfs_mod_ref(trans, root, buf, full_backref, 0); 2489 } 2490 2491 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) 2492 { 2493 struct btrfs_fs_info *fs_info = root->fs_info; 2494 u64 flags; 2495 u64 ret; 2496 2497 if (data) 2498 flags = BTRFS_BLOCK_GROUP_DATA; 2499 else if (root == fs_info->chunk_root) 2500 flags = BTRFS_BLOCK_GROUP_SYSTEM; 2501 else 2502 flags = BTRFS_BLOCK_GROUP_METADATA; 2503 2504 ret = btrfs_get_alloc_profile(fs_info, flags); 2505 return ret; 2506 } 2507 2508 static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start) 2509 { 2510 struct btrfs_block_group *cache; 2511 u64 bytenr; 2512 2513 spin_lock(&fs_info->block_group_cache_lock); 2514 bytenr = fs_info->first_logical_byte; 2515 spin_unlock(&fs_info->block_group_cache_lock); 2516 2517 if (bytenr < (u64)-1) 2518 return bytenr; 2519 2520 cache = btrfs_lookup_first_block_group(fs_info, search_start); 2521 if (!cache) 2522 return 0; 2523 2524 bytenr = cache->start; 2525 btrfs_put_block_group(cache); 2526 2527 return bytenr; 2528 } 2529 2530 static int pin_down_extent(struct btrfs_trans_handle *trans, 2531 struct btrfs_block_group *cache, 2532 u64 bytenr, u64 num_bytes, int reserved) 2533 { 2534 struct btrfs_fs_info *fs_info = cache->fs_info; 2535 2536 spin_lock(&cache->space_info->lock); 2537 spin_lock(&cache->lock); 2538 cache->pinned += num_bytes; 2539 btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info, 2540 num_bytes); 2541 if (reserved) { 2542 cache->reserved -= num_bytes; 2543 cache->space_info->bytes_reserved -= num_bytes; 2544 } 2545 spin_unlock(&cache->lock); 2546 spin_unlock(&cache->space_info->lock); 2547 2548 set_extent_dirty(&trans->transaction->pinned_extents, bytenr, 2549 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); 2550 return 0; 2551 } 2552 2553 int btrfs_pin_extent(struct btrfs_trans_handle *trans, 2554 u64 bytenr, u64 num_bytes, int reserved) 2555 { 2556 struct btrfs_block_group *cache; 2557 2558 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); 2559 BUG_ON(!cache); /* Logic error */ 2560 2561 pin_down_extent(trans, cache, bytenr, num_bytes, reserved); 2562 2563 btrfs_put_block_group(cache); 2564 return 0; 2565 } 2566 2567 /* 2568 * this function must be called within transaction 2569 */ 2570 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, 2571 u64 bytenr, u64 num_bytes) 2572 { 2573 struct btrfs_block_group *cache; 2574 int ret; 2575 2576 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); 2577 if (!cache) 2578 return -EINVAL; 2579 2580 /* 2581 * pull in the free space cache (if any) so that our pin 2582 * removes the free space from the cache. We have load_only set 2583 * to one because the slow code to read in the free extents does check 2584 * the pinned extents. 
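	 *
	 * (load_only is the second argument of the btrfs_cache_block_group()
	 * call right below.)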
2585 */ 2586 btrfs_cache_block_group(cache, 1); 2587 /* 2588 * Make sure we wait until the cache is completely built in case it is 2589 * missing or is invalid and therefore needs to be rebuilt. 2590 */ 2591 ret = btrfs_wait_block_group_cache_done(cache); 2592 if (ret) 2593 goto out; 2594 2595 pin_down_extent(trans, cache, bytenr, num_bytes, 0); 2596 2597 /* remove us from the free space cache (if we're there at all) */ 2598 ret = btrfs_remove_free_space(cache, bytenr, num_bytes); 2599 out: 2600 btrfs_put_block_group(cache); 2601 return ret; 2602 } 2603 2604 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, 2605 u64 start, u64 num_bytes) 2606 { 2607 int ret; 2608 struct btrfs_block_group *block_group; 2609 2610 block_group = btrfs_lookup_block_group(fs_info, start); 2611 if (!block_group) 2612 return -EINVAL; 2613 2614 btrfs_cache_block_group(block_group, 1); 2615 /* 2616 * Make sure we wait until the cache is completely built in case it is 2617 * missing or is invalid and therefore needs to be rebuilt. 2618 */ 2619 ret = btrfs_wait_block_group_cache_done(block_group); 2620 if (ret) 2621 goto out; 2622 2623 ret = btrfs_remove_free_space(block_group, start, num_bytes); 2624 out: 2625 btrfs_put_block_group(block_group); 2626 return ret; 2627 } 2628 2629 int btrfs_exclude_logged_extents(struct extent_buffer *eb) 2630 { 2631 struct btrfs_fs_info *fs_info = eb->fs_info; 2632 struct btrfs_file_extent_item *item; 2633 struct btrfs_key key; 2634 int found_type; 2635 int i; 2636 int ret = 0; 2637 2638 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) 2639 return 0; 2640 2641 for (i = 0; i < btrfs_header_nritems(eb); i++) { 2642 btrfs_item_key_to_cpu(eb, &key, i); 2643 if (key.type != BTRFS_EXTENT_DATA_KEY) 2644 continue; 2645 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); 2646 found_type = btrfs_file_extent_type(eb, item); 2647 if (found_type == BTRFS_FILE_EXTENT_INLINE) 2648 continue; 2649 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 2650 continue; 2651 key.objectid = btrfs_file_extent_disk_bytenr(eb, item); 2652 key.offset = btrfs_file_extent_disk_num_bytes(eb, item); 2653 ret = __exclude_logged_extent(fs_info, key.objectid, key.offset); 2654 if (ret) 2655 break; 2656 } 2657 2658 return ret; 2659 } 2660 2661 static void 2662 btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) 2663 { 2664 atomic_inc(&bg->reservations); 2665 } 2666 2667 /* 2668 * Returns the free cluster for the given space info and sets empty_cluster to 2669 * what it should be based on the mount options. 
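 *
 * Summary of the mapping implemented below:
 *
 *	mixed block groups		-> no cluster
 *	metadata, mount -o ssd		-> meta_alloc_cluster, empty_cluster 2M
 *	metadata, rotational		-> meta_alloc_cluster, empty_cluster 64K
 *	data, mount -o ssd_spread	-> data_alloc_cluster, empty_cluster 2M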
2670 */ 2671 static struct btrfs_free_cluster * 2672 fetch_cluster_info(struct btrfs_fs_info *fs_info, 2673 struct btrfs_space_info *space_info, u64 *empty_cluster) 2674 { 2675 struct btrfs_free_cluster *ret = NULL; 2676 2677 *empty_cluster = 0; 2678 if (btrfs_mixed_space_info(space_info)) 2679 return ret; 2680 2681 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 2682 ret = &fs_info->meta_alloc_cluster; 2683 if (btrfs_test_opt(fs_info, SSD)) 2684 *empty_cluster = SZ_2M; 2685 else 2686 *empty_cluster = SZ_64K; 2687 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && 2688 btrfs_test_opt(fs_info, SSD_SPREAD)) { 2689 *empty_cluster = SZ_2M; 2690 ret = &fs_info->data_alloc_cluster; 2691 } 2692 2693 return ret; 2694 } 2695 2696 static int unpin_extent_range(struct btrfs_fs_info *fs_info, 2697 u64 start, u64 end, 2698 const bool return_free_space) 2699 { 2700 struct btrfs_block_group *cache = NULL; 2701 struct btrfs_space_info *space_info; 2702 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 2703 struct btrfs_free_cluster *cluster = NULL; 2704 u64 len; 2705 u64 total_unpinned = 0; 2706 u64 empty_cluster = 0; 2707 bool readonly; 2708 2709 while (start <= end) { 2710 readonly = false; 2711 if (!cache || 2712 start >= cache->start + cache->length) { 2713 if (cache) 2714 btrfs_put_block_group(cache); 2715 total_unpinned = 0; 2716 cache = btrfs_lookup_block_group(fs_info, start); 2717 BUG_ON(!cache); /* Logic error */ 2718 2719 cluster = fetch_cluster_info(fs_info, 2720 cache->space_info, 2721 &empty_cluster); 2722 empty_cluster <<= 1; 2723 } 2724 2725 len = cache->start + cache->length - start; 2726 len = min(len, end + 1 - start); 2727 2728 down_read(&fs_info->commit_root_sem); 2729 if (start < cache->last_byte_to_unpin && return_free_space) { 2730 u64 add_len = min(len, cache->last_byte_to_unpin - start); 2731 2732 btrfs_add_free_space(cache, start, add_len); 2733 } 2734 up_read(&fs_info->commit_root_sem); 2735 2736 start += len; 2737 total_unpinned += len; 2738 space_info = cache->space_info; 2739 2740 /* 2741 * If this space cluster has been marked as fragmented and we've 2742 * unpinned enough in this block group to potentially allow a 2743 * cluster to be created inside of it go ahead and clear the 2744 * fragmented check. 
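		 *
		 * (empty_cluster was doubled right after the block group
		 * lookup above, so "enough" means twice the cluster size
		 * fetch_cluster_info() suggested.)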
2745 */ 2746 if (cluster && cluster->fragmented && 2747 total_unpinned > empty_cluster) { 2748 spin_lock(&cluster->lock); 2749 cluster->fragmented = 0; 2750 spin_unlock(&cluster->lock); 2751 } 2752 2753 spin_lock(&space_info->lock); 2754 spin_lock(&cache->lock); 2755 cache->pinned -= len; 2756 btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len); 2757 space_info->max_extent_size = 0; 2758 if (cache->ro) { 2759 space_info->bytes_readonly += len; 2760 readonly = true; 2761 } else if (btrfs_is_zoned(fs_info)) { 2762 /* Need reset before reusing in a zoned block group */ 2763 space_info->bytes_zone_unusable += len; 2764 readonly = true; 2765 } 2766 spin_unlock(&cache->lock); 2767 if (!readonly && return_free_space && 2768 global_rsv->space_info == space_info) { 2769 u64 to_add = len; 2770 2771 spin_lock(&global_rsv->lock); 2772 if (!global_rsv->full) { 2773 to_add = min(len, global_rsv->size - 2774 global_rsv->reserved); 2775 global_rsv->reserved += to_add; 2776 btrfs_space_info_update_bytes_may_use(fs_info, 2777 space_info, to_add); 2778 if (global_rsv->reserved >= global_rsv->size) 2779 global_rsv->full = 1; 2780 len -= to_add; 2781 } 2782 spin_unlock(&global_rsv->lock); 2783 } 2784 /* Add to any tickets we may have */ 2785 if (!readonly && return_free_space && len) 2786 btrfs_try_granting_tickets(fs_info, space_info); 2787 spin_unlock(&space_info->lock); 2788 } 2789 2790 if (cache) 2791 btrfs_put_block_group(cache); 2792 return 0; 2793 } 2794 2795 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) 2796 { 2797 struct btrfs_fs_info *fs_info = trans->fs_info; 2798 struct btrfs_block_group *block_group, *tmp; 2799 struct list_head *deleted_bgs; 2800 struct extent_io_tree *unpin; 2801 u64 start; 2802 u64 end; 2803 int ret; 2804 2805 unpin = &trans->transaction->pinned_extents; 2806 2807 while (!TRANS_ABORTED(trans)) { 2808 struct extent_state *cached_state = NULL; 2809 2810 mutex_lock(&fs_info->unused_bg_unpin_mutex); 2811 ret = find_first_extent_bit(unpin, 0, &start, &end, 2812 EXTENT_DIRTY, &cached_state); 2813 if (ret) { 2814 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 2815 break; 2816 } 2817 2818 if (btrfs_test_opt(fs_info, DISCARD_SYNC)) 2819 ret = btrfs_discard_extent(fs_info, start, 2820 end + 1 - start, NULL); 2821 2822 clear_extent_dirty(unpin, start, end, &cached_state); 2823 unpin_extent_range(fs_info, start, end, true); 2824 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 2825 free_extent_state(cached_state); 2826 cond_resched(); 2827 } 2828 2829 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { 2830 btrfs_discard_calc_delay(&fs_info->discard_ctl); 2831 btrfs_discard_schedule_work(&fs_info->discard_ctl, true); 2832 } 2833 2834 /* 2835 * Transaction is finished. We don't need the lock anymore. We 2836 * do need to clean up the block groups in case of a transaction 2837 * abort. 
 */
	deleted_bgs = &trans->transaction->deleted_bgs;
	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
		u64 trimmed = 0;

		ret = -EROFS;
		if (!TRANS_ABORTED(trans))
			ret = btrfs_discard_extent(fs_info,
						   block_group->start,
						   block_group->length,
						   &trimmed);

		list_del_init(&block_group->bg_list);
		btrfs_unfreeze_block_group(block_group);
		btrfs_put_block_group(block_group);

		if (ret) {
			const char *errstr = btrfs_decode_error(ret);
			btrfs_warn(fs_info,
				   "discard failed while removing blockgroup: errno=%d %s",
				   ret, errstr);
		}
	}

	return 0;
}

/*
 * Drop one or more refs of @node.
 *
 * 1. Locate the extent refs.
 *    It's either inline in EXTENT/METADATA_ITEM or in a keyed SHARED_* item.
 *    Locate it, then reduce the refs number or remove the ref line completely.
 *
 * 2. Update the refs count in EXTENT/METADATA_ITEM
 *
 * Inline backref case:
 *
 * in extent tree we have:
 *
 *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
 *		refs 2 gen 6 flags DATA
 *		extent data backref root FS_TREE objectid 258 offset 0 count 1
 *		extent data backref root FS_TREE objectid 257 offset 0 count 1
 *
 * This function gets called with:
 *
 *	node->bytenr = 13631488
 *	node->num_bytes = 1048576
 *	root_objectid = FS_TREE
 *	owner_objectid = 257
 *	owner_offset = 0
 *	refs_to_drop = 1
 *
 * Then we should get something like:
 *
 *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
 *		refs 1 gen 6 flags DATA
 *		extent data backref root FS_TREE objectid 258 offset 0 count 1
 *
 * Keyed backref case:
 *
 * in extent tree we have:
 *
 *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
 *		refs 754 gen 6 flags DATA
 *	[...]
 *	item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28
 *		extent data backref root FS_TREE objectid 866 offset 0 count 1
 *
 * This function gets called with:
 *
 *	node->bytenr = 13631488
 *	node->num_bytes = 1048576
 *	root_objectid = FS_TREE
 *	owner_objectid = 866
 *	owner_offset = 0
 *	refs_to_drop = 1
 *
 * Then we should get something like:
 *
 *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
 *		refs 753 gen 6 flags DATA
 *
 * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed.
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_root *extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	int last_ref = 0;
	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);

	extent_root = btrfs_extent_root(info, bytenr);
	ASSERT(extent_root);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;

	if (!is_data && refs_to_drop != 1) {
		btrfs_crit(info,
"invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u",
			   node->bytenr, refs_to_drop);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	if (is_data)
		skinny_metadata = false;

	ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
				    parent, root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		/*
		 * Either the inline backref or the SHARED_DATA_REF/
		 * SHARED_BLOCK_REF is found
		 *
		 * Here is a quick path to locate EXTENT/METADATA_ITEM.
		 * It's possible the EXTENT/METADATA_ITEM is near the current
		 * slot.
		 */
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}

			/* Quick path didn't find the EXTENT/METADATA_ITEM */
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}

		if (!found_extent) {
			if (iref) {
				btrfs_crit(info,
"invalid iref, no EXTENT/METADATA_ITEM found but has inline extent ref");
				btrfs_abort_transaction(trans, -EUCLEAN);
				goto err_dump;
			}
			/* Must be SHARED_* item, remove the backref first */
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop, is_data,
						    &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_release_path(path);

			/* Slow path to locate EXTENT/METADATA_ITEM */
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
3037 */ 3038 path->slots[0]--; 3039 btrfs_item_key_to_cpu(path->nodes[0], &key, 3040 path->slots[0]); 3041 if (key.objectid == bytenr && 3042 key.type == BTRFS_EXTENT_ITEM_KEY && 3043 key.offset == num_bytes) 3044 ret = 0; 3045 } 3046 3047 if (ret > 0 && skinny_metadata) { 3048 skinny_metadata = false; 3049 key.objectid = bytenr; 3050 key.type = BTRFS_EXTENT_ITEM_KEY; 3051 key.offset = num_bytes; 3052 btrfs_release_path(path); 3053 ret = btrfs_search_slot(trans, extent_root, 3054 &key, path, -1, 1); 3055 } 3056 3057 if (ret) { 3058 btrfs_err(info, 3059 "umm, got %d back from search, was looking for %llu", 3060 ret, bytenr); 3061 if (ret > 0) 3062 btrfs_print_leaf(path->nodes[0]); 3063 } 3064 if (ret < 0) { 3065 btrfs_abort_transaction(trans, ret); 3066 goto out; 3067 } 3068 extent_slot = path->slots[0]; 3069 } 3070 } else if (WARN_ON(ret == -ENOENT)) { 3071 btrfs_print_leaf(path->nodes[0]); 3072 btrfs_err(info, 3073 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu", 3074 bytenr, parent, root_objectid, owner_objectid, 3075 owner_offset); 3076 btrfs_abort_transaction(trans, ret); 3077 goto out; 3078 } else { 3079 btrfs_abort_transaction(trans, ret); 3080 goto out; 3081 } 3082 3083 leaf = path->nodes[0]; 3084 item_size = btrfs_item_size(leaf, extent_slot); 3085 if (unlikely(item_size < sizeof(*ei))) { 3086 ret = -EINVAL; 3087 btrfs_print_v0_err(info); 3088 btrfs_abort_transaction(trans, ret); 3089 goto out; 3090 } 3091 ei = btrfs_item_ptr(leaf, extent_slot, 3092 struct btrfs_extent_item); 3093 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID && 3094 key.type == BTRFS_EXTENT_ITEM_KEY) { 3095 struct btrfs_tree_block_info *bi; 3096 if (item_size < sizeof(*ei) + sizeof(*bi)) { 3097 btrfs_crit(info, 3098 "invalid extent item size for key (%llu, %u, %llu) owner %llu, has %u expect >= %zu", 3099 key.objectid, key.type, key.offset, 3100 owner_objectid, item_size, 3101 sizeof(*ei) + sizeof(*bi)); 3102 btrfs_abort_transaction(trans, -EUCLEAN); 3103 goto err_dump; 3104 } 3105 bi = (struct btrfs_tree_block_info *)(ei + 1); 3106 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); 3107 } 3108 3109 refs = btrfs_extent_refs(leaf, ei); 3110 if (refs < refs_to_drop) { 3111 btrfs_crit(info, 3112 "trying to drop %d refs but we only have %llu for bytenr %llu", 3113 refs_to_drop, refs, bytenr); 3114 btrfs_abort_transaction(trans, -EUCLEAN); 3115 goto err_dump; 3116 } 3117 refs -= refs_to_drop; 3118 3119 if (refs > 0) { 3120 if (extent_op) 3121 __run_delayed_extent_op(extent_op, leaf, ei); 3122 /* 3123 * In the case of inline back ref, reference count will 3124 * be updated by remove_extent_backref 3125 */ 3126 if (iref) { 3127 if (!found_extent) { 3128 btrfs_crit(info, 3129 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found"); 3130 btrfs_abort_transaction(trans, -EUCLEAN); 3131 goto err_dump; 3132 } 3133 } else { 3134 btrfs_set_extent_refs(leaf, ei, refs); 3135 btrfs_mark_buffer_dirty(leaf); 3136 } 3137 if (found_extent) { 3138 ret = remove_extent_backref(trans, extent_root, path, 3139 iref, refs_to_drop, is_data, 3140 &last_ref); 3141 if (ret) { 3142 btrfs_abort_transaction(trans, ret); 3143 goto out; 3144 } 3145 } 3146 } else { 3147 /* In this branch refs == 1 */ 3148 if (found_extent) { 3149 if (is_data && refs_to_drop != 3150 extent_data_ref_count(path, iref)) { 3151 btrfs_crit(info, 3152 "invalid refs_to_drop, current refs %u refs_to_drop %u", 3153 extent_data_ref_count(path, iref), 3154 refs_to_drop); 3155 btrfs_abort_transaction(trans, -EUCLEAN); 3156 
goto err_dump;
			}
			if (iref) {
				if (path->slots[0] != extent_slot) {
					btrfs_crit(info,
"invalid iref, extent item key (%llu %u %llu) doesn't have wanted iref",
						   key.objectid, key.type,
						   key.offset);
					btrfs_abort_transaction(trans, -EUCLEAN);
					goto err_dump;
				}
			} else {
				/*
				 * No inline ref, we must be at a SHARED_*
				 * item, and since it's a single ref it must
				 * be:
				 * |	extent_slot	  ||extent_slot + 1|
				 * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ]
				 */
				if (path->slots[0] != extent_slot + 1) {
					btrfs_crit(info,
	"invalid SHARED_* item, previous item is not EXTENT/METADATA_ITEM");
					btrfs_abort_transaction(trans, -EUCLEAN);
					goto err_dump;
				}
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		last_ref = 1;
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			struct btrfs_root *csum_root;

			csum_root = btrfs_csum_root(info, bytenr);
			ret = btrfs_del_csums(trans, csum_root, bytenr,
					      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}

		ret = add_to_free_space_tree(trans, bytenr, num_bytes);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = btrfs_update_block_group(trans, bytenr, num_bytes, false);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	btrfs_release_path(path);

out:
	btrfs_free_path(path);
	return ret;
err_dump:
	/*
	 * Leaf dump can take up a lot of log buffer, so we only do full leaf
	 * dump for debug build.
	 */
	if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) {
		btrfs_crit(info, "path->slots[0]=%d extent_slot=%d",
			   path->slots[0], extent_slot);
		btrfs_print_leaf(path->nodes[0]);
	}

	btrfs_free_path(path);
	return -EUCLEAN;
}

/*
 * When we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well. This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head)
		goto out_delayed_unlock;

	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		goto out;

	if (cleanup_extent_op(head) != NULL)
		goto out;

	/*
	 * Waiting for the lock here would deadlock. 
If someone else has it 3265 * locked they are already in the process of dropping it anyway 3266 */ 3267 if (!mutex_trylock(&head->mutex)) 3268 goto out; 3269 3270 btrfs_delete_ref_head(delayed_refs, head); 3271 head->processing = 0; 3272 3273 spin_unlock(&head->lock); 3274 spin_unlock(&delayed_refs->lock); 3275 3276 BUG_ON(head->extent_op); 3277 if (head->must_insert_reserved) 3278 ret = 1; 3279 3280 btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); 3281 mutex_unlock(&head->mutex); 3282 btrfs_put_delayed_ref_head(head); 3283 return ret; 3284 out: 3285 spin_unlock(&head->lock); 3286 3287 out_delayed_unlock: 3288 spin_unlock(&delayed_refs->lock); 3289 return 0; 3290 } 3291 3292 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 3293 u64 root_id, 3294 struct extent_buffer *buf, 3295 u64 parent, int last_ref) 3296 { 3297 struct btrfs_fs_info *fs_info = trans->fs_info; 3298 struct btrfs_ref generic_ref = { 0 }; 3299 int ret; 3300 3301 btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, 3302 buf->start, buf->len, parent); 3303 btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 3304 root_id, 0, false); 3305 3306 if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3307 btrfs_ref_tree_mod(fs_info, &generic_ref); 3308 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL); 3309 BUG_ON(ret); /* -ENOMEM */ 3310 } 3311 3312 if (last_ref && btrfs_header_generation(buf) == trans->transid) { 3313 struct btrfs_block_group *cache; 3314 bool must_pin = false; 3315 3316 if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3317 ret = check_ref_cleanup(trans, buf->start); 3318 if (!ret) { 3319 btrfs_redirty_list_add(trans->transaction, buf); 3320 goto out; 3321 } 3322 } 3323 3324 cache = btrfs_lookup_block_group(fs_info, buf->start); 3325 3326 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 3327 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3328 btrfs_put_block_group(cache); 3329 goto out; 3330 } 3331 3332 /* 3333 * If this is a leaf and there are tree mod log users, we may 3334 * have recorded mod log operations that point to this leaf. 3335 * So we must make sure no one reuses this leaf's extent before 3336 * mod log operations are applied to a node, otherwise after 3337 * rewinding a node using the mod log operations we get an 3338 * inconsistent btree, as the leaf's extent may now be used as 3339 * a node or leaf for another different btree. 3340 * We are safe from races here because at this point no other 3341 * node or root points to this extent buffer, so if after this 3342 * check a new tree mod log user joins, it will not be able to 3343 * find a node pointing to this leaf and record operations that 3344 * point to this leaf. 3345 */ 3346 if (btrfs_header_level(buf) == 0 && 3347 test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) 3348 must_pin = true; 3349 3350 if (must_pin || btrfs_is_zoned(fs_info)) { 3351 btrfs_redirty_list_add(trans->transaction, buf); 3352 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3353 btrfs_put_block_group(cache); 3354 goto out; 3355 } 3356 3357 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 3358 3359 btrfs_add_free_space(cache, buf->start, buf->len); 3360 btrfs_free_reserved_bytes(cache, buf->len, 0); 3361 btrfs_put_block_group(cache); 3362 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len); 3363 } 3364 out: 3365 if (last_ref) { 3366 /* 3367 * Deleting the buffer, clear the corrupt flag since it doesn't 3368 * matter anymore. 
3369 */ 3370 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); 3371 } 3372 } 3373 3374 /* Can return -ENOMEM */ 3375 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) 3376 { 3377 struct btrfs_fs_info *fs_info = trans->fs_info; 3378 int ret; 3379 3380 if (btrfs_is_testing(fs_info)) 3381 return 0; 3382 3383 /* 3384 * tree log blocks never actually go into the extent allocation 3385 * tree, just update pinning info and exit early. 3386 */ 3387 if ((ref->type == BTRFS_REF_METADATA && 3388 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || 3389 (ref->type == BTRFS_REF_DATA && 3390 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)) { 3391 /* unlocks the pinned mutex */ 3392 btrfs_pin_extent(trans, ref->bytenr, ref->len, 1); 3393 ret = 0; 3394 } else if (ref->type == BTRFS_REF_METADATA) { 3395 ret = btrfs_add_delayed_tree_ref(trans, ref, NULL); 3396 } else { 3397 ret = btrfs_add_delayed_data_ref(trans, ref, 0); 3398 } 3399 3400 if (!((ref->type == BTRFS_REF_METADATA && 3401 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || 3402 (ref->type == BTRFS_REF_DATA && 3403 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID))) 3404 btrfs_ref_tree_mod(fs_info, ref); 3405 3406 return ret; 3407 } 3408 3409 enum btrfs_loop_type { 3410 LOOP_CACHING_NOWAIT, 3411 LOOP_CACHING_WAIT, 3412 LOOP_ALLOC_CHUNK, 3413 LOOP_NO_EMPTY_SIZE, 3414 }; 3415 3416 static inline void 3417 btrfs_lock_block_group(struct btrfs_block_group *cache, 3418 int delalloc) 3419 { 3420 if (delalloc) 3421 down_read(&cache->data_rwsem); 3422 } 3423 3424 static inline void btrfs_grab_block_group(struct btrfs_block_group *cache, 3425 int delalloc) 3426 { 3427 btrfs_get_block_group(cache); 3428 if (delalloc) 3429 down_read(&cache->data_rwsem); 3430 } 3431 3432 static struct btrfs_block_group *btrfs_lock_cluster( 3433 struct btrfs_block_group *block_group, 3434 struct btrfs_free_cluster *cluster, 3435 int delalloc) 3436 __acquires(&cluster->refill_lock) 3437 { 3438 struct btrfs_block_group *used_bg = NULL; 3439 3440 spin_lock(&cluster->refill_lock); 3441 while (1) { 3442 used_bg = cluster->block_group; 3443 if (!used_bg) 3444 return NULL; 3445 3446 if (used_bg == block_group) 3447 return used_bg; 3448 3449 btrfs_get_block_group(used_bg); 3450 3451 if (!delalloc) 3452 return used_bg; 3453 3454 if (down_read_trylock(&used_bg->data_rwsem)) 3455 return used_bg; 3456 3457 spin_unlock(&cluster->refill_lock); 3458 3459 /* We should only have one-level nested. */ 3460 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING); 3461 3462 spin_lock(&cluster->refill_lock); 3463 if (used_bg == cluster->block_group) 3464 return used_bg; 3465 3466 up_read(&used_bg->data_rwsem); 3467 btrfs_put_block_group(used_bg); 3468 } 3469 } 3470 3471 static inline void 3472 btrfs_release_block_group(struct btrfs_block_group *cache, 3473 int delalloc) 3474 { 3475 if (delalloc) 3476 up_read(&cache->data_rwsem); 3477 btrfs_put_block_group(cache); 3478 } 3479 3480 enum btrfs_extent_allocation_policy { 3481 BTRFS_EXTENT_ALLOC_CLUSTERED, 3482 BTRFS_EXTENT_ALLOC_ZONED, 3483 }; 3484 3485 /* 3486 * Structure used internally for find_free_extent() function. Wraps needed 3487 * parameters. 
 */
struct find_free_extent_ctl {
	/* Basic allocation info */
	u64 ram_bytes;
	u64 num_bytes;
	u64 min_alloc_size;
	u64 empty_size;
	u64 flags;
	int delalloc;

	/* Where to start the search inside the bg */
	u64 search_start;

	/* For clustered allocation */
	u64 empty_cluster;
	struct btrfs_free_cluster *last_ptr;
	bool use_cluster;

	bool have_caching_bg;
	bool orig_have_caching_bg;

	/* Allocation is called for tree-log */
	bool for_treelog;

	/* Allocation is called for data relocation */
	bool for_data_reloc;

	/* RAID index, converted from flags */
	int index;

	/*
	 * Current loop number, check find_free_extent_update_loop() for details
	 */
	int loop;

	/*
	 * Whether we're refilling a cluster, if true we need to re-search
	 * current block group but don't try to refill the cluster again.
	 */
	bool retry_clustered;

	/*
	 * Whether we're updating free space cache, if true we need to re-search
	 * current block group but don't try updating free space cache again.
	 */
	bool retry_unclustered;

	/* If current block group is cached */
	int cached;

	/* Max contiguous hole found */
	u64 max_extent_size;

	/* Total free space from free space cache, not always contiguous */
	u64 total_free_space;

	/* Found result */
	u64 found_offset;

	/* Hint where to start looking for an empty space */
	u64 hint_byte;

	/* Allocation policy */
	enum btrfs_extent_allocation_policy policy;
};

/*
 * Helper function for find_free_extent().
 *
 * Return -ENOENT to inform caller that we need to fall back to unclustered
 * mode.
 * Return -EAGAIN to inform caller that we need to re-search this block group.
 * Return >0 to inform caller that we found nothing.
 * Return 0 means we have found a location and set ffe_ctl->found_offset.
 */
static int find_free_extent_clustered(struct btrfs_block_group *bg,
				      struct find_free_extent_ctl *ffe_ctl,
				      struct btrfs_block_group **cluster_bg_ret)
{
	struct btrfs_block_group *cluster_bg;
	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
	u64 aligned_cluster;
	u64 offset;
	int ret;

	cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
	if (!cluster_bg)
		goto refill_cluster;
	if (cluster_bg != bg && (cluster_bg->ro ||
	    !block_group_bits(cluster_bg, ffe_ctl->flags)))
		goto release_cluster;

	offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
					  ffe_ctl->num_bytes, cluster_bg->start,
					  &ffe_ctl->max_extent_size);
	if (offset) {
		/* We have a block, we're done */
		spin_unlock(&last_ptr->refill_lock);
		trace_btrfs_reserve_extent_cluster(cluster_bg,
				ffe_ctl->search_start, ffe_ctl->num_bytes);
		*cluster_bg_ret = cluster_bg;
		ffe_ctl->found_offset = offset;
		return 0;
	}
	WARN_ON(last_ptr->block_group != cluster_bg);

release_cluster:
	/*
	 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
	 * let's just skip it and let the allocator find whatever block it can
	 * find. If we reach this point, we will have tried the cluster
	 * allocator plenty of times and not have found anything, so we are
	 * likely way too fragmented for the clustering stuff to find anything.
3601 * 3602 * However, if the cluster is taken from the current block group, 3603 * release the cluster first, so that we stand a better chance of 3604 * succeeding in the unclustered allocation. 3605 */ 3606 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) { 3607 spin_unlock(&last_ptr->refill_lock); 3608 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); 3609 return -ENOENT; 3610 } 3611 3612 /* This cluster didn't work out, free it and start over */ 3613 btrfs_return_cluster_to_free_space(NULL, last_ptr); 3614 3615 if (cluster_bg != bg) 3616 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); 3617 3618 refill_cluster: 3619 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) { 3620 spin_unlock(&last_ptr->refill_lock); 3621 return -ENOENT; 3622 } 3623 3624 aligned_cluster = max_t(u64, 3625 ffe_ctl->empty_cluster + ffe_ctl->empty_size, 3626 bg->full_stripe_len); 3627 ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start, 3628 ffe_ctl->num_bytes, aligned_cluster); 3629 if (ret == 0) { 3630 /* Now pull our allocation out of this cluster */ 3631 offset = btrfs_alloc_from_cluster(bg, last_ptr, 3632 ffe_ctl->num_bytes, ffe_ctl->search_start, 3633 &ffe_ctl->max_extent_size); 3634 if (offset) { 3635 /* We found one, proceed */ 3636 spin_unlock(&last_ptr->refill_lock); 3637 trace_btrfs_reserve_extent_cluster(bg, 3638 ffe_ctl->search_start, 3639 ffe_ctl->num_bytes); 3640 ffe_ctl->found_offset = offset; 3641 return 0; 3642 } 3643 } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT && 3644 !ffe_ctl->retry_clustered) { 3645 spin_unlock(&last_ptr->refill_lock); 3646 3647 ffe_ctl->retry_clustered = true; 3648 btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + 3649 ffe_ctl->empty_cluster + ffe_ctl->empty_size); 3650 return -EAGAIN; 3651 } 3652 /* 3653 * At this point we either didn't find a cluster or we weren't able to 3654 * allocate a block from our cluster. Free the cluster we've been 3655 * trying to use, and go to the next block group. 3656 */ 3657 btrfs_return_cluster_to_free_space(NULL, last_ptr); 3658 spin_unlock(&last_ptr->refill_lock); 3659 return 1; 3660 } 3661 3662 /* 3663 * Return >0 to inform caller that we find nothing 3664 * Return 0 when we found an free extent and set ffe_ctrl->found_offset 3665 * Return -EAGAIN to inform caller that we need to re-search this block group 3666 */ 3667 static int find_free_extent_unclustered(struct btrfs_block_group *bg, 3668 struct find_free_extent_ctl *ffe_ctl) 3669 { 3670 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3671 u64 offset; 3672 3673 /* 3674 * We are doing an unclustered allocation, set the fragmented flag so 3675 * we don't bother trying to setup a cluster again until we get more 3676 * space. 
3677 */ 3678 if (unlikely(last_ptr)) { 3679 spin_lock(&last_ptr->lock); 3680 last_ptr->fragmented = 1; 3681 spin_unlock(&last_ptr->lock); 3682 } 3683 if (ffe_ctl->cached) { 3684 struct btrfs_free_space_ctl *free_space_ctl; 3685 3686 free_space_ctl = bg->free_space_ctl; 3687 spin_lock(&free_space_ctl->tree_lock); 3688 if (free_space_ctl->free_space < 3689 ffe_ctl->num_bytes + ffe_ctl->empty_cluster + 3690 ffe_ctl->empty_size) { 3691 ffe_ctl->total_free_space = max_t(u64, 3692 ffe_ctl->total_free_space, 3693 free_space_ctl->free_space); 3694 spin_unlock(&free_space_ctl->tree_lock); 3695 return 1; 3696 } 3697 spin_unlock(&free_space_ctl->tree_lock); 3698 } 3699 3700 offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start, 3701 ffe_ctl->num_bytes, ffe_ctl->empty_size, 3702 &ffe_ctl->max_extent_size); 3703 3704 /* 3705 * If we didn't find a chunk, and we haven't failed on this block group 3706 * before, and this block group is in the middle of caching and we are 3707 * ok with waiting, then go ahead and wait for progress to be made, and 3708 * set @retry_unclustered to true. 3709 * 3710 * If @retry_unclustered is true then we've already waited on this 3711 * block group once and should move on to the next block group. 3712 */ 3713 if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached && 3714 ffe_ctl->loop > LOOP_CACHING_NOWAIT) { 3715 btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + 3716 ffe_ctl->empty_size); 3717 ffe_ctl->retry_unclustered = true; 3718 return -EAGAIN; 3719 } else if (!offset) { 3720 return 1; 3721 } 3722 ffe_ctl->found_offset = offset; 3723 return 0; 3724 } 3725 3726 static int do_allocation_clustered(struct btrfs_block_group *block_group, 3727 struct find_free_extent_ctl *ffe_ctl, 3728 struct btrfs_block_group **bg_ret) 3729 { 3730 int ret; 3731 3732 /* We want to try and use the cluster allocator, so lets look there */ 3733 if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) { 3734 ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret); 3735 if (ret >= 0 || ret == -EAGAIN) 3736 return ret; 3737 /* ret == -ENOENT case falls through */ 3738 } 3739 3740 return find_free_extent_unclustered(block_group, ffe_ctl); 3741 } 3742 3743 /* 3744 * Tree-log block group locking 3745 * ============================ 3746 * 3747 * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which 3748 * indicates the starting address of a block group, which is reserved only 3749 * for tree-log metadata. 3750 * 3751 * Lock nesting 3752 * ============ 3753 * 3754 * space_info::lock 3755 * block_group::lock 3756 * fs_info::treelog_bg_lock 3757 */ 3758 3759 /* 3760 * Simple allocator for sequential-only block group. It only allows sequential 3761 * allocation. No need to play with trees. This function also reserves the 3762 * bytes as in btrfs_add_reserved_bytes. 
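 *
 * Ignoring the dedicated tree-log/relocation block group exclusions and the
 * locking, the core of the sequential allocation below boils down to a
 * pointer bump (sketch only, using the field names from the code below):
 *
 *	avail = block_group->zone_capacity - block_group->alloc_offset;
 *	if (avail < num_bytes)
 *		return 1;	(no room, caller tries the next block group)
 *	ffe_ctl->found_offset = block_group->start + block_group->alloc_offset;
 *	block_group->alloc_offset += num_bytes;
 *	ctl->free_space -= num_bytes;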
3763 */ 3764 static int do_allocation_zoned(struct btrfs_block_group *block_group, 3765 struct find_free_extent_ctl *ffe_ctl, 3766 struct btrfs_block_group **bg_ret) 3767 { 3768 struct btrfs_fs_info *fs_info = block_group->fs_info; 3769 struct btrfs_space_info *space_info = block_group->space_info; 3770 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3771 u64 start = block_group->start; 3772 u64 num_bytes = ffe_ctl->num_bytes; 3773 u64 avail; 3774 u64 bytenr = block_group->start; 3775 u64 log_bytenr; 3776 u64 data_reloc_bytenr; 3777 int ret = 0; 3778 bool skip = false; 3779 3780 ASSERT(btrfs_is_zoned(block_group->fs_info)); 3781 3782 /* 3783 * Do not allow non-tree-log blocks in the dedicated tree-log block 3784 * group, and vice versa. 3785 */ 3786 spin_lock(&fs_info->treelog_bg_lock); 3787 log_bytenr = fs_info->treelog_bg; 3788 if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) || 3789 (!ffe_ctl->for_treelog && bytenr == log_bytenr))) 3790 skip = true; 3791 spin_unlock(&fs_info->treelog_bg_lock); 3792 if (skip) 3793 return 1; 3794 3795 /* 3796 * Do not allow non-relocation blocks in the dedicated relocation block 3797 * group, and vice versa. 3798 */ 3799 spin_lock(&fs_info->relocation_bg_lock); 3800 data_reloc_bytenr = fs_info->data_reloc_bg; 3801 if (data_reloc_bytenr && 3802 ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) || 3803 (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr))) 3804 skip = true; 3805 spin_unlock(&fs_info->relocation_bg_lock); 3806 if (skip) 3807 return 1; 3808 3809 /* Check RO and no space case before trying to activate it */ 3810 spin_lock(&block_group->lock); 3811 if (block_group->ro || 3812 block_group->alloc_offset == block_group->zone_capacity) { 3813 ret = 1; 3814 /* 3815 * May need to clear fs_info->{treelog,data_reloc}_bg. 3816 * Return the error after taking the locks. 3817 */ 3818 } 3819 spin_unlock(&block_group->lock); 3820 3821 if (!ret && !btrfs_zone_activate(block_group)) { 3822 ret = 1; 3823 /* 3824 * May need to clear fs_info->{treelog,data_reloc}_bg. 3825 * Return the error after taking the locks. 3826 */ 3827 } 3828 3829 spin_lock(&space_info->lock); 3830 spin_lock(&block_group->lock); 3831 spin_lock(&fs_info->treelog_bg_lock); 3832 spin_lock(&fs_info->relocation_bg_lock); 3833 3834 if (ret) 3835 goto out; 3836 3837 ASSERT(!ffe_ctl->for_treelog || 3838 block_group->start == fs_info->treelog_bg || 3839 fs_info->treelog_bg == 0); 3840 ASSERT(!ffe_ctl->for_data_reloc || 3841 block_group->start == fs_info->data_reloc_bg || 3842 fs_info->data_reloc_bg == 0); 3843 3844 if (block_group->ro) { 3845 ret = 1; 3846 goto out; 3847 } 3848 3849 /* 3850 * Do not allow currently using block group to be tree-log dedicated 3851 * block group. 3852 */ 3853 if (ffe_ctl->for_treelog && !fs_info->treelog_bg && 3854 (block_group->used || block_group->reserved)) { 3855 ret = 1; 3856 goto out; 3857 } 3858 3859 /* 3860 * Do not allow currently used block group to be the data relocation 3861 * dedicated block group. 
3862 */ 3863 if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg && 3864 (block_group->used || block_group->reserved)) { 3865 ret = 1; 3866 goto out; 3867 } 3868 3869 WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity); 3870 avail = block_group->zone_capacity - block_group->alloc_offset; 3871 if (avail < num_bytes) { 3872 if (ffe_ctl->max_extent_size < avail) { 3873 /* 3874 * With sequential allocator, free space is always 3875 * contiguous 3876 */ 3877 ffe_ctl->max_extent_size = avail; 3878 ffe_ctl->total_free_space = avail; 3879 } 3880 ret = 1; 3881 goto out; 3882 } 3883 3884 if (ffe_ctl->for_treelog && !fs_info->treelog_bg) 3885 fs_info->treelog_bg = block_group->start; 3886 3887 if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg) 3888 fs_info->data_reloc_bg = block_group->start; 3889 3890 ffe_ctl->found_offset = start + block_group->alloc_offset; 3891 block_group->alloc_offset += num_bytes; 3892 spin_lock(&ctl->tree_lock); 3893 ctl->free_space -= num_bytes; 3894 spin_unlock(&ctl->tree_lock); 3895 3896 /* 3897 * We do not check if found_offset is aligned to stripesize. The 3898 * address is anyway rewritten when using zone append writing. 3899 */ 3900 3901 ffe_ctl->search_start = ffe_ctl->found_offset; 3902 3903 out: 3904 if (ret && ffe_ctl->for_treelog) 3905 fs_info->treelog_bg = 0; 3906 if (ret && ffe_ctl->for_data_reloc) 3907 fs_info->data_reloc_bg = 0; 3908 spin_unlock(&fs_info->relocation_bg_lock); 3909 spin_unlock(&fs_info->treelog_bg_lock); 3910 spin_unlock(&block_group->lock); 3911 spin_unlock(&space_info->lock); 3912 return ret; 3913 } 3914 3915 static int do_allocation(struct btrfs_block_group *block_group, 3916 struct find_free_extent_ctl *ffe_ctl, 3917 struct btrfs_block_group **bg_ret) 3918 { 3919 switch (ffe_ctl->policy) { 3920 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3921 return do_allocation_clustered(block_group, ffe_ctl, bg_ret); 3922 case BTRFS_EXTENT_ALLOC_ZONED: 3923 return do_allocation_zoned(block_group, ffe_ctl, bg_ret); 3924 default: 3925 BUG(); 3926 } 3927 } 3928 3929 static void release_block_group(struct btrfs_block_group *block_group, 3930 struct find_free_extent_ctl *ffe_ctl, 3931 int delalloc) 3932 { 3933 switch (ffe_ctl->policy) { 3934 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3935 ffe_ctl->retry_clustered = false; 3936 ffe_ctl->retry_unclustered = false; 3937 break; 3938 case BTRFS_EXTENT_ALLOC_ZONED: 3939 /* Nothing to do */ 3940 break; 3941 default: 3942 BUG(); 3943 } 3944 3945 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) != 3946 ffe_ctl->index); 3947 btrfs_release_block_group(block_group, delalloc); 3948 } 3949 3950 static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl, 3951 struct btrfs_key *ins) 3952 { 3953 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3954 3955 if (!ffe_ctl->use_cluster && last_ptr) { 3956 spin_lock(&last_ptr->lock); 3957 last_ptr->window_start = ins->objectid; 3958 spin_unlock(&last_ptr->lock); 3959 } 3960 } 3961 3962 static void found_extent(struct find_free_extent_ctl *ffe_ctl, 3963 struct btrfs_key *ins) 3964 { 3965 switch (ffe_ctl->policy) { 3966 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3967 found_extent_clustered(ffe_ctl, ins); 3968 break; 3969 case BTRFS_EXTENT_ALLOC_ZONED: 3970 /* Nothing to do */ 3971 break; 3972 default: 3973 BUG(); 3974 } 3975 } 3976 3977 static bool can_allocate_chunk(struct btrfs_fs_info *fs_info, 3978 struct find_free_extent_ctl *ffe_ctl) 3979 { 3980 switch (ffe_ctl->policy) { 3981 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3982 return true; 3983 case 
BTRFS_EXTENT_ALLOC_ZONED: 3984 /* 3985 * If we have enough free space left in an already 3986 * active block group and we can't activate any other 3987 * zone now, do not allow allocating a new chunk and 3988 * let find_free_extent() retry with a smaller size. 3989 */ 3990 if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size && 3991 !btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags)) 3992 return false; 3993 return true; 3994 default: 3995 BUG(); 3996 } 3997 } 3998 3999 static int chunk_allocation_failed(struct find_free_extent_ctl *ffe_ctl) 4000 { 4001 switch (ffe_ctl->policy) { 4002 case BTRFS_EXTENT_ALLOC_CLUSTERED: 4003 /* 4004 * If we can't allocate a new chunk we've already looped through 4005 * at least once, move on to the NO_EMPTY_SIZE case. 4006 */ 4007 ffe_ctl->loop = LOOP_NO_EMPTY_SIZE; 4008 return 0; 4009 case BTRFS_EXTENT_ALLOC_ZONED: 4010 /* Give up here */ 4011 return -ENOSPC; 4012 default: 4013 BUG(); 4014 } 4015 } 4016 4017 /* 4018 * Return >0 means the caller needs to re-search for a free extent. 4019 * Return 0 means we have the needed free extent. 4020 * Return <0 means we failed to locate any free extent. 4021 */ 4022 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, 4023 struct btrfs_key *ins, 4024 struct find_free_extent_ctl *ffe_ctl, 4025 bool full_search) 4026 { 4027 struct btrfs_root *root = fs_info->chunk_root; 4028 int ret; 4029 4030 if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) && 4031 ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg) 4032 ffe_ctl->orig_have_caching_bg = true; 4033 4034 if (ins->objectid) { 4035 found_extent(ffe_ctl, ins); 4036 return 0; 4037 } 4038 4039 if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg) 4040 return 1; 4041 4042 ffe_ctl->index++; 4043 if (ffe_ctl->index < BTRFS_NR_RAID_TYPES) 4044 return 1; 4045 4046 /* 4047 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking 4048 * caching kthreads as we move along 4049 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching 4050 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again 4051 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try 4052 * again 4053 */ 4054 if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) { 4055 ffe_ctl->index = 0; 4056 if (ffe_ctl->loop == LOOP_CACHING_NOWAIT) { 4057 /* 4058 * We want to skip the LOOP_CACHING_WAIT step if we 4059 * don't have any uncached bgs and we've already done a 4060 * full search through. 4061 */ 4062 if (ffe_ctl->orig_have_caching_bg || !full_search) 4063 ffe_ctl->loop = LOOP_CACHING_WAIT; 4064 else 4065 ffe_ctl->loop = LOOP_ALLOC_CHUNK; 4066 } else { 4067 ffe_ctl->loop++; 4068 } 4069 4070 if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) { 4071 struct btrfs_trans_handle *trans; 4072 int exist = 0; 4073 4074 /* Check if the allocation policy allows creating a new chunk */ 4075 if (!can_allocate_chunk(fs_info, ffe_ctl)) 4076 return -ENOSPC; 4077 4078 trans = current->journal_info; 4079 if (trans) 4080 exist = 1; 4081 else 4082 trans = btrfs_join_transaction(root); 4083 4084 if (IS_ERR(trans)) { 4085 ret = PTR_ERR(trans); 4086 return ret; 4087 } 4088 4089 ret = btrfs_chunk_alloc(trans, ffe_ctl->flags, 4090 CHUNK_ALLOC_FORCE); 4091 4092 /* Do not bail out on ENOSPC since we can do more. 
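 * An ENOSPC from btrfs_chunk_alloc() is not necessarily final: for the
 * clustered policy, chunk_allocation_failed() above just advances the
 * retry state machine to LOOP_NO_EMPTY_SIZE; only the zoned policy
 * turns it into a hard error.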
*/ 4093 if (ret == -ENOSPC) 4094 ret = chunk_allocation_failed(ffe_ctl); 4095 else if (ret < 0) 4096 btrfs_abort_transaction(trans, ret); 4097 else 4098 ret = 0; 4099 if (!exist) 4100 btrfs_end_transaction(trans); 4101 if (ret) 4102 return ret; 4103 } 4104 4105 if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) { 4106 if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED) 4107 return -ENOSPC; 4108 4109 /* 4110 * Don't loop again if we already have no empty_size and 4111 * no empty_cluster. 4112 */ 4113 if (ffe_ctl->empty_size == 0 && 4114 ffe_ctl->empty_cluster == 0) 4115 return -ENOSPC; 4116 ffe_ctl->empty_size = 0; 4117 ffe_ctl->empty_cluster = 0; 4118 } 4119 return 1; 4120 } 4121 return -ENOSPC; 4122 } 4123 4124 static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, 4125 struct find_free_extent_ctl *ffe_ctl, 4126 struct btrfs_space_info *space_info, 4127 struct btrfs_key *ins) 4128 { 4129 /* 4130 * If our free space is heavily fragmented we may not be able to make 4131 * big contiguous allocations, so instead of doing the expensive search 4132 * for free space, simply return ENOSPC with our max_extent_size so we 4133 * can go ahead and search for a more manageable chunk. 4134 * 4135 * If our max_extent_size is large enough for our allocation, simply 4136 * disable clustering since we will likely not be able to find enough 4137 * space to create a cluster and would only induce latency trying. 4138 */ 4139 if (space_info->max_extent_size) { 4140 spin_lock(&space_info->lock); 4141 if (space_info->max_extent_size && 4142 ffe_ctl->num_bytes > space_info->max_extent_size) { 4143 ins->offset = space_info->max_extent_size; 4144 spin_unlock(&space_info->lock); 4145 return -ENOSPC; 4146 } else if (space_info->max_extent_size) { 4147 ffe_ctl->use_cluster = false; 4148 } 4149 spin_unlock(&space_info->lock); 4150 } 4151 4152 ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info, 4153 &ffe_ctl->empty_cluster); 4154 if (ffe_ctl->last_ptr) { 4155 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 4156 4157 spin_lock(&last_ptr->lock); 4158 if (last_ptr->block_group) 4159 ffe_ctl->hint_byte = last_ptr->window_start; 4160 if (last_ptr->fragmented) { 4161 /* 4162 * We still set window_start so we can keep track of the 4163 * last place we found an allocation to try and save 4164 * some time. 4165 */ 4166 ffe_ctl->hint_byte = last_ptr->window_start; 4167 ffe_ctl->use_cluster = false; 4168 } 4169 spin_unlock(&last_ptr->lock); 4170 } 4171 4172 return 0; 4173 } 4174 4175 static int prepare_allocation(struct btrfs_fs_info *fs_info, 4176 struct find_free_extent_ctl *ffe_ctl, 4177 struct btrfs_space_info *space_info, 4178 struct btrfs_key *ins) 4179 { 4180 switch (ffe_ctl->policy) { 4181 case BTRFS_EXTENT_ALLOC_CLUSTERED: 4182 return prepare_allocation_clustered(fs_info, ffe_ctl, 4183 space_info, ins); 4184 case BTRFS_EXTENT_ALLOC_ZONED: 4185 if (ffe_ctl->for_treelog) { 4186 spin_lock(&fs_info->treelog_bg_lock); 4187 if (fs_info->treelog_bg) 4188 ffe_ctl->hint_byte = fs_info->treelog_bg; 4189 spin_unlock(&fs_info->treelog_bg_lock); 4190 } 4191 if (ffe_ctl->for_data_reloc) { 4192 spin_lock(&fs_info->relocation_bg_lock); 4193 if (fs_info->data_reloc_bg) 4194 ffe_ctl->hint_byte = fs_info->data_reloc_bg; 4195 spin_unlock(&fs_info->relocation_bg_lock); 4196 } 4197 return 0; 4198 default: 4199 BUG(); 4200 } 4201 } 4202 4203 /* 4204 * walks the btree of allocated extents and finds a hole of a given size.
4205 * The key ins is changed to record the hole: 4206 * ins->objectid == start position 4207 * ins->flags = BTRFS_EXTENT_ITEM_KEY 4208 * ins->offset == the size of the hole. 4209 * Any available blocks before search_start are skipped. 4210 * 4211 * If there is no suitable free space, we will record the max size of 4212 * the free space extent currently available. 4213 * 4214 * The overall logic and call chain: 4215 * 4216 * find_free_extent() 4217 * |- Iterate through all block groups 4218 * | |- Get a valid block group 4219 * | |- Try to do clustered allocation in that block group 4220 * | |- Try to do unclustered allocation in that block group 4221 * | |- Check if the result is valid 4222 * | | |- If valid, then exit 4223 * | |- Jump to next block group 4224 * | 4225 * |- Push harder to find free extents 4226 * |- If not found, re-iterate all block groups 4227 */ 4228 static noinline int find_free_extent(struct btrfs_root *root, 4229 struct btrfs_key *ins, 4230 struct find_free_extent_ctl *ffe_ctl) 4231 { 4232 struct btrfs_fs_info *fs_info = root->fs_info; 4233 int ret = 0; 4234 int cache_block_group_error = 0; 4235 struct btrfs_block_group *block_group = NULL; 4236 struct btrfs_space_info *space_info; 4237 bool full_search = false; 4238 4239 WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize); 4240 4241 ffe_ctl->search_start = 0; 4242 /* For clustered allocation */ 4243 ffe_ctl->empty_cluster = 0; 4244 ffe_ctl->last_ptr = NULL; 4245 ffe_ctl->use_cluster = true; 4246 ffe_ctl->have_caching_bg = false; 4247 ffe_ctl->orig_have_caching_bg = false; 4248 ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags); 4249 ffe_ctl->loop = 0; 4250 /* For clustered allocation */ 4251 ffe_ctl->retry_clustered = false; 4252 ffe_ctl->retry_unclustered = false; 4253 ffe_ctl->cached = 0; 4254 ffe_ctl->max_extent_size = 0; 4255 ffe_ctl->total_free_space = 0; 4256 ffe_ctl->found_offset = 0; 4257 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED; 4258 4259 if (btrfs_is_zoned(fs_info)) 4260 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED; 4261 4262 ins->type = BTRFS_EXTENT_ITEM_KEY; 4263 ins->objectid = 0; 4264 ins->offset = 0; 4265 4266 trace_find_free_extent(root, ffe_ctl->num_bytes, ffe_ctl->empty_size, 4267 ffe_ctl->flags); 4268 4269 space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags); 4270 if (!space_info) { 4271 btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags); 4272 return -ENOSPC; 4273 } 4274 4275 ret = prepare_allocation(fs_info, ffe_ctl, space_info, ins); 4276 if (ret < 0) 4277 return ret; 4278 4279 ffe_ctl->search_start = max(ffe_ctl->search_start, 4280 first_logical_byte(fs_info, 0)); 4281 ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte); 4282 if (ffe_ctl->search_start == ffe_ctl->hint_byte) { 4283 block_group = btrfs_lookup_block_group(fs_info, 4284 ffe_ctl->search_start); 4285 /* 4286 * we don't want to use the block group if it doesn't match our 4287 * allocation bits, or if it's not cached. 4288 * 4289 * However, if we are re-searching with an ideal block group 4290 * picked out then we don't care that the block group is cached.
4291 */ 4292 if (block_group && block_group_bits(block_group, ffe_ctl->flags) && 4293 block_group->cached != BTRFS_CACHE_NO) { 4294 down_read(&space_info->groups_sem); 4295 if (list_empty(&block_group->list) || 4296 block_group->ro) { 4297 /* 4298 * someone is removing this block group, 4299 * we can't jump into the have_block_group 4300 * target because our list pointers are not 4301 * valid 4302 */ 4303 btrfs_put_block_group(block_group); 4304 up_read(&space_info->groups_sem); 4305 } else { 4306 ffe_ctl->index = btrfs_bg_flags_to_raid_index( 4307 block_group->flags); 4308 btrfs_lock_block_group(block_group, 4309 ffe_ctl->delalloc); 4310 goto have_block_group; 4311 } 4312 } else if (block_group) { 4313 btrfs_put_block_group(block_group); 4314 } 4315 } 4316 search: 4317 ffe_ctl->have_caching_bg = false; 4318 if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) || 4319 ffe_ctl->index == 0) 4320 full_search = true; 4321 down_read(&space_info->groups_sem); 4322 list_for_each_entry(block_group, 4323 &space_info->block_groups[ffe_ctl->index], list) { 4324 struct btrfs_block_group *bg_ret; 4325 4326 /* If the block group is read-only, we can skip it entirely. */ 4327 if (unlikely(block_group->ro)) { 4328 if (ffe_ctl->for_treelog) 4329 btrfs_clear_treelog_bg(block_group); 4330 if (ffe_ctl->for_data_reloc) 4331 btrfs_clear_data_reloc_bg(block_group); 4332 continue; 4333 } 4334 4335 btrfs_grab_block_group(block_group, ffe_ctl->delalloc); 4336 ffe_ctl->search_start = block_group->start; 4337 4338 /* 4339 * this can happen if we end up cycling through all the 4340 * raid types, but we want to make sure we only allocate 4341 * for the proper type. 4342 */ 4343 if (!block_group_bits(block_group, ffe_ctl->flags)) { 4344 u64 extra = BTRFS_BLOCK_GROUP_DUP | 4345 BTRFS_BLOCK_GROUP_RAID1_MASK | 4346 BTRFS_BLOCK_GROUP_RAID56_MASK | 4347 BTRFS_BLOCK_GROUP_RAID10; 4348 4349 /* 4350 * if they asked for extra copies and this block group 4351 * doesn't provide them, bail. This does allow us to 4352 * fill raid0 from raid1. 4353 */ 4354 if ((ffe_ctl->flags & extra) && !(block_group->flags & extra)) 4355 goto loop; 4356 4357 /* 4358 * This block group has different flags than we want. 4359 * It's possible that we have MIXED_GROUP flag but no 4360 * block group is mixed. Just skip such block group. 4361 */ 4362 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4363 continue; 4364 } 4365 4366 have_block_group: 4367 ffe_ctl->cached = btrfs_block_group_done(block_group); 4368 if (unlikely(!ffe_ctl->cached)) { 4369 ffe_ctl->have_caching_bg = true; 4370 ret = btrfs_cache_block_group(block_group, 0); 4371 4372 /* 4373 * If we get ENOMEM here or something else we want to 4374 * try other block groups, because it may not be fatal. 4375 * However if we can't find anything else we need to 4376 * save our return here so that we return the actual 4377 * error that caused problems, not ENOSPC. 
4378 */ 4379 if (ret < 0) { 4380 if (!cache_block_group_error) 4381 cache_block_group_error = ret; 4382 ret = 0; 4383 goto loop; 4384 } 4385 ret = 0; 4386 } 4387 4388 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) 4389 goto loop; 4390 4391 bg_ret = NULL; 4392 ret = do_allocation(block_group, ffe_ctl, &bg_ret); 4393 if (ret == 0) { 4394 if (bg_ret && bg_ret != block_group) { 4395 btrfs_release_block_group(block_group, 4396 ffe_ctl->delalloc); 4397 block_group = bg_ret; 4398 } 4399 } else if (ret == -EAGAIN) { 4400 goto have_block_group; 4401 } else if (ret > 0) { 4402 goto loop; 4403 } 4404 4405 /* Checks */ 4406 ffe_ctl->search_start = round_up(ffe_ctl->found_offset, 4407 fs_info->stripesize); 4408 4409 /* move on to the next group */ 4410 if (ffe_ctl->search_start + ffe_ctl->num_bytes > 4411 block_group->start + block_group->length) { 4412 btrfs_add_free_space_unused(block_group, 4413 ffe_ctl->found_offset, 4414 ffe_ctl->num_bytes); 4415 goto loop; 4416 } 4417 4418 if (ffe_ctl->found_offset < ffe_ctl->search_start) 4419 btrfs_add_free_space_unused(block_group, 4420 ffe_ctl->found_offset, 4421 ffe_ctl->search_start - ffe_ctl->found_offset); 4422 4423 ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes, 4424 ffe_ctl->num_bytes, 4425 ffe_ctl->delalloc); 4426 if (ret == -EAGAIN) { 4427 btrfs_add_free_space_unused(block_group, 4428 ffe_ctl->found_offset, 4429 ffe_ctl->num_bytes); 4430 goto loop; 4431 } 4432 btrfs_inc_block_group_reservations(block_group); 4433 4434 /* we are all good, lets return */ 4435 ins->objectid = ffe_ctl->search_start; 4436 ins->offset = ffe_ctl->num_bytes; 4437 4438 trace_btrfs_reserve_extent(block_group, ffe_ctl->search_start, 4439 ffe_ctl->num_bytes); 4440 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4441 break; 4442 loop: 4443 release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc); 4444 cond_resched(); 4445 } 4446 up_read(&space_info->groups_sem); 4447 4448 ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search); 4449 if (ret > 0) 4450 goto search; 4451 4452 if (ret == -ENOSPC && !cache_block_group_error) { 4453 /* 4454 * Use ffe_ctl->total_free_space as fallback if we can't find 4455 * any contiguous hole. 4456 */ 4457 if (!ffe_ctl->max_extent_size) 4458 ffe_ctl->max_extent_size = ffe_ctl->total_free_space; 4459 spin_lock(&space_info->lock); 4460 space_info->max_extent_size = ffe_ctl->max_extent_size; 4461 spin_unlock(&space_info->lock); 4462 ins->offset = ffe_ctl->max_extent_size; 4463 } else if (ret == -ENOSPC) { 4464 ret = cache_block_group_error; 4465 } 4466 return ret; 4467 } 4468 4469 /* 4470 * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a 4471 * hole that is at least as big as @num_bytes. 4472 * 4473 * @root - The root that will contain this extent 4474 * 4475 * @ram_bytes - The amount of space in ram that @num_bytes take. This 4476 * is used for accounting purposes. This value differs 4477 * from @num_bytes only in the case of compressed extents. 4478 * 4479 * @num_bytes - Number of bytes to allocate on-disk. 4480 * 4481 * @min_alloc_size - Indicates the minimum amount of space that the 4482 * allocator should try to satisfy. In some cases 4483 * @num_bytes may be larger than what is required and if 4484 * the filesystem is fragmented then allocation fails. 4485 * However, the presence of @min_alloc_size gives a 4486 * chance to try and satisfy the smaller allocation. 4487 * 4488 * @empty_size - A hint that you plan on doing more COW. 
This is the 4489 * size in bytes the allocator should try to find free 4490 * next to the block it returns. This is just a hint and 4491 * may be ignored by the allocator. 4492 * 4493 * @hint_byte - Hint to the allocator to start searching above the byte 4494 * address passed. It might be ignored. 4495 * 4496 * @ins - This key is modified to record the found hole. It will 4497 * have the following values: 4498 * ins->objectid == start position 4499 * ins->flags = BTRFS_EXTENT_ITEM_KEY 4500 * ins->offset == the size of the hole. 4501 * 4502 * @is_data - Boolean flag indicating whether an extent is 4503 * allocated for data (true) or metadata (false) 4504 * 4505 * @delalloc - Boolean flag indicating whether this allocation is for 4506 * delalloc or not. If 'true' data_rwsem of block groups 4507 * is going to be acquired. 4508 * 4509 * 4510 * Returns 0 when an allocation succeeded or < 0 when an error occurred. In 4511 * case -ENOSPC is returned then @ins->offset will contain the size of the 4512 * largest available hole the allocator managed to find. 4513 */ 4514 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, 4515 u64 num_bytes, u64 min_alloc_size, 4516 u64 empty_size, u64 hint_byte, 4517 struct btrfs_key *ins, int is_data, int delalloc) 4518 { 4519 struct btrfs_fs_info *fs_info = root->fs_info; 4520 struct find_free_extent_ctl ffe_ctl = {}; 4521 bool final_tried = num_bytes == min_alloc_size; 4522 u64 flags; 4523 int ret; 4524 bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 4525 bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data); 4526 4527 flags = get_alloc_profile_by_root(root, is_data); 4528 again: 4529 WARN_ON(num_bytes < fs_info->sectorsize); 4530 4531 ffe_ctl.ram_bytes = ram_bytes; 4532 ffe_ctl.num_bytes = num_bytes; 4533 ffe_ctl.min_alloc_size = min_alloc_size; 4534 ffe_ctl.empty_size = empty_size; 4535 ffe_ctl.flags = flags; 4536 ffe_ctl.delalloc = delalloc; 4537 ffe_ctl.hint_byte = hint_byte; 4538 ffe_ctl.for_treelog = for_treelog; 4539 ffe_ctl.for_data_reloc = for_data_reloc; 4540 4541 ret = find_free_extent(root, ins, &ffe_ctl); 4542 if (!ret && !is_data) { 4543 btrfs_dec_block_group_reservations(fs_info, ins->objectid); 4544 } else if (ret == -ENOSPC) { 4545 if (!final_tried && ins->offset) { 4546 num_bytes = min(num_bytes >> 1, ins->offset); 4547 num_bytes = round_down(num_bytes, 4548 fs_info->sectorsize); 4549 num_bytes = max(num_bytes, min_alloc_size); 4550 ram_bytes = num_bytes; 4551 if (num_bytes == min_alloc_size) 4552 final_tried = true; 4553 goto again; 4554 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 4555 struct btrfs_space_info *sinfo; 4556 4557 sinfo = btrfs_find_space_info(fs_info, flags); 4558 btrfs_err(fs_info, 4559 "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d", 4560 flags, num_bytes, for_treelog, for_data_reloc); 4561 if (sinfo) 4562 btrfs_dump_space_info(fs_info, sinfo, 4563 num_bytes, 1); 4564 } 4565 } 4566 4567 return ret; 4568 } 4569 4570 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, 4571 u64 start, u64 len, int delalloc) 4572 { 4573 struct btrfs_block_group *cache; 4574 4575 cache = btrfs_lookup_block_group(fs_info, start); 4576 if (!cache) { 4577 btrfs_err(fs_info, "Unable to find block group for %llu", 4578 start); 4579 return -ENOSPC; 4580 } 4581 4582 btrfs_add_free_space(cache, start, len); 4583 btrfs_free_reserved_bytes(cache, len, delalloc); 4584 trace_btrfs_reserved_extent_free(fs_info, start, len); 4585 4586 btrfs_put_block_group(cache); 4587 
return 0; 4588 } 4589 4590 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, 4591 u64 len) 4592 { 4593 struct btrfs_block_group *cache; 4594 int ret = 0; 4595 4596 cache = btrfs_lookup_block_group(trans->fs_info, start); 4597 if (!cache) { 4598 btrfs_err(trans->fs_info, "unable to find block group for %llu", 4599 start); 4600 return -ENOSPC; 4601 } 4602 4603 ret = pin_down_extent(trans, cache, start, len, 1); 4604 btrfs_put_block_group(cache); 4605 return ret; 4606 } 4607 4608 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 4609 u64 parent, u64 root_objectid, 4610 u64 flags, u64 owner, u64 offset, 4611 struct btrfs_key *ins, int ref_mod) 4612 { 4613 struct btrfs_fs_info *fs_info = trans->fs_info; 4614 struct btrfs_root *extent_root; 4615 int ret; 4616 struct btrfs_extent_item *extent_item; 4617 struct btrfs_extent_inline_ref *iref; 4618 struct btrfs_path *path; 4619 struct extent_buffer *leaf; 4620 int type; 4621 u32 size; 4622 4623 if (parent > 0) 4624 type = BTRFS_SHARED_DATA_REF_KEY; 4625 else 4626 type = BTRFS_EXTENT_DATA_REF_KEY; 4627 4628 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); 4629 4630 path = btrfs_alloc_path(); 4631 if (!path) 4632 return -ENOMEM; 4633 4634 extent_root = btrfs_extent_root(fs_info, ins->objectid); 4635 ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size); 4636 if (ret) { 4637 btrfs_free_path(path); 4638 return ret; 4639 } 4640 4641 leaf = path->nodes[0]; 4642 extent_item = btrfs_item_ptr(leaf, path->slots[0], 4643 struct btrfs_extent_item); 4644 btrfs_set_extent_refs(leaf, extent_item, ref_mod); 4645 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 4646 btrfs_set_extent_flags(leaf, extent_item, 4647 flags | BTRFS_EXTENT_FLAG_DATA); 4648 4649 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 4650 btrfs_set_extent_inline_ref_type(leaf, iref, type); 4651 if (parent > 0) { 4652 struct btrfs_shared_data_ref *ref; 4653 ref = (struct btrfs_shared_data_ref *)(iref + 1); 4654 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 4655 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); 4656 } else { 4657 struct btrfs_extent_data_ref *ref; 4658 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 4659 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); 4660 btrfs_set_extent_data_ref_objectid(leaf, ref, owner); 4661 btrfs_set_extent_data_ref_offset(leaf, ref, offset); 4662 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); 4663 } 4664 4665 btrfs_mark_buffer_dirty(path->nodes[0]); 4666 btrfs_free_path(path); 4667 4668 ret = remove_from_free_space_tree(trans, ins->objectid, ins->offset); 4669 if (ret) 4670 return ret; 4671 4672 ret = btrfs_update_block_group(trans, ins->objectid, ins->offset, true); 4673 if (ret) { /* -ENOENT, logic error */ 4674 btrfs_err(fs_info, "update block group failed for %llu %llu", 4675 ins->objectid, ins->offset); 4676 BUG(); 4677 } 4678 trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset); 4679 return ret; 4680 } 4681 4682 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, 4683 struct btrfs_delayed_ref_node *node, 4684 struct btrfs_delayed_extent_op *extent_op) 4685 { 4686 struct btrfs_fs_info *fs_info = trans->fs_info; 4687 struct btrfs_root *extent_root; 4688 int ret; 4689 struct btrfs_extent_item *extent_item; 4690 struct btrfs_key extent_key; 4691 struct btrfs_tree_block_info *block_info; 4692 struct btrfs_extent_inline_ref *iref; 4693 struct btrfs_path *path; 4694 struct extent_buffer 
*leaf; 4695 struct btrfs_delayed_tree_ref *ref; 4696 u32 size = sizeof(*extent_item) + sizeof(*iref); 4697 u64 num_bytes; 4698 u64 flags = extent_op->flags_to_set; 4699 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 4700 4701 ref = btrfs_delayed_node_to_tree_ref(node); 4702 4703 extent_key.objectid = node->bytenr; 4704 if (skinny_metadata) { 4705 extent_key.offset = ref->level; 4706 extent_key.type = BTRFS_METADATA_ITEM_KEY; 4707 num_bytes = fs_info->nodesize; 4708 } else { 4709 extent_key.offset = node->num_bytes; 4710 extent_key.type = BTRFS_EXTENT_ITEM_KEY; 4711 size += sizeof(*block_info); 4712 num_bytes = node->num_bytes; 4713 } 4714 4715 path = btrfs_alloc_path(); 4716 if (!path) 4717 return -ENOMEM; 4718 4719 extent_root = btrfs_extent_root(fs_info, extent_key.objectid); 4720 ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key, 4721 size); 4722 if (ret) { 4723 btrfs_free_path(path); 4724 return ret; 4725 } 4726 4727 leaf = path->nodes[0]; 4728 extent_item = btrfs_item_ptr(leaf, path->slots[0], 4729 struct btrfs_extent_item); 4730 btrfs_set_extent_refs(leaf, extent_item, 1); 4731 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 4732 btrfs_set_extent_flags(leaf, extent_item, 4733 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); 4734 4735 if (skinny_metadata) { 4736 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 4737 } else { 4738 block_info = (struct btrfs_tree_block_info *)(extent_item + 1); 4739 btrfs_set_tree_block_key(leaf, block_info, &extent_op->key); 4740 btrfs_set_tree_block_level(leaf, block_info, ref->level); 4741 iref = (struct btrfs_extent_inline_ref *)(block_info + 1); 4742 } 4743 4744 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) { 4745 btrfs_set_extent_inline_ref_type(leaf, iref, 4746 BTRFS_SHARED_BLOCK_REF_KEY); 4747 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent); 4748 } else { 4749 btrfs_set_extent_inline_ref_type(leaf, iref, 4750 BTRFS_TREE_BLOCK_REF_KEY); 4751 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root); 4752 } 4753 4754 btrfs_mark_buffer_dirty(leaf); 4755 btrfs_free_path(path); 4756 4757 ret = remove_from_free_space_tree(trans, extent_key.objectid, 4758 num_bytes); 4759 if (ret) 4760 return ret; 4761 4762 ret = btrfs_update_block_group(trans, extent_key.objectid, 4763 fs_info->nodesize, true); 4764 if (ret) { /* -ENOENT, logic error */ 4765 btrfs_err(fs_info, "update block group failed for %llu %llu", 4766 extent_key.objectid, extent_key.offset); 4767 BUG(); 4768 } 4769 4770 trace_btrfs_reserved_extent_alloc(fs_info, extent_key.objectid, 4771 fs_info->nodesize); 4772 return ret; 4773 } 4774 4775 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 4776 struct btrfs_root *root, u64 owner, 4777 u64 offset, u64 ram_bytes, 4778 struct btrfs_key *ins) 4779 { 4780 struct btrfs_ref generic_ref = { 0 }; 4781 4782 BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 4783 4784 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, 4785 ins->objectid, ins->offset, 0); 4786 btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, 4787 offset, 0, false); 4788 btrfs_ref_tree_mod(root->fs_info, &generic_ref); 4789 4790 return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes); 4791 } 4792 4793 /* 4794 * this is used by the tree logging recovery code. 
It records that 4795 * an extent has been allocated and makes sure to clear the free 4796 * space cache bits as well 4797 */ 4798 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 4799 u64 root_objectid, u64 owner, u64 offset, 4800 struct btrfs_key *ins) 4801 { 4802 struct btrfs_fs_info *fs_info = trans->fs_info; 4803 int ret; 4804 struct btrfs_block_group *block_group; 4805 struct btrfs_space_info *space_info; 4806 4807 /* 4808 * Mixed block groups will exclude before processing the log so we only 4809 * need to do the exclude dance if this fs isn't mixed. 4810 */ 4811 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { 4812 ret = __exclude_logged_extent(fs_info, ins->objectid, 4813 ins->offset); 4814 if (ret) 4815 return ret; 4816 } 4817 4818 block_group = btrfs_lookup_block_group(fs_info, ins->objectid); 4819 if (!block_group) 4820 return -EINVAL; 4821 4822 space_info = block_group->space_info; 4823 spin_lock(&space_info->lock); 4824 spin_lock(&block_group->lock); 4825 space_info->bytes_reserved += ins->offset; 4826 block_group->reserved += ins->offset; 4827 spin_unlock(&block_group->lock); 4828 spin_unlock(&space_info->lock); 4829 4830 ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner, 4831 offset, ins, 1); 4832 if (ret) 4833 btrfs_pin_extent(trans, ins->objectid, ins->offset, 1); 4834 btrfs_put_block_group(block_group); 4835 return ret; 4836 } 4837 4838 static struct extent_buffer * 4839 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4840 u64 bytenr, int level, u64 owner, 4841 enum btrfs_lock_nesting nest) 4842 { 4843 struct btrfs_fs_info *fs_info = root->fs_info; 4844 struct extent_buffer *buf; 4845 4846 buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level); 4847 if (IS_ERR(buf)) 4848 return buf; 4849 4850 /* 4851 * Extra safety check in case the extent tree is corrupted and extent 4852 * allocator chooses to use a tree block which is already used and 4853 * locked. 4854 */ 4855 if (buf->lock_owner == current->pid) { 4856 btrfs_err_rl(fs_info, 4857 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected", 4858 buf->start, btrfs_header_owner(buf), current->pid); 4859 free_extent_buffer(buf); 4860 return ERR_PTR(-EUCLEAN); 4861 } 4862 4863 /* 4864 * This needs to stay, because we could allocate a freed block from an 4865 * old tree into a new tree, so we need to make sure this new block is 4866 * set to the appropriate level and owner. 4867 */ 4868 btrfs_set_buffer_lockdep_class(owner, buf, level); 4869 __btrfs_tree_lock(buf, nest); 4870 btrfs_clean_tree_block(buf); 4871 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); 4872 clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags); 4873 4874 set_extent_buffer_uptodate(buf); 4875 4876 memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header)); 4877 btrfs_set_header_level(buf, level); 4878 btrfs_set_header_bytenr(buf, buf->start); 4879 btrfs_set_header_generation(buf, trans->transid); 4880 btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV); 4881 btrfs_set_header_owner(buf, owner); 4882 write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid); 4883 write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid); 4884 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 4885 buf->log_index = root->log_transid % 2; 4886 /* 4887 * we allow two log transactions at a time, use different 4888 * EXTENT bit to differentiate dirty pages. 
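 * (log_index 0 tags its pages with the EXTENT_DIRTY bit via
 * set_extent_dirty(), log_index 1 uses EXTENT_NEW via set_extent_new();
 * see the two branches below)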
4889 */ 4890 if (buf->log_index == 0) 4891 set_extent_dirty(&root->dirty_log_pages, buf->start, 4892 buf->start + buf->len - 1, GFP_NOFS); 4893 else 4894 set_extent_new(&root->dirty_log_pages, buf->start, 4895 buf->start + buf->len - 1); 4896 } else { 4897 buf->log_index = -1; 4898 set_extent_dirty(&trans->transaction->dirty_pages, buf->start, 4899 buf->start + buf->len - 1, GFP_NOFS); 4900 } 4901 /* this returns a buffer locked for blocking */ 4902 return buf; 4903 } 4904 4905 /* 4906 * finds a free extent and does all the dirty work required for allocation 4907 * returns the tree buffer or an ERR_PTR on error. 4908 */ 4909 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 4910 struct btrfs_root *root, 4911 u64 parent, u64 root_objectid, 4912 const struct btrfs_disk_key *key, 4913 int level, u64 hint, 4914 u64 empty_size, 4915 enum btrfs_lock_nesting nest) 4916 { 4917 struct btrfs_fs_info *fs_info = root->fs_info; 4918 struct btrfs_key ins; 4919 struct btrfs_block_rsv *block_rsv; 4920 struct extent_buffer *buf; 4921 struct btrfs_delayed_extent_op *extent_op; 4922 struct btrfs_ref generic_ref = { 0 }; 4923 u64 flags = 0; 4924 int ret; 4925 u32 blocksize = fs_info->nodesize; 4926 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 4927 4928 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4929 if (btrfs_is_testing(fs_info)) { 4930 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, 4931 level, root_objectid, nest); 4932 if (!IS_ERR(buf)) 4933 root->alloc_bytenr += blocksize; 4934 return buf; 4935 } 4936 #endif 4937 4938 block_rsv = btrfs_use_block_rsv(trans, root, blocksize); 4939 if (IS_ERR(block_rsv)) 4940 return ERR_CAST(block_rsv); 4941 4942 ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize, 4943 empty_size, hint, &ins, 0, 0); 4944 if (ret) 4945 goto out_unuse; 4946 4947 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level, 4948 root_objectid, nest); 4949 if (IS_ERR(buf)) { 4950 ret = PTR_ERR(buf); 4951 goto out_free_reserved; 4952 } 4953 4954 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 4955 if (parent == 0) 4956 parent = ins.objectid; 4957 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 4958 } else 4959 BUG_ON(parent > 0); 4960 4961 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 4962 extent_op = btrfs_alloc_delayed_extent_op(); 4963 if (!extent_op) { 4964 ret = -ENOMEM; 4965 goto out_free_buf; 4966 } 4967 if (key) 4968 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 4969 else 4970 memset(&extent_op->key, 0, sizeof(extent_op->key)); 4971 extent_op->flags_to_set = flags; 4972 extent_op->update_key = skinny_metadata ? 
false : true; 4973 extent_op->update_flags = true; 4974 extent_op->is_data = false; 4975 extent_op->level = level; 4976 4977 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, 4978 ins.objectid, ins.offset, parent); 4979 btrfs_init_tree_ref(&generic_ref, level, root_objectid, 4980 root->root_key.objectid, false); 4981 btrfs_ref_tree_mod(fs_info, &generic_ref); 4982 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op); 4983 if (ret) 4984 goto out_free_delayed; 4985 } 4986 return buf; 4987 4988 out_free_delayed: 4989 btrfs_free_delayed_extent_op(extent_op); 4990 out_free_buf: 4991 btrfs_tree_unlock(buf); 4992 free_extent_buffer(buf); 4993 out_free_reserved: 4994 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0); 4995 out_unuse: 4996 btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize); 4997 return ERR_PTR(ret); 4998 } 4999 5000 struct walk_control { 5001 u64 refs[BTRFS_MAX_LEVEL]; 5002 u64 flags[BTRFS_MAX_LEVEL]; 5003 struct btrfs_key update_progress; 5004 struct btrfs_key drop_progress; 5005 int drop_level; 5006 int stage; 5007 int level; 5008 int shared_level; 5009 int update_ref; 5010 int keep_locks; 5011 int reada_slot; 5012 int reada_count; 5013 int restarted; 5014 }; 5015 5016 #define DROP_REFERENCE 1 5017 #define UPDATE_BACKREF 2 5018 5019 static noinline void reada_walk_down(struct btrfs_trans_handle *trans, 5020 struct btrfs_root *root, 5021 struct walk_control *wc, 5022 struct btrfs_path *path) 5023 { 5024 struct btrfs_fs_info *fs_info = root->fs_info; 5025 u64 bytenr; 5026 u64 generation; 5027 u64 refs; 5028 u64 flags; 5029 u32 nritems; 5030 struct btrfs_key key; 5031 struct extent_buffer *eb; 5032 int ret; 5033 int slot; 5034 int nread = 0; 5035 5036 if (path->slots[wc->level] < wc->reada_slot) { 5037 wc->reada_count = wc->reada_count * 2 / 3; 5038 wc->reada_count = max(wc->reada_count, 2); 5039 } else { 5040 wc->reada_count = wc->reada_count * 3 / 2; 5041 wc->reada_count = min_t(int, wc->reada_count, 5042 BTRFS_NODEPTRS_PER_BLOCK(fs_info)); 5043 } 5044 5045 eb = path->nodes[wc->level]; 5046 nritems = btrfs_header_nritems(eb); 5047 5048 for (slot = path->slots[wc->level]; slot < nritems; slot++) { 5049 if (nread >= wc->reada_count) 5050 break; 5051 5052 cond_resched(); 5053 bytenr = btrfs_node_blockptr(eb, slot); 5054 generation = btrfs_node_ptr_generation(eb, slot); 5055 5056 if (slot == path->slots[wc->level]) 5057 goto reada; 5058 5059 if (wc->stage == UPDATE_BACKREF && 5060 generation <= root->root_key.offset) 5061 continue; 5062 5063 /* We don't lock the tree block, it's OK to be racy here */ 5064 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, 5065 wc->level - 1, 1, &refs, 5066 &flags); 5067 /* We don't care about errors in readahead. */ 5068 if (ret < 0) 5069 continue; 5070 BUG_ON(refs == 0); 5071 5072 if (wc->stage == DROP_REFERENCE) { 5073 if (refs == 1) 5074 goto reada; 5075 5076 if (wc->level == 1 && 5077 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5078 continue; 5079 if (!wc->update_ref || 5080 generation <= root->root_key.offset) 5081 continue; 5082 btrfs_node_key_to_cpu(eb, &key, slot); 5083 ret = btrfs_comp_cpu_keys(&key, 5084 &wc->update_progress); 5085 if (ret < 0) 5086 continue; 5087 } else { 5088 if (wc->level == 1 && 5089 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5090 continue; 5091 } 5092 reada: 5093 btrfs_readahead_node_child(eb, slot); 5094 nread++; 5095 } 5096 wc->reada_slot = slot; 5097 } 5098 5099 /* 5100 * helper to process tree block while walking down the tree. 
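 * (wc->stage is either DROP_REFERENCE or UPDATE_BACKREF; see the
 * walk_control definition above)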
5101 * 5102 * when wc->stage == UPDATE_BACKREF, this function updates 5103 * back refs for pointers in the block. 5104 * 5105 * NOTE: return value 1 means we should stop walking down. 5106 */ 5107 static noinline int walk_down_proc(struct btrfs_trans_handle *trans, 5108 struct btrfs_root *root, 5109 struct btrfs_path *path, 5110 struct walk_control *wc, int lookup_info) 5111 { 5112 struct btrfs_fs_info *fs_info = root->fs_info; 5113 int level = wc->level; 5114 struct extent_buffer *eb = path->nodes[level]; 5115 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; 5116 int ret; 5117 5118 if (wc->stage == UPDATE_BACKREF && 5119 btrfs_header_owner(eb) != root->root_key.objectid) 5120 return 1; 5121 5122 /* 5123 * when the reference count of a tree block is 1, it won't increase 5124 * again. once the full backref flag is set, we never clear it. 5125 */ 5126 if (lookup_info && 5127 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || 5128 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { 5129 BUG_ON(!path->locks[level]); 5130 ret = btrfs_lookup_extent_info(trans, fs_info, 5131 eb->start, level, 1, 5132 &wc->refs[level], 5133 &wc->flags[level]); 5134 BUG_ON(ret == -ENOMEM); 5135 if (ret) 5136 return ret; 5137 BUG_ON(wc->refs[level] == 0); 5138 } 5139 5140 if (wc->stage == DROP_REFERENCE) { 5141 if (wc->refs[level] > 1) 5142 return 1; 5143 5144 if (path->locks[level] && !wc->keep_locks) { 5145 btrfs_tree_unlock_rw(eb, path->locks[level]); 5146 path->locks[level] = 0; 5147 } 5148 return 0; 5149 } 5150 5151 /* wc->stage == UPDATE_BACKREF */ 5152 if (!(wc->flags[level] & flag)) { 5153 BUG_ON(!path->locks[level]); 5154 ret = btrfs_inc_ref(trans, root, eb, 1); 5155 BUG_ON(ret); /* -ENOMEM */ 5156 ret = btrfs_dec_ref(trans, root, eb, 0); 5157 BUG_ON(ret); /* -ENOMEM */ 5158 ret = btrfs_set_disk_extent_flags(trans, eb, flag, 5159 btrfs_header_level(eb), 0); 5160 BUG_ON(ret); /* -ENOMEM */ 5161 wc->flags[level] |= flag; 5162 } 5163 5164 /* 5165 * the block is shared by multiple trees, so it's not good to 5166 * keep the tree lock 5167 */ 5168 if (path->locks[level] && level > 0) { 5169 btrfs_tree_unlock_rw(eb, path->locks[level]); 5170 path->locks[level] = 0; 5171 } 5172 return 0; 5173 } 5174 5175 /* 5176 * This is used to verify a ref exists for this root to deal with a bug where we 5177 * would have a drop_progress key that hadn't been updated properly. 5178 */ 5179 static int check_ref_exists(struct btrfs_trans_handle *trans, 5180 struct btrfs_root *root, u64 bytenr, u64 parent, 5181 int level) 5182 { 5183 struct btrfs_path *path; 5184 struct btrfs_extent_inline_ref *iref; 5185 int ret; 5186 5187 path = btrfs_alloc_path(); 5188 if (!path) 5189 return -ENOMEM; 5190 5191 ret = lookup_extent_backref(trans, path, &iref, bytenr, 5192 root->fs_info->nodesize, parent, 5193 root->root_key.objectid, level, 0); 5194 btrfs_free_path(path); 5195 if (ret == -ENOENT) 5196 return 0; 5197 if (ret < 0) 5198 return ret; 5199 return 1; 5200 } 5201 5202 /* 5203 * helper to process tree block pointer. 5204 * 5205 * when wc->stage == DROP_REFERENCE, this function checks the 5206 * reference count of the block pointed to. if the block 5207 * is shared and we need to update back refs for the subtree 5208 * rooted at the block, this function changes wc->stage to 5209 * UPDATE_BACKREF. if the block is shared and there is no 5210 * need to update backrefs, this function drops the reference 5211 * to the block. 5212 * 5213 * NOTE: return value 1 means we should stop walking down.
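 *
 * A condensed sketch of how the caller, walk_down_tree(), reacts to the
 * return value (illustrative only, see the real loop further below):
 * ret > 0 skips the child and advances to the next slot, ret < 0 is a
 * hard error, and ret == 0 means we descended one level:
 *
 *	ret = do_walk_down(trans, root, path, wc, &lookup_info);
 *	if (ret > 0) {
 *		path->slots[level]++;
 *		continue;
 *	} else if (ret < 0)
 *		return ret;
 *	level = wc->level;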
5214 */ 5215 static noinline int do_walk_down(struct btrfs_trans_handle *trans, 5216 struct btrfs_root *root, 5217 struct btrfs_path *path, 5218 struct walk_control *wc, int *lookup_info) 5219 { 5220 struct btrfs_fs_info *fs_info = root->fs_info; 5221 u64 bytenr; 5222 u64 generation; 5223 u64 parent; 5224 struct btrfs_key key; 5225 struct btrfs_key first_key; 5226 struct btrfs_ref ref = { 0 }; 5227 struct extent_buffer *next; 5228 int level = wc->level; 5229 int reada = 0; 5230 int ret = 0; 5231 bool need_account = false; 5232 5233 generation = btrfs_node_ptr_generation(path->nodes[level], 5234 path->slots[level]); 5235 /* 5236 * if the lower level block was created before the snapshot 5237 * was created, we know there is no need to update back refs 5238 * for the subtree 5239 */ 5240 if (wc->stage == UPDATE_BACKREF && 5241 generation <= root->root_key.offset) { 5242 *lookup_info = 1; 5243 return 1; 5244 } 5245 5246 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); 5247 btrfs_node_key_to_cpu(path->nodes[level], &first_key, 5248 path->slots[level]); 5249 5250 next = find_extent_buffer(fs_info, bytenr); 5251 if (!next) { 5252 next = btrfs_find_create_tree_block(fs_info, bytenr, 5253 root->root_key.objectid, level - 1); 5254 if (IS_ERR(next)) 5255 return PTR_ERR(next); 5256 reada = 1; 5257 } 5258 btrfs_tree_lock(next); 5259 5260 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1, 5261 &wc->refs[level - 1], 5262 &wc->flags[level - 1]); 5263 if (ret < 0) 5264 goto out_unlock; 5265 5266 if (unlikely(wc->refs[level - 1] == 0)) { 5267 btrfs_err(fs_info, "Missing references."); 5268 ret = -EIO; 5269 goto out_unlock; 5270 } 5271 *lookup_info = 0; 5272 5273 if (wc->stage == DROP_REFERENCE) { 5274 if (wc->refs[level - 1] > 1) { 5275 need_account = true; 5276 if (level == 1 && 5277 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5278 goto skip; 5279 5280 if (!wc->update_ref || 5281 generation <= root->root_key.offset) 5282 goto skip; 5283 5284 btrfs_node_key_to_cpu(path->nodes[level], &key, 5285 path->slots[level]); 5286 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); 5287 if (ret < 0) 5288 goto skip; 5289 5290 wc->stage = UPDATE_BACKREF; 5291 wc->shared_level = level - 1; 5292 } 5293 } else { 5294 if (level == 1 && 5295 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5296 goto skip; 5297 } 5298 5299 if (!btrfs_buffer_uptodate(next, generation, 0)) { 5300 btrfs_tree_unlock(next); 5301 free_extent_buffer(next); 5302 next = NULL; 5303 *lookup_info = 1; 5304 } 5305 5306 if (!next) { 5307 if (reada && level == 1) 5308 reada_walk_down(trans, root, wc, path); 5309 next = read_tree_block(fs_info, bytenr, root->root_key.objectid, 5310 generation, level - 1, &first_key); 5311 if (IS_ERR(next)) { 5312 return PTR_ERR(next); 5313 } else if (!extent_buffer_uptodate(next)) { 5314 free_extent_buffer(next); 5315 return -EIO; 5316 } 5317 btrfs_tree_lock(next); 5318 } 5319 5320 level--; 5321 ASSERT(level == btrfs_header_level(next)); 5322 if (level != btrfs_header_level(next)) { 5323 btrfs_err(root->fs_info, "mismatched level"); 5324 ret = -EIO; 5325 goto out_unlock; 5326 } 5327 path->nodes[level] = next; 5328 path->slots[level] = 0; 5329 path->locks[level] = BTRFS_WRITE_LOCK; 5330 wc->level = level; 5331 if (wc->level == 1) 5332 wc->reada_slot = 0; 5333 return 0; 5334 skip: 5335 wc->refs[level - 1] = 0; 5336 wc->flags[level - 1] = 0; 5337 if (wc->stage == DROP_REFERENCE) { 5338 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 5339 parent = path->nodes[level]->start; 
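			/*
			 * The child is shared via a full backref keyed by
			 * this node's bytenr, so that bytenr is passed as
			 * the parent of the delayed ref dropped below.
			 */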
5340 } else { 5341 ASSERT(root->root_key.objectid == 5342 btrfs_header_owner(path->nodes[level])); 5343 if (root->root_key.objectid != 5344 btrfs_header_owner(path->nodes[level])) { 5345 btrfs_err(root->fs_info, 5346 "mismatched block owner"); 5347 ret = -EIO; 5348 goto out_unlock; 5349 } 5350 parent = 0; 5351 } 5352 5353 /* 5354 * If we had a drop_progress we need to verify the refs are set 5355 * as expected. If we find our ref then we know that from here 5356 * on out everything should be correct, and we can clear the 5357 * ->restarted flag. 5358 */ 5359 if (wc->restarted) { 5360 ret = check_ref_exists(trans, root, bytenr, parent, 5361 level - 1); 5362 if (ret < 0) 5363 goto out_unlock; 5364 if (ret == 0) 5365 goto no_delete; 5366 ret = 0; 5367 wc->restarted = 0; 5368 } 5369 5370 /* 5371 * Reloc tree doesn't contribute to qgroup numbers, and we have 5372 * already accounted them at merge time (replace_path), 5373 * thus we could skip expensive subtree trace here. 5374 */ 5375 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 5376 need_account) { 5377 ret = btrfs_qgroup_trace_subtree(trans, next, 5378 generation, level - 1); 5379 if (ret) { 5380 btrfs_err_rl(fs_info, 5381 "Error %d accounting shared subtree. Quota is out of sync, rescan required.", 5382 ret); 5383 } 5384 } 5385 5386 /* 5387 * We need to update the next key in our walk control so we can 5388 * update the drop_progress key accordingly. We don't care if 5389 * find_next_key doesn't find a key because that means we're at 5390 * the end and are going to clean up now. 5391 */ 5392 wc->drop_level = level; 5393 find_next_key(path, level, &wc->drop_progress); 5394 5395 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr, 5396 fs_info->nodesize, parent); 5397 btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid, 5398 0, false); 5399 ret = btrfs_free_extent(trans, &ref); 5400 if (ret) 5401 goto out_unlock; 5402 } 5403 no_delete: 5404 *lookup_info = 1; 5405 ret = 1; 5406 5407 out_unlock: 5408 btrfs_tree_unlock(next); 5409 free_extent_buffer(next); 5410 5411 return ret; 5412 } 5413 5414 /* 5415 * helper to process tree block while walking up the tree. 5416 * 5417 * when wc->stage == DROP_REFERENCE, this function drops 5418 * reference count on the block. 5419 * 5420 * when wc->stage == UPDATE_BACKREF, this function changes 5421 * wc->stage back to DROP_REFERENCE if we changed wc->stage 5422 * to UPDATE_BACKREF previously while processing the block. 5423 * 5424 * NOTE: return value 1 means we should stop walking up. 5425 */ 5426 static noinline int walk_up_proc(struct btrfs_trans_handle *trans, 5427 struct btrfs_root *root, 5428 struct btrfs_path *path, 5429 struct walk_control *wc) 5430 { 5431 struct btrfs_fs_info *fs_info = root->fs_info; 5432 int ret; 5433 int level = wc->level; 5434 struct extent_buffer *eb = path->nodes[level]; 5435 u64 parent = 0; 5436 5437 if (wc->stage == UPDATE_BACKREF) { 5438 BUG_ON(wc->shared_level < level); 5439 if (level < wc->shared_level) 5440 goto out; 5441 5442 ret = find_next_key(path, level + 1, &wc->update_progress); 5443 if (ret > 0) 5444 wc->update_ref = 0; 5445 5446 wc->stage = DROP_REFERENCE; 5447 wc->shared_level = -1; 5448 path->slots[level] = 0; 5449 5450 /* 5451 * check reference count again if the block isn't locked. 5452 * we should start walking down the tree again if reference 5453 * count is one. 
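 * (returning 1 further below does exactly that: the outer drop loop in
 * btrfs_drop_snapshot() then walks back down this subtree, now in the
 * DROP_REFERENCE stage)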
5454 */ 5455 if (!path->locks[level]) { 5456 BUG_ON(level == 0); 5457 btrfs_tree_lock(eb); 5458 path->locks[level] = BTRFS_WRITE_LOCK; 5459 5460 ret = btrfs_lookup_extent_info(trans, fs_info, 5461 eb->start, level, 1, 5462 &wc->refs[level], 5463 &wc->flags[level]); 5464 if (ret < 0) { 5465 btrfs_tree_unlock_rw(eb, path->locks[level]); 5466 path->locks[level] = 0; 5467 return ret; 5468 } 5469 BUG_ON(wc->refs[level] == 0); 5470 if (wc->refs[level] == 1) { 5471 btrfs_tree_unlock_rw(eb, path->locks[level]); 5472 path->locks[level] = 0; 5473 return 1; 5474 } 5475 } 5476 } 5477 5478 /* wc->stage == DROP_REFERENCE */ 5479 BUG_ON(wc->refs[level] > 1 && !path->locks[level]); 5480 5481 if (wc->refs[level] == 1) { 5482 if (level == 0) { 5483 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5484 ret = btrfs_dec_ref(trans, root, eb, 1); 5485 else 5486 ret = btrfs_dec_ref(trans, root, eb, 0); 5487 BUG_ON(ret); /* -ENOMEM */ 5488 if (is_fstree(root->root_key.objectid)) { 5489 ret = btrfs_qgroup_trace_leaf_items(trans, eb); 5490 if (ret) { 5491 btrfs_err_rl(fs_info, 5492 "error %d accounting leaf items, quota is out of sync, rescan required", 5493 ret); 5494 } 5495 } 5496 } 5497 /* make block locked assertion in btrfs_clean_tree_block happy */ 5498 if (!path->locks[level] && 5499 btrfs_header_generation(eb) == trans->transid) { 5500 btrfs_tree_lock(eb); 5501 path->locks[level] = BTRFS_WRITE_LOCK; 5502 } 5503 btrfs_clean_tree_block(eb); 5504 } 5505 5506 if (eb == root->node) { 5507 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5508 parent = eb->start; 5509 else if (root->root_key.objectid != btrfs_header_owner(eb)) 5510 goto owner_mismatch; 5511 } else { 5512 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5513 parent = path->nodes[level + 1]->start; 5514 else if (root->root_key.objectid != 5515 btrfs_header_owner(path->nodes[level + 1])) 5516 goto owner_mismatch; 5517 } 5518 5519 btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent, 5520 wc->refs[level] == 1); 5521 out: 5522 wc->refs[level] = 0; 5523 wc->flags[level] = 0; 5524 return 0; 5525 5526 owner_mismatch: 5527 btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu", 5528 btrfs_header_owner(eb), root->root_key.objectid); 5529 return -EUCLEAN; 5530 } 5531 5532 static noinline int walk_down_tree(struct btrfs_trans_handle *trans, 5533 struct btrfs_root *root, 5534 struct btrfs_path *path, 5535 struct walk_control *wc) 5536 { 5537 int level = wc->level; 5538 int lookup_info = 1; 5539 int ret; 5540 5541 while (level >= 0) { 5542 ret = walk_down_proc(trans, root, path, wc, lookup_info); 5543 if (ret > 0) 5544 break; 5545 5546 if (level == 0) 5547 break; 5548 5549 if (path->slots[level] >= 5550 btrfs_header_nritems(path->nodes[level])) 5551 break; 5552 5553 ret = do_walk_down(trans, root, path, wc, &lookup_info); 5554 if (ret > 0) { 5555 path->slots[level]++; 5556 continue; 5557 } else if (ret < 0) 5558 return ret; 5559 level = wc->level; 5560 } 5561 return 0; 5562 } 5563 5564 static noinline int walk_up_tree(struct btrfs_trans_handle *trans, 5565 struct btrfs_root *root, 5566 struct btrfs_path *path, 5567 struct walk_control *wc, int max_level) 5568 { 5569 int level = wc->level; 5570 int ret; 5571 5572 path->slots[level] = btrfs_header_nritems(path->nodes[level]); 5573 while (level < max_level && path->nodes[level]) { 5574 wc->level = level; 5575 if (path->slots[level] + 1 < 5576 btrfs_header_nritems(path->nodes[level])) { 5577 path->slots[level]++; 5578 return 0; 5579 } else { 5580 ret = walk_up_proc(trans, 
root, path, wc); 5581 if (ret > 0) 5582 return 0; 5583 if (ret < 0) 5584 return ret; 5585 5586 if (path->locks[level]) { 5587 btrfs_tree_unlock_rw(path->nodes[level], 5588 path->locks[level]); 5589 path->locks[level] = 0; 5590 } 5591 free_extent_buffer(path->nodes[level]); 5592 path->nodes[level] = NULL; 5593 level++; 5594 } 5595 } 5596 return 1; 5597 } 5598 5599 /* 5600 * drop a subvolume tree. 5601 * 5602 * this function traverses the tree freeing any blocks that are only 5603 * referenced by the tree. 5604 * 5605 * when a shared tree block is found, this function decreases its 5606 * reference count by one. if update_ref is true, this function 5607 * also makes sure backrefs for the shared block and all lower level 5608 * blocks are properly updated. 5609 * 5610 * If called with for_reloc == 0, may exit early with -EAGAIN. 5611 */ 5612 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) 5613 { 5614 struct btrfs_fs_info *fs_info = root->fs_info; 5615 struct btrfs_path *path; 5616 struct btrfs_trans_handle *trans; 5617 struct btrfs_root *tree_root = fs_info->tree_root; 5618 struct btrfs_root_item *root_item = &root->root_item; 5619 struct walk_control *wc; 5620 struct btrfs_key key; 5621 int err = 0; 5622 int ret; 5623 int level; 5624 bool root_dropped = false; 5625 bool unfinished_drop = false; 5626 5627 btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid); 5628 5629 path = btrfs_alloc_path(); 5630 if (!path) { 5631 err = -ENOMEM; 5632 goto out; 5633 } 5634 5635 wc = kzalloc(sizeof(*wc), GFP_NOFS); 5636 if (!wc) { 5637 btrfs_free_path(path); 5638 err = -ENOMEM; 5639 goto out; 5640 } 5641 5642 /* 5643 * Use join to avoid potential EINTR from transaction start. See 5644 * wait_reserve_ticket and the whole reservation callchain. 5645 */ 5646 if (for_reloc) 5647 trans = btrfs_join_transaction(tree_root); 5648 else 5649 trans = btrfs_start_transaction(tree_root, 0); 5650 if (IS_ERR(trans)) { 5651 err = PTR_ERR(trans); 5652 goto out_free; 5653 } 5654 5655 err = btrfs_run_delayed_items(trans); 5656 if (err) 5657 goto out_end_trans; 5658 5659 /* 5660 * This will help us catch people modifying the fs tree while we're 5661 * dropping it. It is unsafe to mess with the fs tree while it's being 5662 * dropped as we unlock the root node and parent nodes as we walk down 5663 * the tree, assuming nothing will change. If something does change 5664 * then we'll have stale information and drop references to blocks we've 5665 * already dropped.
5666 */ 5667 set_bit(BTRFS_ROOT_DELETING, &root->state); 5668 unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state); 5669 5670 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 5671 level = btrfs_header_level(root->node); 5672 path->nodes[level] = btrfs_lock_root_node(root); 5673 path->slots[level] = 0; 5674 path->locks[level] = BTRFS_WRITE_LOCK; 5675 memset(&wc->update_progress, 0, 5676 sizeof(wc->update_progress)); 5677 } else { 5678 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 5679 memcpy(&wc->update_progress, &key, 5680 sizeof(wc->update_progress)); 5681 5682 level = btrfs_root_drop_level(root_item); 5683 BUG_ON(level == 0); 5684 path->lowest_level = level; 5685 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5686 path->lowest_level = 0; 5687 if (ret < 0) { 5688 err = ret; 5689 goto out_end_trans; 5690 } 5691 WARN_ON(ret > 0); 5692 5693 /* 5694 * unlock our path, this is safe because only this 5695 * function is allowed to delete this snapshot 5696 */ 5697 btrfs_unlock_up_safe(path, 0); 5698 5699 level = btrfs_header_level(root->node); 5700 while (1) { 5701 btrfs_tree_lock(path->nodes[level]); 5702 path->locks[level] = BTRFS_WRITE_LOCK; 5703 5704 ret = btrfs_lookup_extent_info(trans, fs_info, 5705 path->nodes[level]->start, 5706 level, 1, &wc->refs[level], 5707 &wc->flags[level]); 5708 if (ret < 0) { 5709 err = ret; 5710 goto out_end_trans; 5711 } 5712 BUG_ON(wc->refs[level] == 0); 5713 5714 if (level == btrfs_root_drop_level(root_item)) 5715 break; 5716 5717 btrfs_tree_unlock(path->nodes[level]); 5718 path->locks[level] = 0; 5719 WARN_ON(wc->refs[level] != 1); 5720 level--; 5721 } 5722 } 5723 5724 wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state); 5725 wc->level = level; 5726 wc->shared_level = -1; 5727 wc->stage = DROP_REFERENCE; 5728 wc->update_ref = update_ref; 5729 wc->keep_locks = 0; 5730 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); 5731 5732 while (1) { 5733 5734 ret = walk_down_tree(trans, root, path, wc); 5735 if (ret < 0) { 5736 err = ret; 5737 break; 5738 } 5739 5740 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); 5741 if (ret < 0) { 5742 err = ret; 5743 break; 5744 } 5745 5746 if (ret > 0) { 5747 BUG_ON(wc->stage != DROP_REFERENCE); 5748 break; 5749 } 5750 5751 if (wc->stage == DROP_REFERENCE) { 5752 wc->drop_level = wc->level; 5753 btrfs_node_key_to_cpu(path->nodes[wc->drop_level], 5754 &wc->drop_progress, 5755 path->slots[wc->drop_level]); 5756 } 5757 btrfs_cpu_key_to_disk(&root_item->drop_progress, 5758 &wc->drop_progress); 5759 btrfs_set_root_drop_level(root_item, wc->drop_level); 5760 5761 BUG_ON(wc->level == 0); 5762 if (btrfs_should_end_transaction(trans) || 5763 (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) { 5764 ret = btrfs_update_root(trans, tree_root, 5765 &root->root_key, 5766 root_item); 5767 if (ret) { 5768 btrfs_abort_transaction(trans, ret); 5769 err = ret; 5770 goto out_end_trans; 5771 } 5772 5773 btrfs_end_transaction_throttle(trans); 5774 if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) { 5775 btrfs_debug(fs_info, 5776 "drop snapshot early exit"); 5777 err = -EAGAIN; 5778 goto out_free; 5779 } 5780 5781 /* 5782 * Use join to avoid potential EINTR from transaction 5783 * start. See wait_reserve_ticket and the whole 5784 * reservation callchain. 
		if (wc->stage == DROP_REFERENCE) {
			wc->drop_level = wc->level;
			btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
					      &wc->drop_progress,
					      path->slots[wc->drop_level]);
		}
		btrfs_cpu_key_to_disk(&root_item->drop_progress,
				      &wc->drop_progress);
		btrfs_set_root_drop_level(root_item, wc->drop_level);

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				err = ret;
				goto out_end_trans;
			}

			btrfs_end_transaction_throttle(trans);
			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
				btrfs_debug(fs_info,
					    "drop snapshot early exit");
				err = -EAGAIN;
				goto out_free;
			}

			/*
			 * Use join to avoid potential EINTR from transaction
			 * start. See wait_reserve_ticket and the whole
			 * reservation callchain.
			 */
			if (for_reloc)
				trans = btrfs_join_transaction(tree_root);
			else
				trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		err = ret;
		goto out_end_trans;
	}

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/*
			 * If we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	/*
	 * This subvolume is going to be completely dropped, and won't be
	 * recorded as dirty roots, thus pertrans meta rsv will not be freed
	 * at commit transaction time.  So free it here manually.
	 */
	btrfs_qgroup_convert_reserved_meta(root, INT_MAX);
	btrfs_qgroup_free_meta_all_pertrans(root);

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
		btrfs_add_dropped_root(trans, root);
	else
		btrfs_put_root(root);
	root_dropped = true;
out_end_trans:
	btrfs_end_transaction_throttle(trans);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * If this was an unfinished drop root, check whether any other
	 * unfinished drops are still pending; if not, clear the state and
	 * wake up any waiters.
	 */
	if (!err && unfinished_drop)
		btrfs_maybe_wake_unfinished_drop(fs_info);

	/*
	 * If we had to stop dropping the snapshot for whatever reason, make
	 * sure to add it back to the dead root list so that we keep trying
	 * to do the work later.  This also cleans up roots we don't have in
	 * the radix tree (like when we recover after a power failure or an
	 * unmount), so we don't leak memory.
	 */
	if (!for_reloc && !root_dropped)
		btrfs_add_dead_root(root);
	return err;
}
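/*
 * A minimal caller sketch for btrfs_drop_snapshot() (illustrative only; the
 * in-tree cleaner thread drives dead subvolumes in a similar way, e.g. from
 * btrfs_clean_one_deleted_snapshot()):
 *
 *	struct btrfs_root *root = ...;	// a root taken off fs_info->dead_roots
 *	int ret;
 *
 *	ret = btrfs_drop_snapshot(root, 0, 0);
 *	if (ret == -EAGAIN) {
 *		// Asked to back off: the root was re-queued above via
 *		// btrfs_add_dead_root(), so a later cleaner pass just calls
 *		// btrfs_drop_snapshot() again and resumes from the saved
 *		// drop_progress key.
 *	}
 */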
/*
 * Drop the subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * Only used by the relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_write_locked(parent);
	parent_level = btrfs_header_level(parent);
	atomic_inc(&parent->refs);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_write_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper to account the unused space of all the readonly block groups in the
 * space_info.  Takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	struct btrfs_block_group *block_group;
	u64 free_bytes = 0;
	int factor;

	/* It's df, we don't care if it's racy */
	if (list_empty(&sinfo->ro_bgs))
		return 0;

	spin_lock(&sinfo->lock);
	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		factor = btrfs_bg_type_to_factor(block_group->flags);
		free_bytes += (block_group->length -
			       block_group->used) * factor;

		spin_unlock(&block_group->lock);
	}
	spin_unlock(&sinfo->lock);

	return free_bytes;
}
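/*
 * Worked example for the factor accounting above (numbers are hypothetical):
 * a read-only RAID1 block group with length 1GiB and 600MiB used has 424MiB
 * of unused logical space.  RAID1 keeps two copies, so
 * btrfs_bg_type_to_factor() returns 2 and the block group contributes
 * (1024MiB - 600MiB) * 2 = 848MiB of raw device space to the reported free
 * total.
 */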
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end)
{
	return unpin_extent_range(fs_info, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space.  Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time.  We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released or
 * allocated in the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses.  For that, we need to take a reference to the
 * transaction and hold the commit root sem.  We only need to hold
 * it while performing the free space search since we have already
 * held back allocations.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
	u64 start = SZ_1M, len = 0, end = 0;
	int ret;

	*trimmed = 0;

	/* Discard not supported = nothing to do. */
	if (!blk_queue_discard(bdev_get_queue(device->bdev)))
		return 0;

	/* Not writable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			break;

		find_first_clear_extent_bit(&device->alloc_state, start,
					    &start, &end,
					    CHUNK_TRIMMED | CHUNK_ALLOCATED);

		/* Check if there are any CHUNK_* bits left */
		if (start > device->total_bytes) {
			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
			btrfs_warn_in_rcu(fs_info,
"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
					  start, end - start + 1,
					  rcu_str_deref(device->name),
					  device->total_bytes);
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		/* Ensure we skip the reserved area in the first 1M */
		start = max_t(u64, start, SZ_1M);

		/*
		 * If find_first_clear_extent_bit finds a range that spans the
		 * end of the device it will set end to -1, in this case it's
		 * up to the caller to trim the value to the size of the
		 * device.
		 */
		end = min(end, device->total_bytes - 1);

		len = end - start + 1;

		/* We didn't find any extents */
		if (!len) {
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len,
					  &bytes);
		if (!ret)
			set_extent_bits(&device->alloc_state, start,
					start + bytes - 1,
					CHUNK_TRIMMED);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
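/*
 * Worked example for the free space search above (layout is hypothetical):
 * on a 10GiB device with CHUNK_ALLOCATED set on [1GiB, 2GiB) and
 * CHUNK_TRIMMED set on [2GiB, 3GiB), the first pass finds the clear range
 * below 1GiB, clamps its start to skip the reserved first 1MiB, discards
 * [1MiB, 1GiB) and marks it CHUNK_TRIMMED.  The second pass finds the clear
 * range starting at 3GiB with end set to -1, clamps end to total_bytes - 1
 * and discards [3GiB, 10GiB).
 */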
/*
 * Trim the whole filesystem by:
 * 1) trimming the free space in each block group
 * 2) trimming the unallocated space on each device
 *
 * This will also continue trimming even if a block group or device encounters
 * an error.  The return value will be the last error, or 0 if nothing bad
 * happens.
 */
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_block_group *cache = NULL;
	struct btrfs_device *device;
	u64 group_trimmed;
	u64 range_end = U64_MAX;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 bg_failed = 0;
	u64 dev_failed = 0;
	int bg_ret = 0;
	int dev_ret = 0;
	int ret = 0;

	if (range->start == U64_MAX)
		return -EINVAL;

	/*
	 * Check range overflow if range->len is set.
	 * The default range->len is U64_MAX.
	 */
	if (range->len != U64_MAX &&
	    check_add_overflow(range->start, range->len, &range_end))
		return -EINVAL;

	cache = btrfs_lookup_first_block_group(fs_info, range->start);
	for (; cache; cache = btrfs_next_block_group(cache)) {
		if (cache->start >= range_end) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->start);
		end = min(range_end, cache->start + cache->length);

		if (end - start >= range->minlen) {
			if (!btrfs_block_group_done(cache)) {
				ret = btrfs_cache_block_group(cache, 0);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
				ret = btrfs_wait_block_group_cache_done(cache);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				bg_failed++;
				bg_ret = ret;
				continue;
			}
		}
	}

	if (bg_failed)
		btrfs_warn(fs_info,
			"failed to trim %llu block group(s), last error %d",
			bg_failed, bg_ret);

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
			continue;

		ret = btrfs_trim_free_extents(device, &group_trimmed);
		if (ret) {
			dev_failed++;
			dev_ret = ret;
			break;
		}

		trimmed += group_trimmed;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (dev_failed)
		btrfs_warn(fs_info,
			"failed to trim %llu device(s), last error %d",
			dev_failed, dev_ret);
	range->len = trimmed;
	if (bg_ret)
		return bg_ret;
	return dev_ret;
}
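/*
 * btrfs_trim_fs() is reached from the FITRIM ioctl.  A minimal user space
 * sketch (illustrative only; error handling mostly elided):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <limits.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,	// trim the whole filesystem
 *		.minlen = 0,		// accept any extent length
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0)
 *		// On success, range.len holds the bytes actually trimmed.
 *		printf("trimmed %llu bytes\n",
 *		       (unsigned long long)range.len);
 */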