// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "ctree.h"
#include "extent-tree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
#include "discard.h"
#include "rcu-string.h"
#include "zoned.h"
#include "dev-replace.h"
#include "fs.h"
#include "accessors.h"
#include "root-tree.h"
#include "file-item.h"
#include "orphan.h"
#include "tree-checker.h"

#undef SCRAMBLE_DELAYED_REFS

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);

static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	struct btrfs_root *root = btrfs_extent_root(fs_info, start);
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_root *extent_root;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	extent_root = btrfs_extent_root(fs_info, bytenr);
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
			ret = -EUCLEAN;
			btrfs_err(fs_info,
			"unexpected extent item size, has %u expect >= %zu",
				  item_size, sizeof(*ei));
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(fs_info, ret, NULL);

			goto out_free;
		}

		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
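/*
 * Worked example (a sketch, not tied to any particular filesystem state):
 * if the extent item on disk records refs == 3 and the delayed ref head
 * for the same bytenr carries ref_mod == -1 plus an extent op setting
 * BTRFS_EXTENT_FLAG_TREE_BLOCK, btrfs_lookup_extent_info() reports
 * refs == 2 and flags with BTRFS_EXTENT_FLAG_TREE_BLOCK set, i.e. the
 * values a commit would produce once the queued modifications are run.
 */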
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually, full back refs are generic and
 * can be used in all cases where implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
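/*
 * Illustration with hypothetical values: a 1MiB data extent at bytenr
 * 134217728 referenced once by inode 257 at file offset 0 in subvolume 5
 * would typically be described in the extent tree by
 *
 *     (134217728 EXTENT_ITEM 1048576)
 *
 * carrying an inline BTRFS_EXTENT_DATA_REF whose (root, objectid, offset)
 * triple is (5, 257, 0) with a count of 1. If relocation later makes the
 * reference shared, a BTRFS_SHARED_DATA_REF keyed by the parent leaf's
 * bytenr may be used instead.
 */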
/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->sectorsize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->sectorsize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	WARN_ON(1);
	btrfs_print_leaf(eb);
	btrfs_err(eb->fs_info,
		  "eb %llu iref 0x%lx invalid extent inline ref type %d",
		  eb->start, (unsigned long)iref, type);

	return BTRFS_REF_TYPE_INVALID;
}

u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}
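/*
 * Note on the hash above: the shift by 31 (rather than a full 32) looks
 * like it was meant to combine the two CRCs without overlap, but the
 * result is part of the on-disk key format now and must not change.
 * The hash only has to spread (root, objectid, offset) triples across
 * key offsets; collisions are legal and are resolved by linear probing,
 * see the key.offset++ retry loop in insert_extent_data_ref() and the
 * neighbouring-item scan in lookup_extent_data_ref().
 */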
static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
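/*
 * Keyed data refs are located by the hash of (root, objectid, offset),
 * so two different references can land on the same key offset. The
 * insert path below handles that by bumping key.offset and retrying
 * until it either finds a matching ref to update or an empty slot to
 * fill.
 */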
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		btrfs_err(trans->fs_info,
			  "unrecognized backref key (%llu %u %llu)",
			  key.objectid, key.type, key.offset);
		btrfs_abort_transaction(trans, -EUCLEAN);
		return -EUCLEAN;
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
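/*
 * Count the refs described by a single data backref, whether it lives
 * inline in the extent item (iref != NULL) or as the keyed item the
 * path currently points at. Sketch of the distinction: an inline
 * BTRFS_EXTENT_DATA_REF sits inside the extent item's payload after
 * struct btrfs_extent_item, while the keyed form is a separate item
 * that shares the extent's objectid (its bytenr).
 */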
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->search_for_extension = 1;
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our level, so we can just add one to get the level for the
	 * block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
			  bytenr, num_bytes, parent, root_objectid, owner,
			  offset);
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		err = -EUCLEAN;
		btrfs_err(fs_info,
			  "unexpected extent item size, has %llu expect >= %zu",
			  item_size, sizeof(*ei));
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			if (ptr > end) {
				err = -EUCLEAN;
				btrfs_print_leaf(path->nodes[0]);
				btrfs_crit(fs_info,
"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
					   path->slots[0], root_objectid, owner, offset, parent);
			}
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		path->search_for_extension = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
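/*
 * The NOTE above about ordering matters for the walk in
 * lookup_inline_extent_backref(): because inline refs inside one extent
 * item are kept in a defined order, the scan can stop early at the
 * first ref whose type or offset is past the wanted one and report
 * that position as the insertion point through *ref_ret.
 */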
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) {
		struct btrfs_key key;
		u32 extent_size;

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type == BTRFS_METADATA_ITEM_KEY)
			extent_size = fs_info->nodesize;
		else
			extent_size = key.offset;
		btrfs_print_leaf(leaf);
		btrfs_err(fs_info,
	"invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
			  key.objectid, extent_size, refs_to_mod, refs);
		return -EUCLEAN;
	}
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	/*
	 * Function btrfs_get_extent_inline_ref_type() has already printed
	 * error messages.
	 */
	if (unlikely(type == BTRFS_REF_TYPE_INVALID))
		return -EUCLEAN;

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		/*
		 * For tree blocks we can only drop one ref for it, and tree
		 * blocks should not have refs > 1.
		 *
		 * Furthermore if we're inserting a new inline backref, we
		 * won't reach this path either. That would be
		 * setup_inline_extent_backref().
		 */
		if (unlikely(refs_to_mod != -1)) {
			struct btrfs_key key;

			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

			btrfs_print_leaf(leaf);
			btrfs_err(fs_info,
			"invalid refs_to_mod for tree block %llu, has %d expect -1",
				  key.objectid, refs_to_mod);
			return -EUCLEAN;
		}
	}

	if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) {
		struct btrfs_key key;
		u32 extent_size;

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type == BTRFS_METADATA_ITEM_KEY)
			extent_size = fs_info->nodesize;
		else
			extent_size = key.offset;
		btrfs_print_leaf(leaf);
		btrfs_err(fs_info,
"invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
			  (unsigned long)iref, key.objectid, extent_size,
			  refs_to_mod, refs);
		return -EUCLEAN;
	}
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		/*
		 * We're adding refs to a tree block we already own, this
		 * should not happen at all.
		 */
		if (owner < BTRFS_FIRST_FREE_OBJECTID) {
			btrfs_print_leaf(path->nodes[0]);
			btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu slot %u",
				   bytenr, num_bytes, root_objectid, path->slots[0]);
			return -EUCLEAN;
		}
		ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref)
		ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
	else if (is_data)
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	else
		ret = btrfs_del_item(trans, root, path);
	return ret;
}
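/*
 * btrfs_issue_discard() below must never discard the superblock copies
 * that live at fixed device offsets (btrfs_sb_offset(): 64KiB, 64MiB and
 * 256GiB), so the requested range is split around any super mirror it
 * overlaps and each remaining piece is trimmed separately.
 */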
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << SECTOR_SHIFT);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
						   size >> SECTOR_SHIFT,
						   GFP_NOFS);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					   bytes_left >> SECTOR_SHIFT,
					   GFP_NOFS);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}

static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
{
	struct btrfs_device *dev = stripe->dev;
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 phys = stripe->physical;
	u64 len = stripe->length;
	u64 discarded = 0;
	int ret = 0;

	/* Zone reset on a zoned filesystem */
	if (btrfs_can_zone_reset(dev, phys, len)) {
		u64 src_disc;

		ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
		if (ret)
			goto out;

		if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
		    dev != dev_replace->srcdev)
			goto out;

		src_disc = discarded;

		/* Send to replace target as well */
		ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
					      &discarded);
		discarded += src_disc;
	} else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
		ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
	} else {
		ret = 0;
		*bytes = 0;
	}

out:
	*bytes = discarded;
	return ret;
}

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret = 0;
	u64 discarded_bytes = 0;
	u64 end = bytenr + num_bytes;
	u64 cur = bytenr;

	/*
	 * Avoid races with device replace and make sure the devices in the
	 * stripes don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	while (cur < end) {
		struct btrfs_discard_stripe *stripes;
		unsigned int num_stripes;
		int i;

		num_bytes = end - cur;
		stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
		if (IS_ERR(stripes)) {
			ret = PTR_ERR(stripes);
			if (ret == -EOPNOTSUPP)
				ret = 0;
			break;
		}

		for (i = 0; i < num_stripes; i++) {
			struct btrfs_discard_stripe *stripe = stripes + i;
			u64 bytes;

			if (!stripe->dev->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}

			if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&stripe->dev->dev_state))
				continue;

			ret = do_discard_extent(stripe, &bytes);
			if (ret) {
				/*
				 * Keep going if discard is not supported by the
				 * device.
				 */
				if (ret != -EOPNOTSUPP)
					break;
				ret = 0;
			} else {
				discarded_bytes += bytes;
			}
		}
		kfree(stripes);
		if (ret)
			break;
		cur += num_bytes;
	}
	btrfs_bio_counter_dec(fs_info);
	if (actual_bytes)
		*actual_bytes = discarded_bytes;
	return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
	       generic_ref->action);
	BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
	       generic_ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID);

	if (generic_ref->type == BTRFS_REF_METADATA)
		ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
	else
		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);

	btrfs_ref_tree_mod(fs_info, generic_ref);

	return ret;
}
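/*
 * Note that btrfs_inc_extent_ref() does not touch the extent tree
 * directly: it only queues a delayed ref (tree or data, depending on
 * generic_ref->type), and the backref items are inserted later when the
 * delayed refs are run, e.g. via btrfs_run_delayed_refs().
 */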
/*
 * __btrfs_inc_extent_ref - insert backreference for a given extent
 *
 * The counterpart is in __btrfs_free_extent(), with examples and more details
 * how it works.
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for
 *		    extent whose references are incremented.
 *
 * @parent:	    If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *		    BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *		    bytenr of the parent block. Since new extents are always
 *		    created with indirect references, this will only be the case
 *		    when relocating a shared extent. In that case, root_objectid
 *		    will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
 *		    be 0
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *		    this can be either one of the well-known metadata trees or
 *		    the subvolume id which references this extent.
 *
 * @owner:	    For data extents it is the inode number of the owning file.
 *		    For metadata extents this parameter holds the level in the
 *		    tree of the extent.
 *
 * @offset:	    For metadata extents the offset is ignored and is currently
 *		    always passed as 0. For data extents it is the fileoffset
 *		    this extent belongs to.
 *
 * @refs_to_add     Number of references to add
 *
 * @extent_op       Pointer to a structure, holding information necessary when
 *                  updating a tree block's flags
 *
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   parent, root_objectid, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* now insert the actual backref */
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset,
					     refs_to_add);
	}
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				bool insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, parent, ref_root,
						 flags, ref->objectid,
						 ref->offset, &ins,
						 node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->objectid, ref->offset,
					     node->ref_mod, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
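/*
 * Summary of the dispatch above: BTRFS_ADD_DELAYED_REF with
 * insert_reserved set means the extent item itself still has to be
 * created (only the space was reserved at allocation time), a plain add
 * increments the refs of an existing extent, and BTRFS_DROP_DELAYED_REF
 * hands the drop to __btrfs_free_extent().
 */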
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = 1;

	if (TRANS_ABORTED(trans))
		return 0;

	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

	root = btrfs_extent_root(fs_info, key.objectid);
again:
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);

	if (unlikely(item_size < sizeof(*ei))) {
		err = -EUCLEAN;
		btrfs_err(fs_info,
			  "unexpected extent item size, has %u expect >= %zu",
			  item_size, sizeof(*ei));
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				bool insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	u64 parent = 0;
	u64 ref_root = 0;

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->ref_mod != 1) {
		btrfs_err(trans->fs_info,
	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
			  node->bytenr, node->ref_mod, node->action, ref_root,
			  parent);
		return -EIO;
	}
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, node, extent_op);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       bool insert_reserved)
{
	int ret = 0;

	if (TRANS_ABORTED(trans)) {
		if (insert_reserved)
			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, node, extent_op,
					   insert_reserved);
	else
		BUG();
	if (ret && insert_reserved)
		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
	if (ret < 0)
		btrfs_err(trans->fs_info,
"failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
			  node->bytenr, node->num_bytes, node->type,
			  node->action, node->ref_mod, ret);
	return ret;
}
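/*
 * Note the pinning above: if the transaction was aborted, or if running
 * the ref fails while insert_reserved is set, the reserved extent never
 * made it into the extent tree, so it is pinned to make sure the space
 * is reclaimed rather than leaked when the transaction ends.
 */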
static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return NULL;

	/*
	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * This is to prevent a ref count from going down to zero, which deletes
	 * the extent item from the extent tree, when there still are references
	 * to add, which would fail because they would not find the extent item.
	 */
	if (!list_empty(&head->ref_add_list))
		return list_first_entry(&head->ref_add_list,
					struct btrfs_delayed_ref_node,
					add_list);

	ref = rb_entry(rb_first_cached(&head->ref_tree),
		       struct btrfs_delayed_ref_node, ref_node);
	ASSERT(list_empty(&ref->add_list));
	return ref;
}

static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
				      struct btrfs_delayed_ref_head *head)
{
	spin_lock(&delayed_refs->lock);
	head->processing = false;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
	btrfs_delayed_ref_unlock(head);
}

static struct btrfs_delayed_extent_op *cleanup_extent_op(
				struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;

	if (!extent_op)
		return NULL;

	if (head->must_insert_reserved) {
		head->extent_op = NULL;
		btrfs_free_delayed_extent_op(extent_op);
		return NULL;
	}
	return extent_op;
}

static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = cleanup_extent_op(head);
	if (!extent_op)
		return 0;
	head->extent_op = NULL;
	spin_unlock(&head->lock);
	ret = run_delayed_extent_op(trans, head, extent_op);
	btrfs_free_delayed_extent_op(extent_op);
	return ret ? ret : 1;
}

void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
				  struct btrfs_delayed_ref_root *delayed_refs,
				  struct btrfs_delayed_ref_head *head)
{
	int nr_items = 1;	/* Dropping this ref head update. */

	/*
	 * We had csum deletions accounted for in our delayed refs rsv, we need
	 * to drop the csum leaves for this update from our delayed_refs_rsv.
	 */
	if (head->total_ref_mod < 0 && head->is_data) {
		spin_lock(&delayed_refs->lock);
		delayed_refs->pending_csums -= head->num_bytes;
		spin_unlock(&delayed_refs->lock);
		nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
	}

	btrfs_delayed_refs_rsv_release(fs_info, nr_items);
}

static int cleanup_ref_head(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;

	ret = run_and_cleanup_extent_op(trans, head);
	if (ret < 0) {
		unselect_delayed_ref_head(delayed_refs, head);
		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
		return ret;
	} else if (ret) {
		return ret;
	}

	/*
	 * Need to drop our head ref lock and re-acquire the delayed ref lock
	 * and then re-check to make sure nobody got added.
	 */
	spin_unlock(&head->lock);
	spin_lock(&delayed_refs->lock);
	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		return 1;
	}
	btrfs_delete_ref_head(delayed_refs, head);
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	if (head->must_insert_reserved) {
		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
		if (head->is_data) {
			struct btrfs_root *csum_root;

			csum_root = btrfs_csum_root(fs_info, head->bytenr);
			ret = btrfs_del_csums(trans, csum_root, head->bytenr,
					      head->num_bytes);
		}
	}

	btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);

	trace_run_delayed_ref_head(fs_info, head, 0);
	btrfs_delayed_ref_unlock(head);
	btrfs_put_delayed_ref_head(head);
	return ret;
}

static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
					struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_head *head = NULL;
	int ret;

	spin_lock(&delayed_refs->lock);
	head = btrfs_select_ref_head(delayed_refs);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return head;
	}

	/*
	 * Grab the lock that says we are going to process all the refs for
	 * this head
	 */
	ret = btrfs_delayed_ref_lock(delayed_refs, head);
	spin_unlock(&delayed_refs->lock);

	/*
	 * We may have dropped the spin lock to get the head mutex lock, and
	 * that might have given someone else time to free the head. If that's
	 * true, it has been removed from our list and we can move on.
	 */
	if (ret == -EAGAIN)
		head = ERR_PTR(-EAGAIN);

	return head;
}
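/*
 * A sketch of the locking used while running one head: the head's mutex
 * (taken via btrfs_delayed_ref_lock()) serializes the whole run against
 * other users of the head, while head->lock only protects the rbtree of
 * individual refs and is dropped around each run_one_delayed_ref() call
 * so new refs can still be queued on the head in the meantime.
 */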
1978 */ 1979 must_insert_reserved = locked_ref->must_insert_reserved; 1980 locked_ref->must_insert_reserved = false; 1981 1982 extent_op = locked_ref->extent_op; 1983 locked_ref->extent_op = NULL; 1984 spin_unlock(&locked_ref->lock); 1985 1986 ret = run_one_delayed_ref(trans, ref, extent_op, 1987 must_insert_reserved); 1988 1989 btrfs_free_delayed_extent_op(extent_op); 1990 if (ret) { 1991 unselect_delayed_ref_head(delayed_refs, locked_ref); 1992 btrfs_put_delayed_ref(ref); 1993 return ret; 1994 } 1995 1996 btrfs_put_delayed_ref(ref); 1997 cond_resched(); 1998 1999 spin_lock(&locked_ref->lock); 2000 btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); 2001 } 2002 2003 return 0; 2004 } 2005 2006 /* 2007 * Returns 0 on success or if called with an already aborted transaction. 2008 * Returns -ENOMEM or -EIO on failure and will abort the transaction. 2009 */ 2010 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2011 unsigned long nr) 2012 { 2013 struct btrfs_fs_info *fs_info = trans->fs_info; 2014 struct btrfs_delayed_ref_root *delayed_refs; 2015 struct btrfs_delayed_ref_head *locked_ref = NULL; 2016 int ret; 2017 unsigned long count = 0; 2018 2019 delayed_refs = &trans->transaction->delayed_refs; 2020 do { 2021 if (!locked_ref) { 2022 locked_ref = btrfs_obtain_ref_head(trans); 2023 if (IS_ERR_OR_NULL(locked_ref)) { 2024 if (PTR_ERR(locked_ref) == -EAGAIN) { 2025 continue; 2026 } else { 2027 break; 2028 } 2029 } 2030 count++; 2031 } 2032 /* 2033 * We need to try and merge add/drops of the same ref since we 2034 * can run into issues with relocate dropping the implicit ref 2035 * and then it being added back again before the drop can 2036 * finish. If we merged anything we need to re-loop so we can 2037 * get a good ref. 2038 * Or we can get node references of the same type that weren't 2039 * merged when created due to bumps in the tree mod seq, and 2040 * we need to merge them to prevent adding an inline extent 2041 * backref before dropping it (triggering a BUG_ON at 2042 * insert_inline_extent_backref()). 2043 */ 2044 spin_lock(&locked_ref->lock); 2045 btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); 2046 2047 ret = btrfs_run_delayed_refs_for_head(trans, locked_ref); 2048 if (ret < 0 && ret != -EAGAIN) { 2049 /* 2050 * Error, btrfs_run_delayed_refs_for_head already 2051 * unlocked everything so just bail out 2052 */ 2053 return ret; 2054 } else if (!ret) { 2055 /* 2056 * Success, perform the usual cleanup of a processed 2057 * head 2058 */ 2059 ret = cleanup_ref_head(trans, locked_ref); 2060 if (ret > 0 ) { 2061 /* We dropped our lock, we need to loop. */ 2062 ret = 0; 2063 continue; 2064 } else if (ret) { 2065 return ret; 2066 } 2067 } 2068 2069 /* 2070 * Either success case or btrfs_run_delayed_refs_for_head 2071 * returned -EAGAIN, meaning we need to select another head 2072 */ 2073 2074 locked_ref = NULL; 2075 cond_resched(); 2076 } while ((nr != -1 && count < nr) || locked_ref); 2077 2078 return 0; 2079 } 2080 2081 #ifdef SCRAMBLE_DELAYED_REFS 2082 /* 2083 * Normally delayed refs get processed in ascending bytenr order. This 2084 * correlates in most cases to the order added. 
To expose dependencies on this 2085 * order, we start to process the tree in the middle instead of the beginning 2086 */ 2087 static u64 find_middle(struct rb_root *root) 2088 { 2089 struct rb_node *n = root->rb_node; 2090 struct btrfs_delayed_ref_node *entry; 2091 int alt = 1; 2092 u64 middle; 2093 u64 first = 0, last = 0; 2094 2095 n = rb_first(root); 2096 if (n) { 2097 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2098 first = entry->bytenr; 2099 } 2100 n = rb_last(root); 2101 if (n) { 2102 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2103 last = entry->bytenr; 2104 } 2105 n = root->rb_node; 2106 2107 while (n) { 2108 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2109 WARN_ON(!entry->in_tree); 2110 2111 middle = entry->bytenr; 2112 2113 if (alt) 2114 n = n->rb_left; 2115 else 2116 n = n->rb_right; 2117 2118 alt = 1 - alt; 2119 } 2120 return middle; 2121 } 2122 #endif 2123 2124 /* 2125 * this starts processing the delayed reference count updates and 2126 * extent insertions we have queued up so far. count can be 2127 * 0, which means to process everything in the tree at the start 2128 * of the run (but not newly added entries), or it can be some target 2129 * number you'd like to process. 2130 * 2131 * Returns 0 on success or if called with an aborted transaction 2132 * Returns <0 on error and aborts the transaction 2133 */ 2134 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2135 unsigned long count) 2136 { 2137 struct btrfs_fs_info *fs_info = trans->fs_info; 2138 struct rb_node *node; 2139 struct btrfs_delayed_ref_root *delayed_refs; 2140 struct btrfs_delayed_ref_head *head; 2141 int ret; 2142 int run_all = count == (unsigned long)-1; 2143 2144 /* We'll clean this up in btrfs_cleanup_transaction */ 2145 if (TRANS_ABORTED(trans)) 2146 return 0; 2147 2148 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) 2149 return 0; 2150 2151 delayed_refs = &trans->transaction->delayed_refs; 2152 if (count == 0) 2153 count = delayed_refs->num_heads_ready; 2154 2155 again: 2156 #ifdef SCRAMBLE_DELAYED_REFS 2157 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); 2158 #endif 2159 ret = __btrfs_run_delayed_refs(trans, count); 2160 if (ret < 0) { 2161 btrfs_abort_transaction(trans, ret); 2162 return ret; 2163 } 2164 2165 if (run_all) { 2166 btrfs_create_pending_block_groups(trans); 2167 2168 spin_lock(&delayed_refs->lock); 2169 node = rb_first_cached(&delayed_refs->href_root); 2170 if (!node) { 2171 spin_unlock(&delayed_refs->lock); 2172 goto out; 2173 } 2174 head = rb_entry(node, struct btrfs_delayed_ref_head, 2175 href_node); 2176 refcount_inc(&head->refs); 2177 spin_unlock(&delayed_refs->lock); 2178 2179 /* Mutex was contended, block until it's released and retry. 
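 * Once the mutex can be taken the previous holder is done with the
 * head, so we just drop the extra reference and rescan the tree.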
*/ 2180 mutex_lock(&head->mutex); 2181 mutex_unlock(&head->mutex); 2182 2183 btrfs_put_delayed_ref_head(head); 2184 cond_resched(); 2185 goto again; 2186 } 2187 out: 2188 return 0; 2189 } 2190 2191 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2192 struct extent_buffer *eb, u64 flags) 2193 { 2194 struct btrfs_delayed_extent_op *extent_op; 2195 int level = btrfs_header_level(eb); 2196 int ret; 2197 2198 extent_op = btrfs_alloc_delayed_extent_op(); 2199 if (!extent_op) 2200 return -ENOMEM; 2201 2202 extent_op->flags_to_set = flags; 2203 extent_op->update_flags = true; 2204 extent_op->update_key = false; 2205 extent_op->level = level; 2206 2207 ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op); 2208 if (ret) 2209 btrfs_free_delayed_extent_op(extent_op); 2210 return ret; 2211 } 2212 2213 static noinline int check_delayed_ref(struct btrfs_root *root, 2214 struct btrfs_path *path, 2215 u64 objectid, u64 offset, u64 bytenr) 2216 { 2217 struct btrfs_delayed_ref_head *head; 2218 struct btrfs_delayed_ref_node *ref; 2219 struct btrfs_delayed_data_ref *data_ref; 2220 struct btrfs_delayed_ref_root *delayed_refs; 2221 struct btrfs_transaction *cur_trans; 2222 struct rb_node *node; 2223 int ret = 0; 2224 2225 spin_lock(&root->fs_info->trans_lock); 2226 cur_trans = root->fs_info->running_transaction; 2227 if (cur_trans) 2228 refcount_inc(&cur_trans->use_count); 2229 spin_unlock(&root->fs_info->trans_lock); 2230 if (!cur_trans) 2231 return 0; 2232 2233 delayed_refs = &cur_trans->delayed_refs; 2234 spin_lock(&delayed_refs->lock); 2235 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); 2236 if (!head) { 2237 spin_unlock(&delayed_refs->lock); 2238 btrfs_put_transaction(cur_trans); 2239 return 0; 2240 } 2241 2242 if (!mutex_trylock(&head->mutex)) { 2243 if (path->nowait) { 2244 spin_unlock(&delayed_refs->lock); 2245 btrfs_put_transaction(cur_trans); 2246 return -EAGAIN; 2247 } 2248 2249 refcount_inc(&head->refs); 2250 spin_unlock(&delayed_refs->lock); 2251 2252 btrfs_release_path(path); 2253 2254 /* 2255 * Mutex was contended, block until it's released and let 2256 * caller try again 2257 */ 2258 mutex_lock(&head->mutex); 2259 mutex_unlock(&head->mutex); 2260 btrfs_put_delayed_ref_head(head); 2261 btrfs_put_transaction(cur_trans); 2262 return -EAGAIN; 2263 } 2264 spin_unlock(&delayed_refs->lock); 2265 2266 spin_lock(&head->lock); 2267 /* 2268 * XXX: We should replace this with a proper search function in the 2269 * future. 2270 */ 2271 for (node = rb_first_cached(&head->ref_tree); node; 2272 node = rb_next(node)) { 2273 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); 2274 /* If it's a shared ref we know a cross reference exists */ 2275 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { 2276 ret = 1; 2277 break; 2278 } 2279 2280 data_ref = btrfs_delayed_node_to_data_ref(ref); 2281 2282 /* 2283 * If our ref doesn't match the one we're currently looking at 2284 * then we have a cross reference. 
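 * E.g. a pending ref against the same bytenr from another root,
 * another inode, or another file offset means the extent is not
 * exclusively owned by (objectid, offset) in this root.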
2285 */ 2286 if (data_ref->root != root->root_key.objectid || 2287 data_ref->objectid != objectid || 2288 data_ref->offset != offset) { 2289 ret = 1; 2290 break; 2291 } 2292 } 2293 spin_unlock(&head->lock); 2294 mutex_unlock(&head->mutex); 2295 btrfs_put_transaction(cur_trans); 2296 return ret; 2297 } 2298 2299 static noinline int check_committed_ref(struct btrfs_root *root, 2300 struct btrfs_path *path, 2301 u64 objectid, u64 offset, u64 bytenr, 2302 bool strict) 2303 { 2304 struct btrfs_fs_info *fs_info = root->fs_info; 2305 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); 2306 struct extent_buffer *leaf; 2307 struct btrfs_extent_data_ref *ref; 2308 struct btrfs_extent_inline_ref *iref; 2309 struct btrfs_extent_item *ei; 2310 struct btrfs_key key; 2311 u32 item_size; 2312 int type; 2313 int ret; 2314 2315 key.objectid = bytenr; 2316 key.offset = (u64)-1; 2317 key.type = BTRFS_EXTENT_ITEM_KEY; 2318 2319 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2320 if (ret < 0) 2321 goto out; 2322 BUG_ON(ret == 0); /* Corruption */ 2323 2324 ret = -ENOENT; 2325 if (path->slots[0] == 0) 2326 goto out; 2327 2328 path->slots[0]--; 2329 leaf = path->nodes[0]; 2330 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2331 2332 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) 2333 goto out; 2334 2335 ret = 1; 2336 item_size = btrfs_item_size(leaf, path->slots[0]); 2337 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2338 2339 /* If extent item has more than 1 inline ref then it's shared */ 2340 if (item_size != sizeof(*ei) + 2341 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) 2342 goto out; 2343 2344 /* 2345 * If extent created before last snapshot => it's shared unless the 2346 * snapshot has been deleted. Use the heuristic if strict is false. 
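 * With strict == true the heuristic is skipped and we only report
 * sharing when a ref other than ours is actually found below.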
2347 */ 2348 if (!strict && 2349 (btrfs_extent_generation(leaf, ei) <= 2350 btrfs_root_last_snapshot(&root->root_item))) 2351 goto out; 2352 2353 iref = (struct btrfs_extent_inline_ref *)(ei + 1); 2354 2355 /* If this extent has SHARED_DATA_REF then it's shared */ 2356 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); 2357 if (type != BTRFS_EXTENT_DATA_REF_KEY) 2358 goto out; 2359 2360 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 2361 if (btrfs_extent_refs(leaf, ei) != 2362 btrfs_extent_data_ref_count(leaf, ref) || 2363 btrfs_extent_data_ref_root(leaf, ref) != 2364 root->root_key.objectid || 2365 btrfs_extent_data_ref_objectid(leaf, ref) != objectid || 2366 btrfs_extent_data_ref_offset(leaf, ref) != offset) 2367 goto out; 2368 2369 ret = 0; 2370 out: 2371 return ret; 2372 } 2373 2374 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, 2375 u64 bytenr, bool strict, struct btrfs_path *path) 2376 { 2377 int ret; 2378 2379 do { 2380 ret = check_committed_ref(root, path, objectid, 2381 offset, bytenr, strict); 2382 if (ret && ret != -ENOENT) 2383 goto out; 2384 2385 ret = check_delayed_ref(root, path, objectid, offset, bytenr); 2386 } while (ret == -EAGAIN); 2387 2388 out: 2389 btrfs_release_path(path); 2390 if (btrfs_is_data_reloc_root(root)) 2391 WARN_ON(ret > 0); 2392 return ret; 2393 } 2394 2395 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, 2396 struct btrfs_root *root, 2397 struct extent_buffer *buf, 2398 int full_backref, int inc) 2399 { 2400 struct btrfs_fs_info *fs_info = root->fs_info; 2401 u64 bytenr; 2402 u64 num_bytes; 2403 u64 parent; 2404 u64 ref_root; 2405 u32 nritems; 2406 struct btrfs_key key; 2407 struct btrfs_file_extent_item *fi; 2408 struct btrfs_ref generic_ref = { 0 }; 2409 bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC); 2410 int i; 2411 int action; 2412 int level; 2413 int ret = 0; 2414 2415 if (btrfs_is_testing(fs_info)) 2416 return 0; 2417 2418 ref_root = btrfs_header_owner(buf); 2419 nritems = btrfs_header_nritems(buf); 2420 level = btrfs_header_level(buf); 2421 2422 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) 2423 return 0; 2424 2425 if (full_backref) 2426 parent = buf->start; 2427 else 2428 parent = 0; 2429 if (inc) 2430 action = BTRFS_ADD_DELAYED_REF; 2431 else 2432 action = BTRFS_DROP_DELAYED_REF; 2433 2434 for (i = 0; i < nritems; i++) { 2435 if (level == 0) { 2436 btrfs_item_key_to_cpu(buf, &key, i); 2437 if (key.type != BTRFS_EXTENT_DATA_KEY) 2438 continue; 2439 fi = btrfs_item_ptr(buf, i, 2440 struct btrfs_file_extent_item); 2441 if (btrfs_file_extent_type(buf, fi) == 2442 BTRFS_FILE_EXTENT_INLINE) 2443 continue; 2444 bytenr = btrfs_file_extent_disk_bytenr(buf, fi); 2445 if (bytenr == 0) 2446 continue; 2447 2448 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); 2449 key.offset -= btrfs_file_extent_offset(buf, fi); 2450 btrfs_init_generic_ref(&generic_ref, action, bytenr, 2451 num_bytes, parent); 2452 btrfs_init_data_ref(&generic_ref, ref_root, key.objectid, 2453 key.offset, root->root_key.objectid, 2454 for_reloc); 2455 if (inc) 2456 ret = btrfs_inc_extent_ref(trans, &generic_ref); 2457 else 2458 ret = btrfs_free_extent(trans, &generic_ref); 2459 if (ret) 2460 goto fail; 2461 } else { 2462 bytenr = btrfs_node_blockptr(buf, i); 2463 num_bytes = fs_info->nodesize; 2464 btrfs_init_generic_ref(&generic_ref, action, bytenr, 2465 num_bytes, parent); 2466 btrfs_init_tree_ref(&generic_ref, level - 1, ref_root, 2467 root->root_key.objectid, for_reloc); 
2468 if (inc) 2469 ret = btrfs_inc_extent_ref(trans, &generic_ref); 2470 else 2471 ret = btrfs_free_extent(trans, &generic_ref); 2472 if (ret) 2473 goto fail; 2474 } 2475 } 2476 return 0; 2477 fail: 2478 return ret; 2479 } 2480 2481 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2482 struct extent_buffer *buf, int full_backref) 2483 { 2484 return __btrfs_mod_ref(trans, root, buf, full_backref, 1); 2485 } 2486 2487 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2488 struct extent_buffer *buf, int full_backref) 2489 { 2490 return __btrfs_mod_ref(trans, root, buf, full_backref, 0); 2491 } 2492 2493 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) 2494 { 2495 struct btrfs_fs_info *fs_info = root->fs_info; 2496 u64 flags; 2497 u64 ret; 2498 2499 if (data) 2500 flags = BTRFS_BLOCK_GROUP_DATA; 2501 else if (root == fs_info->chunk_root) 2502 flags = BTRFS_BLOCK_GROUP_SYSTEM; 2503 else 2504 flags = BTRFS_BLOCK_GROUP_METADATA; 2505 2506 ret = btrfs_get_alloc_profile(fs_info, flags); 2507 return ret; 2508 } 2509 2510 static u64 first_logical_byte(struct btrfs_fs_info *fs_info) 2511 { 2512 struct rb_node *leftmost; 2513 u64 bytenr = 0; 2514 2515 read_lock(&fs_info->block_group_cache_lock); 2516 /* Get the block group with the lowest logical start address. */ 2517 leftmost = rb_first_cached(&fs_info->block_group_cache_tree); 2518 if (leftmost) { 2519 struct btrfs_block_group *bg; 2520 2521 bg = rb_entry(leftmost, struct btrfs_block_group, cache_node); 2522 bytenr = bg->start; 2523 } 2524 read_unlock(&fs_info->block_group_cache_lock); 2525 2526 return bytenr; 2527 } 2528 2529 static int pin_down_extent(struct btrfs_trans_handle *trans, 2530 struct btrfs_block_group *cache, 2531 u64 bytenr, u64 num_bytes, int reserved) 2532 { 2533 struct btrfs_fs_info *fs_info = cache->fs_info; 2534 2535 spin_lock(&cache->space_info->lock); 2536 spin_lock(&cache->lock); 2537 cache->pinned += num_bytes; 2538 btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info, 2539 num_bytes); 2540 if (reserved) { 2541 cache->reserved -= num_bytes; 2542 cache->space_info->bytes_reserved -= num_bytes; 2543 } 2544 spin_unlock(&cache->lock); 2545 spin_unlock(&cache->space_info->lock); 2546 2547 set_extent_bit(&trans->transaction->pinned_extents, bytenr, 2548 bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); 2549 return 0; 2550 } 2551 2552 int btrfs_pin_extent(struct btrfs_trans_handle *trans, 2553 u64 bytenr, u64 num_bytes, int reserved) 2554 { 2555 struct btrfs_block_group *cache; 2556 2557 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); 2558 BUG_ON(!cache); /* Logic error */ 2559 2560 pin_down_extent(trans, cache, bytenr, num_bytes, reserved); 2561 2562 btrfs_put_block_group(cache); 2563 return 0; 2564 } 2565 2566 /* 2567 * this function must be called within transaction 2568 */ 2569 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, 2570 u64 bytenr, u64 num_bytes) 2571 { 2572 struct btrfs_block_group *cache; 2573 int ret; 2574 2575 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); 2576 if (!cache) 2577 return -EINVAL; 2578 2579 /* 2580 * Fully cache the free space first so that our pin removes the free space 2581 * from the cache. 
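 * (The block group may not have been cached yet during log replay; if
 * we removed the range before caching finished, caching could later
 * re-add the pinned range as free space.)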
2582 */ 2583 ret = btrfs_cache_block_group(cache, true); 2584 if (ret) 2585 goto out; 2586 2587 pin_down_extent(trans, cache, bytenr, num_bytes, 0); 2588 2589 /* remove us from the free space cache (if we're there at all) */ 2590 ret = btrfs_remove_free_space(cache, bytenr, num_bytes); 2591 out: 2592 btrfs_put_block_group(cache); 2593 return ret; 2594 } 2595 2596 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, 2597 u64 start, u64 num_bytes) 2598 { 2599 int ret; 2600 struct btrfs_block_group *block_group; 2601 2602 block_group = btrfs_lookup_block_group(fs_info, start); 2603 if (!block_group) 2604 return -EINVAL; 2605 2606 ret = btrfs_cache_block_group(block_group, true); 2607 if (ret) 2608 goto out; 2609 2610 ret = btrfs_remove_free_space(block_group, start, num_bytes); 2611 out: 2612 btrfs_put_block_group(block_group); 2613 return ret; 2614 } 2615 2616 int btrfs_exclude_logged_extents(struct extent_buffer *eb) 2617 { 2618 struct btrfs_fs_info *fs_info = eb->fs_info; 2619 struct btrfs_file_extent_item *item; 2620 struct btrfs_key key; 2621 int found_type; 2622 int i; 2623 int ret = 0; 2624 2625 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) 2626 return 0; 2627 2628 for (i = 0; i < btrfs_header_nritems(eb); i++) { 2629 btrfs_item_key_to_cpu(eb, &key, i); 2630 if (key.type != BTRFS_EXTENT_DATA_KEY) 2631 continue; 2632 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); 2633 found_type = btrfs_file_extent_type(eb, item); 2634 if (found_type == BTRFS_FILE_EXTENT_INLINE) 2635 continue; 2636 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 2637 continue; 2638 key.objectid = btrfs_file_extent_disk_bytenr(eb, item); 2639 key.offset = btrfs_file_extent_disk_num_bytes(eb, item); 2640 ret = __exclude_logged_extent(fs_info, key.objectid, key.offset); 2641 if (ret) 2642 break; 2643 } 2644 2645 return ret; 2646 } 2647 2648 static void 2649 btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) 2650 { 2651 atomic_inc(&bg->reservations); 2652 } 2653 2654 /* 2655 * Returns the free cluster for the given space info and sets empty_cluster to 2656 * what it should be based on the mount options. 
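 * Metadata allocations cluster with a 2M empty_cluster on ssd (64K
 * otherwise); data allocations only cluster under ssd_spread, also
 * with 2M. Mixed space infos never use clusters.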
2657 */ 2658 static struct btrfs_free_cluster * 2659 fetch_cluster_info(struct btrfs_fs_info *fs_info, 2660 struct btrfs_space_info *space_info, u64 *empty_cluster) 2661 { 2662 struct btrfs_free_cluster *ret = NULL; 2663 2664 *empty_cluster = 0; 2665 if (btrfs_mixed_space_info(space_info)) 2666 return ret; 2667 2668 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 2669 ret = &fs_info->meta_alloc_cluster; 2670 if (btrfs_test_opt(fs_info, SSD)) 2671 *empty_cluster = SZ_2M; 2672 else 2673 *empty_cluster = SZ_64K; 2674 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && 2675 btrfs_test_opt(fs_info, SSD_SPREAD)) { 2676 *empty_cluster = SZ_2M; 2677 ret = &fs_info->data_alloc_cluster; 2678 } 2679 2680 return ret; 2681 } 2682 2683 static int unpin_extent_range(struct btrfs_fs_info *fs_info, 2684 u64 start, u64 end, 2685 const bool return_free_space) 2686 { 2687 struct btrfs_block_group *cache = NULL; 2688 struct btrfs_space_info *space_info; 2689 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 2690 struct btrfs_free_cluster *cluster = NULL; 2691 u64 len; 2692 u64 total_unpinned = 0; 2693 u64 empty_cluster = 0; 2694 bool readonly; 2695 2696 while (start <= end) { 2697 readonly = false; 2698 if (!cache || 2699 start >= cache->start + cache->length) { 2700 if (cache) 2701 btrfs_put_block_group(cache); 2702 total_unpinned = 0; 2703 cache = btrfs_lookup_block_group(fs_info, start); 2704 BUG_ON(!cache); /* Logic error */ 2705 2706 cluster = fetch_cluster_info(fs_info, 2707 cache->space_info, 2708 &empty_cluster); 2709 empty_cluster <<= 1; 2710 } 2711 2712 len = cache->start + cache->length - start; 2713 len = min(len, end + 1 - start); 2714 2715 if (return_free_space) 2716 btrfs_add_free_space(cache, start, len); 2717 2718 start += len; 2719 total_unpinned += len; 2720 space_info = cache->space_info; 2721 2722 /* 2723 * If this space cluster has been marked as fragmented and we've 2724 * unpinned enough in this block group to potentially allow a 2725 * cluster to be created inside of it go ahead and clear the 2726 * fragmented check. 
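 * (empty_cluster was doubled when this block group was looked up, so
 * this effectively waits for two clusters worth of frees in the same
 * group before retrying clustered allocation.)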
2727 */ 2728 if (cluster && cluster->fragmented && 2729 total_unpinned > empty_cluster) { 2730 spin_lock(&cluster->lock); 2731 cluster->fragmented = 0; 2732 spin_unlock(&cluster->lock); 2733 } 2734 2735 spin_lock(&space_info->lock); 2736 spin_lock(&cache->lock); 2737 cache->pinned -= len; 2738 btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len); 2739 space_info->max_extent_size = 0; 2740 if (cache->ro) { 2741 space_info->bytes_readonly += len; 2742 readonly = true; 2743 } else if (btrfs_is_zoned(fs_info)) { 2744 /* Need reset before reusing in a zoned block group */ 2745 space_info->bytes_zone_unusable += len; 2746 readonly = true; 2747 } 2748 spin_unlock(&cache->lock); 2749 if (!readonly && return_free_space && 2750 global_rsv->space_info == space_info) { 2751 spin_lock(&global_rsv->lock); 2752 if (!global_rsv->full) { 2753 u64 to_add = min(len, global_rsv->size - 2754 global_rsv->reserved); 2755 2756 global_rsv->reserved += to_add; 2757 btrfs_space_info_update_bytes_may_use(fs_info, 2758 space_info, to_add); 2759 if (global_rsv->reserved >= global_rsv->size) 2760 global_rsv->full = 1; 2761 len -= to_add; 2762 } 2763 spin_unlock(&global_rsv->lock); 2764 } 2765 /* Add to any tickets we may have */ 2766 if (!readonly && return_free_space && len) 2767 btrfs_try_granting_tickets(fs_info, space_info); 2768 spin_unlock(&space_info->lock); 2769 } 2770 2771 if (cache) 2772 btrfs_put_block_group(cache); 2773 return 0; 2774 } 2775 2776 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) 2777 { 2778 struct btrfs_fs_info *fs_info = trans->fs_info; 2779 struct btrfs_block_group *block_group, *tmp; 2780 struct list_head *deleted_bgs; 2781 struct extent_io_tree *unpin; 2782 u64 start; 2783 u64 end; 2784 int ret; 2785 2786 unpin = &trans->transaction->pinned_extents; 2787 2788 while (!TRANS_ABORTED(trans)) { 2789 struct extent_state *cached_state = NULL; 2790 2791 mutex_lock(&fs_info->unused_bg_unpin_mutex); 2792 if (!find_first_extent_bit(unpin, 0, &start, &end, 2793 EXTENT_DIRTY, &cached_state)) { 2794 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 2795 break; 2796 } 2797 2798 if (btrfs_test_opt(fs_info, DISCARD_SYNC)) 2799 ret = btrfs_discard_extent(fs_info, start, 2800 end + 1 - start, NULL); 2801 2802 clear_extent_dirty(unpin, start, end, &cached_state); 2803 unpin_extent_range(fs_info, start, end, true); 2804 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 2805 free_extent_state(cached_state); 2806 cond_resched(); 2807 } 2808 2809 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { 2810 btrfs_discard_calc_delay(&fs_info->discard_ctl); 2811 btrfs_discard_schedule_work(&fs_info->discard_ctl, true); 2812 } 2813 2814 /* 2815 * Transaction is finished. We don't need the lock anymore. We 2816 * do need to clean up the block groups in case of a transaction 2817 * abort. 
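 * The discard below is skipped when the transaction was aborted; in
 * that case we only warn and drop the block group references.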
2818 */
2819 deleted_bgs = &trans->transaction->deleted_bgs;
2820 list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
2821 u64 trimmed = 0;
2822
2823 ret = -EROFS;
2824 if (!TRANS_ABORTED(trans))
2825 ret = btrfs_discard_extent(fs_info,
2826 block_group->start,
2827 block_group->length,
2828 &trimmed);
2829
2830 list_del_init(&block_group->bg_list);
2831 btrfs_unfreeze_block_group(block_group);
2832 btrfs_put_block_group(block_group);
2833
2834 if (ret) {
2835 const char *errstr = btrfs_decode_error(ret);
2836 btrfs_warn(fs_info,
2837 "discard failed while removing blockgroup: errno=%d %s",
2838 ret, errstr);
2839 }
2840 }
2841
2842 return 0;
2843 }
2844
2845 static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
2846 u64 bytenr, u64 num_bytes, bool is_data)
2847 {
2848 int ret;
2849
2850 if (is_data) {
2851 struct btrfs_root *csum_root;
2852
2853 csum_root = btrfs_csum_root(trans->fs_info, bytenr);
2854 ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes);
2855 if (ret) {
2856 btrfs_abort_transaction(trans, ret);
2857 return ret;
2858 }
2859 }
2860
2861 ret = add_to_free_space_tree(trans, bytenr, num_bytes);
2862 if (ret) {
2863 btrfs_abort_transaction(trans, ret);
2864 return ret;
2865 }
2866
2867 ret = btrfs_update_block_group(trans, bytenr, num_bytes, false);
2868 if (ret)
2869 btrfs_abort_transaction(trans, ret);
2870
2871 return ret;
2872 }
2873
2874 #define abort_and_dump(trans, path, fmt, args...) \
2875 ({ \
2876 btrfs_abort_transaction(trans, -EUCLEAN); \
2877 btrfs_print_leaf(path->nodes[0]); \
2878 btrfs_crit(trans->fs_info, fmt, ##args); \
2879 })
2880
2881 /*
2882 * Drop one or more refs of @node.
2883 *
2884 * 1. Locate the extent refs.
2885 * It's either inline in EXTENT/METADATA_ITEM or in keyed SHARED_* item.
2886 * Locate it, then reduce the refs number or remove the ref line completely.
2887 *
2888 * 2. Update the refs count in EXTENT/METADATA_ITEM
2889 *
2890 * Inline backref case:
2891 *
2892 * in extent tree we have:
2893 *
2894 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
2895 * refs 2 gen 6 flags DATA
2896 * extent data backref root FS_TREE objectid 258 offset 0 count 1
2897 * extent data backref root FS_TREE objectid 257 offset 0 count 1
2898 *
2899 * This function gets called with:
2900 *
2901 * node->bytenr = 13631488
2902 * node->num_bytes = 1048576
2903 * root_objectid = FS_TREE
2904 * owner_objectid = 257
2905 * owner_offset = 0
2906 * refs_to_drop = 1
2907 *
2908 * Then we should get something like:
2909 *
2910 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
2911 * refs 1 gen 6 flags DATA
2912 * extent data backref root FS_TREE objectid 258 offset 0 count 1
2913 *
2914 * Keyed backref case:
2915 *
2916 * in extent tree we have:
2917 *
2918 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
2919 * refs 754 gen 6 flags DATA
2920 * [...]
2921 * item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28
2922 * extent data backref root FS_TREE objectid 866 offset 0 count 1
2923 *
2924 * This function gets called with:
2925 *
2926 * node->bytenr = 13631488
2927 * node->num_bytes = 1048576
2928 * root_objectid = FS_TREE
2929 * owner_objectid = 866
2930 * owner_offset = 0
2931 * refs_to_drop = 1
2932 *
2933 * Then we should get something like:
2934 *
2935 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
2936 * refs 753 gen 6 flags DATA
2937 *
2938 * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed.
2939 */
2940 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2941 struct btrfs_delayed_ref_node *node, u64 parent,
2942 u64 root_objectid, u64 owner_objectid,
2943 u64 owner_offset, int refs_to_drop,
2944 struct btrfs_delayed_extent_op *extent_op)
2945 {
2946 struct btrfs_fs_info *info = trans->fs_info;
2947 struct btrfs_key key;
2948 struct btrfs_path *path;
2949 struct btrfs_root *extent_root;
2950 struct extent_buffer *leaf;
2951 struct btrfs_extent_item *ei;
2952 struct btrfs_extent_inline_ref *iref;
2953 int ret;
2954 int is_data;
2955 int extent_slot = 0;
2956 int found_extent = 0;
2957 int num_to_del = 1;
2958 u32 item_size;
2959 u64 refs;
2960 u64 bytenr = node->bytenr;
2961 u64 num_bytes = node->num_bytes;
2962 bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
2963
2964 extent_root = btrfs_extent_root(info, bytenr);
2965 ASSERT(extent_root);
2966
2967 path = btrfs_alloc_path();
2968 if (!path)
2969 return -ENOMEM;
2970
2971 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
2972
2973 if (!is_data && refs_to_drop != 1) {
2974 btrfs_crit(info,
2975 "invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u",
2976 node->bytenr, refs_to_drop);
2977 ret = -EINVAL;
2978 btrfs_abort_transaction(trans, ret);
2979 goto out;
2980 }
2981
2982 if (is_data)
2983 skinny_metadata = false;
2984
2985 ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
2986 parent, root_objectid, owner_objectid,
2987 owner_offset);
2988 if (ret == 0) {
2989 /*
2990 * Either the inline backref or the SHARED_DATA_REF/
2991 * SHARED_BLOCK_REF is found
2992 *
2993 * Here is a quick path to locate EXTENT/METADATA_ITEM.
2994 * It's possible the EXTENT/METADATA_ITEM is near the current slot.
2995 */
2996 extent_slot = path->slots[0];
2997 while (extent_slot >= 0) {
2998 btrfs_item_key_to_cpu(path->nodes[0], &key,
2999 extent_slot);
3000 if (key.objectid != bytenr)
3001 break;
3002 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3003 key.offset == num_bytes) {
3004 found_extent = 1;
3005 break;
3006 }
3007 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3008 key.offset == owner_objectid) {
3009 found_extent = 1;
3010 break;
3011 }
3012
3013 /* Quick path didn't find the EXTENT/METADATA_ITEM */
3014 if (path->slots[0] - extent_slot > 5)
3015 break;
3016 extent_slot--;
3017 }
3018
3019 if (!found_extent) {
3020 if (iref) {
3021 abort_and_dump(trans, path,
3022 "invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref",
3023 path->slots[0]);
3024 ret = -EUCLEAN;
3025 goto out;
3026 }
3027 /* Must be SHARED_* item, remove the backref first */
3028 ret = remove_extent_backref(trans, extent_root, path,
3029 NULL, refs_to_drop, is_data);
3030 if (ret) {
3031 btrfs_abort_transaction(trans, ret);
3032 goto out;
3033 }
3034 btrfs_release_path(path);
3035
3036 /* Slow path to locate EXTENT/METADATA_ITEM */
3037 key.objectid = bytenr;
3038 key.type = BTRFS_EXTENT_ITEM_KEY;
3039 key.offset = num_bytes;
3040
3041 if (!is_data && skinny_metadata) {
3042 key.type = BTRFS_METADATA_ITEM_KEY;
3043 key.offset = owner_objectid;
3044 }
3045
3046 ret = btrfs_search_slot(trans, extent_root,
3047 &key, path, -1, 1);
3048 if (ret > 0 && skinny_metadata && path->slots[0]) {
3049 /*
3050 * Couldn't find our skinny metadata item,
3051 * see if we have ye olde extent item.
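 * (Pre-skinny filesystems keyed metadata extents as
 * (bytenr, EXTENT_ITEM, nodesize) rather than
 * (bytenr, METADATA_ITEM, level).)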
3052 */ 3053 path->slots[0]--; 3054 btrfs_item_key_to_cpu(path->nodes[0], &key, 3055 path->slots[0]); 3056 if (key.objectid == bytenr && 3057 key.type == BTRFS_EXTENT_ITEM_KEY && 3058 key.offset == num_bytes) 3059 ret = 0; 3060 } 3061 3062 if (ret > 0 && skinny_metadata) { 3063 skinny_metadata = false; 3064 key.objectid = bytenr; 3065 key.type = BTRFS_EXTENT_ITEM_KEY; 3066 key.offset = num_bytes; 3067 btrfs_release_path(path); 3068 ret = btrfs_search_slot(trans, extent_root, 3069 &key, path, -1, 1); 3070 } 3071 3072 if (ret) { 3073 if (ret > 0) 3074 btrfs_print_leaf(path->nodes[0]); 3075 btrfs_err(info, 3076 "umm, got %d back from search, was looking for %llu, slot %d", 3077 ret, bytenr, path->slots[0]); 3078 } 3079 if (ret < 0) { 3080 btrfs_abort_transaction(trans, ret); 3081 goto out; 3082 } 3083 extent_slot = path->slots[0]; 3084 } 3085 } else if (WARN_ON(ret == -ENOENT)) { 3086 abort_and_dump(trans, path, 3087 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d", 3088 bytenr, parent, root_objectid, owner_objectid, 3089 owner_offset, path->slots[0]); 3090 goto out; 3091 } else { 3092 btrfs_abort_transaction(trans, ret); 3093 goto out; 3094 } 3095 3096 leaf = path->nodes[0]; 3097 item_size = btrfs_item_size(leaf, extent_slot); 3098 if (unlikely(item_size < sizeof(*ei))) { 3099 ret = -EUCLEAN; 3100 btrfs_err(trans->fs_info, 3101 "unexpected extent item size, has %u expect >= %zu", 3102 item_size, sizeof(*ei)); 3103 btrfs_abort_transaction(trans, ret); 3104 goto out; 3105 } 3106 ei = btrfs_item_ptr(leaf, extent_slot, 3107 struct btrfs_extent_item); 3108 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID && 3109 key.type == BTRFS_EXTENT_ITEM_KEY) { 3110 struct btrfs_tree_block_info *bi; 3111 3112 if (item_size < sizeof(*ei) + sizeof(*bi)) { 3113 abort_and_dump(trans, path, 3114 "invalid extent item size for key (%llu, %u, %llu) slot %u owner %llu, has %u expect >= %zu", 3115 key.objectid, key.type, key.offset, 3116 path->slots[0], owner_objectid, item_size, 3117 sizeof(*ei) + sizeof(*bi)); 3118 ret = -EUCLEAN; 3119 goto out; 3120 } 3121 bi = (struct btrfs_tree_block_info *)(ei + 1); 3122 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); 3123 } 3124 3125 refs = btrfs_extent_refs(leaf, ei); 3126 if (refs < refs_to_drop) { 3127 abort_and_dump(trans, path, 3128 "trying to drop %d refs but we only have %llu for bytenr %llu slot %u", 3129 refs_to_drop, refs, bytenr, path->slots[0]); 3130 ret = -EUCLEAN; 3131 goto out; 3132 } 3133 refs -= refs_to_drop; 3134 3135 if (refs > 0) { 3136 if (extent_op) 3137 __run_delayed_extent_op(extent_op, leaf, ei); 3138 /* 3139 * In the case of inline back ref, reference count will 3140 * be updated by remove_extent_backref 3141 */ 3142 if (iref) { 3143 if (!found_extent) { 3144 abort_and_dump(trans, path, 3145 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u", 3146 path->slots[0]); 3147 ret = -EUCLEAN; 3148 goto out; 3149 } 3150 } else { 3151 btrfs_set_extent_refs(leaf, ei, refs); 3152 btrfs_mark_buffer_dirty(leaf); 3153 } 3154 if (found_extent) { 3155 ret = remove_extent_backref(trans, extent_root, path, 3156 iref, refs_to_drop, is_data); 3157 if (ret) { 3158 btrfs_abort_transaction(trans, ret); 3159 goto out; 3160 } 3161 } 3162 } else { 3163 /* In this branch refs == 1 */ 3164 if (found_extent) { 3165 if (is_data && refs_to_drop != 3166 extent_data_ref_count(path, iref)) { 3167 abort_and_dump(trans, path, 3168 "invalid refs_to_drop, current refs %u refs_to_drop %u slot %u", 3169 
extent_data_ref_count(path, iref),
3170 refs_to_drop, path->slots[0]);
3171 ret = -EUCLEAN;
3172 goto out;
3173 }
3174 if (iref) {
3175 if (path->slots[0] != extent_slot) {
3176 abort_and_dump(trans, path,
3177 "invalid iref, extent item key (%llu %u %llu) slot %u doesn't have wanted iref",
3178 key.objectid, key.type,
3179 key.offset, path->slots[0]);
3180 ret = -EUCLEAN;
3181 goto out;
3182 }
3183 } else {
3184 /*
3185 * No inline ref, so we must be at a SHARED_* item,
3186 * and since it's a single ref the layout must be:
3187 * | extent_slot ||extent_slot + 1|
3188 * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ]
3189 */
3190 if (path->slots[0] != extent_slot + 1) {
3191 abort_and_dump(trans, path,
3192 "invalid SHARED_* item slot %u, previous item is not EXTENT/METADATA_ITEM",
3193 path->slots[0]);
3194 ret = -EUCLEAN;
3195 goto out;
3196 }
3197 path->slots[0] = extent_slot;
3198 num_to_del = 2;
3199 }
3200 }
3201
3202 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3203 num_to_del);
3204 if (ret) {
3205 btrfs_abort_transaction(trans, ret);
3206 goto out;
3207 }
3208 btrfs_release_path(path);
3209
3210 ret = do_free_extent_accounting(trans, bytenr, num_bytes, is_data);
3211 }
3212 btrfs_release_path(path);
3213
3214 out:
3215 btrfs_free_path(path);
3216 return ret;
3217 }
3218
3219 /*
3220 * when we free a block, it is possible (and likely) that we free the last
3221 * delayed ref for that extent as well. This searches the delayed ref tree for
3222 * a given extent, and if there are no other delayed refs to be processed, it
3223 * removes it from the tree.
3224 */
3225 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3226 u64 bytenr)
3227 {
3228 struct btrfs_delayed_ref_head *head;
3229 struct btrfs_delayed_ref_root *delayed_refs;
3230 int ret = 0;
3231
3232 delayed_refs = &trans->transaction->delayed_refs;
3233 spin_lock(&delayed_refs->lock);
3234 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
3235 if (!head)
3236 goto out_delayed_unlock;
3237
3238 spin_lock(&head->lock);
3239 if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
3240 goto out;
3241
3242 if (cleanup_extent_op(head) != NULL)
3243 goto out;
3244
3245 /*
3246 * waiting for the lock here would deadlock.
If someone else has it 3247 * locked they are already in the process of dropping it anyway 3248 */ 3249 if (!mutex_trylock(&head->mutex)) 3250 goto out; 3251 3252 btrfs_delete_ref_head(delayed_refs, head); 3253 head->processing = false; 3254 3255 spin_unlock(&head->lock); 3256 spin_unlock(&delayed_refs->lock); 3257 3258 BUG_ON(head->extent_op); 3259 if (head->must_insert_reserved) 3260 ret = 1; 3261 3262 btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); 3263 mutex_unlock(&head->mutex); 3264 btrfs_put_delayed_ref_head(head); 3265 return ret; 3266 out: 3267 spin_unlock(&head->lock); 3268 3269 out_delayed_unlock: 3270 spin_unlock(&delayed_refs->lock); 3271 return 0; 3272 } 3273 3274 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 3275 u64 root_id, 3276 struct extent_buffer *buf, 3277 u64 parent, int last_ref) 3278 { 3279 struct btrfs_fs_info *fs_info = trans->fs_info; 3280 struct btrfs_ref generic_ref = { 0 }; 3281 int ret; 3282 3283 btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, 3284 buf->start, buf->len, parent); 3285 btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 3286 root_id, 0, false); 3287 3288 if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3289 btrfs_ref_tree_mod(fs_info, &generic_ref); 3290 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL); 3291 BUG_ON(ret); /* -ENOMEM */ 3292 } 3293 3294 if (last_ref && btrfs_header_generation(buf) == trans->transid) { 3295 struct btrfs_block_group *cache; 3296 bool must_pin = false; 3297 3298 if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3299 ret = check_ref_cleanup(trans, buf->start); 3300 if (!ret) { 3301 btrfs_redirty_list_add(trans->transaction, buf); 3302 goto out; 3303 } 3304 } 3305 3306 cache = btrfs_lookup_block_group(fs_info, buf->start); 3307 3308 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 3309 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3310 btrfs_put_block_group(cache); 3311 goto out; 3312 } 3313 3314 /* 3315 * If there are tree mod log users we may have recorded mod log 3316 * operations for this node. If we re-allocate this node we 3317 * could replay operations on this node that happened when it 3318 * existed in a completely different root. For example if it 3319 * was part of root A, then was reallocated to root B, and we 3320 * are doing a btrfs_old_search_slot(root b), we could replay 3321 * operations that happened when the block was part of root A, 3322 * giving us an inconsistent view of the btree. 3323 * 3324 * We are safe from races here because at this point no other 3325 * node or root points to this extent buffer, so if after this 3326 * check a new tree mod log user joins we will not have an 3327 * existing log of operations on this node that we have to 3328 * contend with. 3329 */ 3330 if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) 3331 must_pin = true; 3332 3333 if (must_pin || btrfs_is_zoned(fs_info)) { 3334 btrfs_redirty_list_add(trans->transaction, buf); 3335 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3336 btrfs_put_block_group(cache); 3337 goto out; 3338 } 3339 3340 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 3341 3342 btrfs_add_free_space(cache, buf->start, buf->len); 3343 btrfs_free_reserved_bytes(cache, buf->len, 0); 3344 btrfs_put_block_group(cache); 3345 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len); 3346 } 3347 out: 3348 if (last_ref) { 3349 /* 3350 * Deleting the buffer, clear the corrupt flag since it doesn't 3351 * matter anymore. 
3352 */ 3353 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); 3354 } 3355 } 3356 3357 /* Can return -ENOMEM */ 3358 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) 3359 { 3360 struct btrfs_fs_info *fs_info = trans->fs_info; 3361 int ret; 3362 3363 if (btrfs_is_testing(fs_info)) 3364 return 0; 3365 3366 /* 3367 * tree log blocks never actually go into the extent allocation 3368 * tree, just update pinning info and exit early. 3369 */ 3370 if ((ref->type == BTRFS_REF_METADATA && 3371 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || 3372 (ref->type == BTRFS_REF_DATA && 3373 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)) { 3374 /* unlocks the pinned mutex */ 3375 btrfs_pin_extent(trans, ref->bytenr, ref->len, 1); 3376 ret = 0; 3377 } else if (ref->type == BTRFS_REF_METADATA) { 3378 ret = btrfs_add_delayed_tree_ref(trans, ref, NULL); 3379 } else { 3380 ret = btrfs_add_delayed_data_ref(trans, ref, 0); 3381 } 3382 3383 if (!((ref->type == BTRFS_REF_METADATA && 3384 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || 3385 (ref->type == BTRFS_REF_DATA && 3386 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID))) 3387 btrfs_ref_tree_mod(fs_info, ref); 3388 3389 return ret; 3390 } 3391 3392 enum btrfs_loop_type { 3393 /* 3394 * Start caching block groups but do not wait for progress or for them 3395 * to be done. 3396 */ 3397 LOOP_CACHING_NOWAIT, 3398 3399 /* 3400 * Wait for the block group free_space >= the space we're waiting for if 3401 * the block group isn't cached. 3402 */ 3403 LOOP_CACHING_WAIT, 3404 3405 /* 3406 * Allow allocations to happen from block groups that do not yet have a 3407 * size classification. 3408 */ 3409 LOOP_UNSET_SIZE_CLASS, 3410 3411 /* 3412 * Allocate a chunk and then retry the allocation. 3413 */ 3414 LOOP_ALLOC_CHUNK, 3415 3416 /* 3417 * Ignore the size class restrictions for this allocation. 3418 */ 3419 LOOP_WRONG_SIZE_CLASS, 3420 3421 /* 3422 * Ignore the empty size, only try to allocate the number of bytes 3423 * needed for this allocation. 3424 */ 3425 LOOP_NO_EMPTY_SIZE, 3426 }; 3427 3428 static inline void 3429 btrfs_lock_block_group(struct btrfs_block_group *cache, 3430 int delalloc) 3431 { 3432 if (delalloc) 3433 down_read(&cache->data_rwsem); 3434 } 3435 3436 static inline void btrfs_grab_block_group(struct btrfs_block_group *cache, 3437 int delalloc) 3438 { 3439 btrfs_get_block_group(cache); 3440 if (delalloc) 3441 down_read(&cache->data_rwsem); 3442 } 3443 3444 static struct btrfs_block_group *btrfs_lock_cluster( 3445 struct btrfs_block_group *block_group, 3446 struct btrfs_free_cluster *cluster, 3447 int delalloc) 3448 __acquires(&cluster->refill_lock) 3449 { 3450 struct btrfs_block_group *used_bg = NULL; 3451 3452 spin_lock(&cluster->refill_lock); 3453 while (1) { 3454 used_bg = cluster->block_group; 3455 if (!used_bg) 3456 return NULL; 3457 3458 if (used_bg == block_group) 3459 return used_bg; 3460 3461 btrfs_get_block_group(used_bg); 3462 3463 if (!delalloc) 3464 return used_bg; 3465 3466 if (down_read_trylock(&used_bg->data_rwsem)) 3467 return used_bg; 3468 3469 spin_unlock(&cluster->refill_lock); 3470 3471 /* We should only have one-level nested. 
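 * In the delalloc case the caller already holds bg->data_rwsem, so
 * taking used_bg->data_rwsem is a second level; SINGLE_DEPTH_NESTING
 * below tells lockdep this nesting is intentional.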
*/
3472 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
3473
3474 spin_lock(&cluster->refill_lock);
3475 if (used_bg == cluster->block_group)
3476 return used_bg;
3477
3478 up_read(&used_bg->data_rwsem);
3479 btrfs_put_block_group(used_bg);
3480 }
3481 }
3482
3483 static inline void
3484 btrfs_release_block_group(struct btrfs_block_group *cache,
3485 int delalloc)
3486 {
3487 if (delalloc)
3488 up_read(&cache->data_rwsem);
3489 btrfs_put_block_group(cache);
3490 }
3491
3492 /*
3493 * Helper function for find_free_extent().
3494 *
3495 * Return -ENOENT to inform the caller that we need to fall back to unclustered mode.
3496 * Return >0 to inform the caller that we found nothing.
3497 * Return 0 means we have found a location and set ffe_ctl->found_offset.
3498 */
3499 static int find_free_extent_clustered(struct btrfs_block_group *bg,
3500 struct find_free_extent_ctl *ffe_ctl,
3501 struct btrfs_block_group **cluster_bg_ret)
3502 {
3503 struct btrfs_block_group *cluster_bg;
3504 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3505 u64 aligned_cluster;
3506 u64 offset;
3507 int ret;
3508
3509 cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
3510 if (!cluster_bg)
3511 goto refill_cluster;
3512 if (cluster_bg != bg && (cluster_bg->ro ||
3513 !block_group_bits(cluster_bg, ffe_ctl->flags)))
3514 goto release_cluster;
3515
3516 offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
3517 ffe_ctl->num_bytes, cluster_bg->start,
3518 &ffe_ctl->max_extent_size);
3519 if (offset) {
3520 /* We have a block, we're done */
3521 spin_unlock(&last_ptr->refill_lock);
3522 trace_btrfs_reserve_extent_cluster(cluster_bg, ffe_ctl);
3523 *cluster_bg_ret = cluster_bg;
3524 ffe_ctl->found_offset = offset;
3525 return 0;
3526 }
3527 WARN_ON(last_ptr->block_group != cluster_bg);
3528
3529 release_cluster:
3530 /*
3531 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
3532 * let's just skip it and let the allocator find whatever block it can
3533 * find. If we reach this point, we will have tried the cluster
3534 * allocator plenty of times and not have found anything, so we are
3535 * likely way too fragmented for the clustering stuff to find anything.
3536 *
3537 * However, if the cluster is taken from the current block group,
3538 * release the cluster first, so that we stand a better chance of
3539 * succeeding in the unclustered allocation.
3540 */
3541 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) {
3542 spin_unlock(&last_ptr->refill_lock);
3543 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3544 return -ENOENT;
3545 }
3546
3547 /* This cluster didn't work out, free it and start over */
3548 btrfs_return_cluster_to_free_space(NULL, last_ptr);
3549
3550 if (cluster_bg != bg)
3551 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3552
3553 refill_cluster:
3554 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) {
3555 spin_unlock(&last_ptr->refill_lock);
3556 return -ENOENT;
3557 }
3558
3559 aligned_cluster = max_t(u64,
3560 ffe_ctl->empty_cluster + ffe_ctl->empty_size,
3561 bg->full_stripe_len);
3562 ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start,
3563 ffe_ctl->num_bytes, aligned_cluster);
3564 if (ret == 0) {
3565 /* Now pull our allocation out of this cluster */
3566 offset = btrfs_alloc_from_cluster(bg, last_ptr,
3567 ffe_ctl->num_bytes, ffe_ctl->search_start,
3568 &ffe_ctl->max_extent_size);
3569 if (offset) {
3570 /* We found one, proceed */
3571 spin_unlock(&last_ptr->refill_lock);
3572 ffe_ctl->found_offset = offset;
3573 trace_btrfs_reserve_extent_cluster(bg, ffe_ctl);
3574 return 0;
3575 }
3576 }
3577 /*
3578 * At this point we either didn't find a cluster or we weren't able to
3579 * allocate a block from our cluster. Free the cluster we've been
3580 * trying to use, and go to the next block group.
3581 */
3582 btrfs_return_cluster_to_free_space(NULL, last_ptr);
3583 spin_unlock(&last_ptr->refill_lock);
3584 return 1;
3585 }
3586
3587 /*
3588 * Return >0 to inform the caller that we found nothing.
3589 * Return 0 when we find a free extent and set ffe_ctl->found_offset.
3590 */
3591 static int find_free_extent_unclustered(struct btrfs_block_group *bg,
3592 struct find_free_extent_ctl *ffe_ctl)
3593 {
3594 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3595 u64 offset;
3596
3597 /*
3598 * We are doing an unclustered allocation, set the fragmented flag so
3599 * we don't bother trying to set up a cluster again until we get more
3600 * space.
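 * (unpin_extent_range() clears the fragmented flag again once enough
 * space has been unpinned in the block group.)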
3601 */ 3602 if (unlikely(last_ptr)) { 3603 spin_lock(&last_ptr->lock); 3604 last_ptr->fragmented = 1; 3605 spin_unlock(&last_ptr->lock); 3606 } 3607 if (ffe_ctl->cached) { 3608 struct btrfs_free_space_ctl *free_space_ctl; 3609 3610 free_space_ctl = bg->free_space_ctl; 3611 spin_lock(&free_space_ctl->tree_lock); 3612 if (free_space_ctl->free_space < 3613 ffe_ctl->num_bytes + ffe_ctl->empty_cluster + 3614 ffe_ctl->empty_size) { 3615 ffe_ctl->total_free_space = max_t(u64, 3616 ffe_ctl->total_free_space, 3617 free_space_ctl->free_space); 3618 spin_unlock(&free_space_ctl->tree_lock); 3619 return 1; 3620 } 3621 spin_unlock(&free_space_ctl->tree_lock); 3622 } 3623 3624 offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start, 3625 ffe_ctl->num_bytes, ffe_ctl->empty_size, 3626 &ffe_ctl->max_extent_size); 3627 if (!offset) 3628 return 1; 3629 ffe_ctl->found_offset = offset; 3630 return 0; 3631 } 3632 3633 static int do_allocation_clustered(struct btrfs_block_group *block_group, 3634 struct find_free_extent_ctl *ffe_ctl, 3635 struct btrfs_block_group **bg_ret) 3636 { 3637 int ret; 3638 3639 /* We want to try and use the cluster allocator, so lets look there */ 3640 if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) { 3641 ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret); 3642 if (ret >= 0) 3643 return ret; 3644 /* ret == -ENOENT case falls through */ 3645 } 3646 3647 return find_free_extent_unclustered(block_group, ffe_ctl); 3648 } 3649 3650 /* 3651 * Tree-log block group locking 3652 * ============================ 3653 * 3654 * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which 3655 * indicates the starting address of a block group, which is reserved only 3656 * for tree-log metadata. 3657 * 3658 * Lock nesting 3659 * ============ 3660 * 3661 * space_info::lock 3662 * block_group::lock 3663 * fs_info::treelog_bg_lock 3664 */ 3665 3666 /* 3667 * Simple allocator for sequential-only block group. It only allows sequential 3668 * allocation. No need to play with trees. This function also reserves the 3669 * bytes as in btrfs_add_reserved_bytes. 3670 */ 3671 static int do_allocation_zoned(struct btrfs_block_group *block_group, 3672 struct find_free_extent_ctl *ffe_ctl, 3673 struct btrfs_block_group **bg_ret) 3674 { 3675 struct btrfs_fs_info *fs_info = block_group->fs_info; 3676 struct btrfs_space_info *space_info = block_group->space_info; 3677 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3678 u64 start = block_group->start; 3679 u64 num_bytes = ffe_ctl->num_bytes; 3680 u64 avail; 3681 u64 bytenr = block_group->start; 3682 u64 log_bytenr; 3683 u64 data_reloc_bytenr; 3684 int ret = 0; 3685 bool skip = false; 3686 3687 ASSERT(btrfs_is_zoned(block_group->fs_info)); 3688 3689 /* 3690 * Do not allow non-tree-log blocks in the dedicated tree-log block 3691 * group, and vice versa. 3692 */ 3693 spin_lock(&fs_info->treelog_bg_lock); 3694 log_bytenr = fs_info->treelog_bg; 3695 if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) || 3696 (!ffe_ctl->for_treelog && bytenr == log_bytenr))) 3697 skip = true; 3698 spin_unlock(&fs_info->treelog_bg_lock); 3699 if (skip) 3700 return 1; 3701 3702 /* 3703 * Do not allow non-relocation blocks in the dedicated relocation block 3704 * group, and vice versa. 
3705 */
3706 spin_lock(&fs_info->relocation_bg_lock);
3707 data_reloc_bytenr = fs_info->data_reloc_bg;
3708 if (data_reloc_bytenr &&
3709 ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) ||
3710 (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr)))
3711 skip = true;
3712 spin_unlock(&fs_info->relocation_bg_lock);
3713 if (skip)
3714 return 1;
3715
3716 /* Check RO and no space case before trying to activate it */
3717 spin_lock(&block_group->lock);
3718 if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) {
3719 ret = 1;
3720 /*
3721 * May need to clear fs_info->{treelog,data_reloc}_bg.
3722 * Return the error after taking the locks.
3723 */
3724 }
3725 spin_unlock(&block_group->lock);
3726
3727 /* Metadata block group is activated at write time. */
3728 if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
3729 !btrfs_zone_activate(block_group)) {
3730 ret = 1;
3731 /*
3732 * May need to clear fs_info->{treelog,data_reloc}_bg.
3733 * Return the error after taking the locks.
3734 */
3735 }
3736
3737 spin_lock(&space_info->lock);
3738 spin_lock(&block_group->lock);
3739 spin_lock(&fs_info->treelog_bg_lock);
3740 spin_lock(&fs_info->relocation_bg_lock);
3741
3742 if (ret)
3743 goto out;
3744
3745 ASSERT(!ffe_ctl->for_treelog ||
3746 block_group->start == fs_info->treelog_bg ||
3747 fs_info->treelog_bg == 0);
3748 ASSERT(!ffe_ctl->for_data_reloc ||
3749 block_group->start == fs_info->data_reloc_bg ||
3750 fs_info->data_reloc_bg == 0);
3751
3752 if (block_group->ro ||
3753 (!ffe_ctl->for_data_reloc &&
3754 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
3755 ret = 1;
3756 goto out;
3757 }
3758
3759 /*
3760 * Do not allow a block group that is currently in use to become the
3761 * dedicated tree-log block group.
3762 */
3763 if (ffe_ctl->for_treelog && !fs_info->treelog_bg &&
3764 (block_group->used || block_group->reserved)) {
3765 ret = 1;
3766 goto out;
3767 }
3768
3769 /*
3770 * Do not allow a block group that is currently in use to become the
3771 * dedicated data relocation block group.
3772 */
3773 if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg &&
3774 (block_group->used || block_group->reserved)) {
3775 ret = 1;
3776 goto out;
3777 }
3778
3779 WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity);
3780 avail = block_group->zone_capacity - block_group->alloc_offset;
3781 if (avail < num_bytes) {
3782 if (ffe_ctl->max_extent_size < avail) {
3783 /*
3784 * With the sequential allocator, free space is always
3785 * contiguous
3786 */
3787 ffe_ctl->max_extent_size = avail;
3788 ffe_ctl->total_free_space = avail;
3789 }
3790 ret = 1;
3791 goto out;
3792 }
3793
3794 if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
3795 fs_info->treelog_bg = block_group->start;
3796
3797 if (ffe_ctl->for_data_reloc) {
3798 if (!fs_info->data_reloc_bg)
3799 fs_info->data_reloc_bg = block_group->start;
3800 /*
3801 * Do not allow allocations from this block group, unless it is
3802 * for data relocation. Compared to increasing the ->ro, setting
3803 * the ->zoned_data_reloc_ongoing flag still allows nocow
3804 * writers to come in. See btrfs_inc_nocow_writers().
3805 *
3806 * We need to disable allocations here to avoid allocating a
3807 * regular (non-relocation) data extent. With a mix of relocation
3808 * extents and regular extents, we can dispatch WRITE commands
3809 * (for relocation extents) and ZONE APPEND commands (for
3810 * regular extents) at the same time to the same zone, which
3811 * easily breaks the write pointer.
3812 * 3813 * Also, this flag avoids this block group to be zone finished. 3814 */ 3815 set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags); 3816 } 3817 3818 ffe_ctl->found_offset = start + block_group->alloc_offset; 3819 block_group->alloc_offset += num_bytes; 3820 spin_lock(&ctl->tree_lock); 3821 ctl->free_space -= num_bytes; 3822 spin_unlock(&ctl->tree_lock); 3823 3824 /* 3825 * We do not check if found_offset is aligned to stripesize. The 3826 * address is anyway rewritten when using zone append writing. 3827 */ 3828 3829 ffe_ctl->search_start = ffe_ctl->found_offset; 3830 3831 out: 3832 if (ret && ffe_ctl->for_treelog) 3833 fs_info->treelog_bg = 0; 3834 if (ret && ffe_ctl->for_data_reloc) 3835 fs_info->data_reloc_bg = 0; 3836 spin_unlock(&fs_info->relocation_bg_lock); 3837 spin_unlock(&fs_info->treelog_bg_lock); 3838 spin_unlock(&block_group->lock); 3839 spin_unlock(&space_info->lock); 3840 return ret; 3841 } 3842 3843 static int do_allocation(struct btrfs_block_group *block_group, 3844 struct find_free_extent_ctl *ffe_ctl, 3845 struct btrfs_block_group **bg_ret) 3846 { 3847 switch (ffe_ctl->policy) { 3848 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3849 return do_allocation_clustered(block_group, ffe_ctl, bg_ret); 3850 case BTRFS_EXTENT_ALLOC_ZONED: 3851 return do_allocation_zoned(block_group, ffe_ctl, bg_ret); 3852 default: 3853 BUG(); 3854 } 3855 } 3856 3857 static void release_block_group(struct btrfs_block_group *block_group, 3858 struct find_free_extent_ctl *ffe_ctl, 3859 int delalloc) 3860 { 3861 switch (ffe_ctl->policy) { 3862 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3863 ffe_ctl->retry_uncached = false; 3864 break; 3865 case BTRFS_EXTENT_ALLOC_ZONED: 3866 /* Nothing to do */ 3867 break; 3868 default: 3869 BUG(); 3870 } 3871 3872 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) != 3873 ffe_ctl->index); 3874 btrfs_release_block_group(block_group, delalloc); 3875 } 3876 3877 static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl, 3878 struct btrfs_key *ins) 3879 { 3880 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3881 3882 if (!ffe_ctl->use_cluster && last_ptr) { 3883 spin_lock(&last_ptr->lock); 3884 last_ptr->window_start = ins->objectid; 3885 spin_unlock(&last_ptr->lock); 3886 } 3887 } 3888 3889 static void found_extent(struct find_free_extent_ctl *ffe_ctl, 3890 struct btrfs_key *ins) 3891 { 3892 switch (ffe_ctl->policy) { 3893 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3894 found_extent_clustered(ffe_ctl, ins); 3895 break; 3896 case BTRFS_EXTENT_ALLOC_ZONED: 3897 /* Nothing to do */ 3898 break; 3899 default: 3900 BUG(); 3901 } 3902 } 3903 3904 static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info, 3905 struct find_free_extent_ctl *ffe_ctl) 3906 { 3907 /* Block group's activeness is not a requirement for METADATA block groups. */ 3908 if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)) 3909 return 0; 3910 3911 /* If we can activate new zone, just allocate a chunk and use it */ 3912 if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags)) 3913 return 0; 3914 3915 /* 3916 * We already reached the max active zones. Try to finish one block 3917 * group to make a room for a new block group. This is only possible 3918 * for a data block group because btrfs_zone_finish() may need to wait 3919 * for a running transaction which can cause a deadlock for metadata 3920 * allocation. 
3921 */ 3922 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { 3923 int ret = btrfs_zone_finish_one_bg(fs_info); 3924 3925 if (ret == 1) 3926 return 0; 3927 else if (ret < 0) 3928 return ret; 3929 } 3930 3931 /* 3932 * If we have enough free space left in an already active block group 3933 * and we can't activate any other zone now, do not allow allocating a 3934 * new chunk and let find_free_extent() retry with a smaller size. 3935 */ 3936 if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size) 3937 return -ENOSPC; 3938 3939 /* 3940 * Not even min_alloc_size is left in any block group. Since we cannot 3941 * activate a new block group, allocating one may not help. Let's tell 3942 * the caller to try again and hope it makes progress by writing out 3943 * some parts of the region. That is only possible for data block 3944 * groups, where a part of the region can be written. 3945 */ 3946 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) 3947 return -EAGAIN; 3948 3949 /* 3950 * We cannot activate a new block group and there is not enough space 3951 * left in any block group. So allocating a new block group may not 3952 * help. But there is nothing to do anyway, so let's go with it. 3953 */ 3954 return 0; 3955 } 3956 3957 static int can_allocate_chunk(struct btrfs_fs_info *fs_info, 3958 struct find_free_extent_ctl *ffe_ctl) 3959 { 3960 switch (ffe_ctl->policy) { 3961 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3962 return 0; 3963 case BTRFS_EXTENT_ALLOC_ZONED: 3964 return can_allocate_chunk_zoned(fs_info, ffe_ctl); 3965 default: 3966 BUG(); 3967 } 3968 } 3969 3970 /* 3971 * Return >0 means the caller needs to re-search for a free extent. 3972 * Return 0 means we have the needed free extent. 3973 * Return <0 means we failed to locate any free extent. 3974 */ 3975 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, 3976 struct btrfs_key *ins, 3977 struct find_free_extent_ctl *ffe_ctl, 3978 bool full_search) 3979 { 3980 struct btrfs_root *root = fs_info->chunk_root; 3981 int ret; 3982 3983 if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) && 3984 ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg) 3985 ffe_ctl->orig_have_caching_bg = true; 3986 3987 if (ins->objectid) { 3988 found_extent(ffe_ctl, ins); 3989 return 0; 3990 } 3991 3992 if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg) 3993 return 1; 3994 3995 ffe_ctl->index++; 3996 if (ffe_ctl->index < BTRFS_NR_RAID_TYPES) 3997 return 1; 3998 3999 /* See the comments for btrfs_loop_type for an explanation of the phases. */ 4000 if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) { 4001 ffe_ctl->index = 0; 4002 /* 4003 * We want to skip the LOOP_CACHING_WAIT step if we don't have 4004 * any uncached bgs and we've already done a full search 4005 * through them. 4006 */ 4007 if (ffe_ctl->loop == LOOP_CACHING_NOWAIT && 4008 (!ffe_ctl->orig_have_caching_bg && full_search)) 4009 ffe_ctl->loop++; 4010 ffe_ctl->loop++; 4011 4012 if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) { 4013 struct btrfs_trans_handle *trans; 4014 int exist = 0; 4015 4016 /* Check if the allocation policy allows creating a new chunk */ 4017 ret = can_allocate_chunk(fs_info, ffe_ctl); 4018 if (ret) 4019 return ret; 4020 4021 trans = current->journal_info; 4022 if (trans) 4023 exist = 1; 4024 else 4025 trans = btrfs_join_transaction(root); 4026 4027 if (IS_ERR(trans)) { 4028 ret = PTR_ERR(trans); 4029 return ret; 4030 } 4031 4032 ret = btrfs_chunk_alloc(trans, ffe_ctl->flags, 4033 CHUNK_ALLOC_FORCE_FOR_EXTENT); 4034 4035 /* Do not bail out on ENOSPC since we can do more.
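* Clear the error, advance ffe_ctl->loop and let the caller retry the search.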
*/ 4036 if (ret == -ENOSPC) { 4037 ret = 0; 4038 ffe_ctl->loop++; 4039 } 4040 else if (ret < 0) 4041 btrfs_abort_transaction(trans, ret); 4042 else 4043 ret = 0; 4044 if (!exist) 4045 btrfs_end_transaction(trans); 4046 if (ret) 4047 return ret; 4048 } 4049 4050 if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) { 4051 if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED) 4052 return -ENOSPC; 4053 4054 /* 4055 * Don't loop again if we already have no empty_size and 4056 * no empty_cluster. 4057 */ 4058 if (ffe_ctl->empty_size == 0 && 4059 ffe_ctl->empty_cluster == 0) 4060 return -ENOSPC; 4061 ffe_ctl->empty_size = 0; 4062 ffe_ctl->empty_cluster = 0; 4063 } 4064 return 1; 4065 } 4066 return -ENOSPC; 4067 } 4068 4069 static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl, 4070 struct btrfs_block_group *bg) 4071 { 4072 if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) 4073 return true; 4074 if (!btrfs_block_group_should_use_size_class(bg)) 4075 return true; 4076 if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) 4077 return true; 4078 if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && 4079 bg->size_class == BTRFS_BG_SZ_NONE) 4080 return true; 4081 return ffe_ctl->size_class == bg->size_class; 4082 } 4083 4084 static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, 4085 struct find_free_extent_ctl *ffe_ctl, 4086 struct btrfs_space_info *space_info, 4087 struct btrfs_key *ins) 4088 { 4089 /* 4090 * If our free space is heavily fragmented we may not be able to make 4091 * big contiguous allocations, so instead of doing the expensive search 4092 * for free space, simply return ENOSPC with our max_extent_size so we 4093 * can go ahead and search for a more manageable chunk. 4094 * 4095 * If our max_extent_size is large enough for our allocation, simply 4096 * disable clustering, since we will likely not be able to find enough 4097 * space to create a cluster and would only induce latency trying. 4098 */ 4099 if (space_info->max_extent_size) { 4100 spin_lock(&space_info->lock); 4101 if (space_info->max_extent_size && 4102 ffe_ctl->num_bytes > space_info->max_extent_size) { 4103 ins->offset = space_info->max_extent_size; 4104 spin_unlock(&space_info->lock); 4105 return -ENOSPC; 4106 } else if (space_info->max_extent_size) { 4107 ffe_ctl->use_cluster = false; 4108 } 4109 spin_unlock(&space_info->lock); 4110 } 4111 4112 ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info, 4113 &ffe_ctl->empty_cluster); 4114 if (ffe_ctl->last_ptr) { 4115 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 4116 4117 spin_lock(&last_ptr->lock); 4118 if (last_ptr->block_group) 4119 ffe_ctl->hint_byte = last_ptr->window_start; 4120 if (last_ptr->fragmented) { 4121 /* 4122 * We still set window_start so we can keep track of the 4123 * last place we found an allocation, to try to save 4124 * some time.
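* Clustered allocation is disabled just below for this case, since
* building a cluster out of fragmented space would be wasted effort.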
4125 */ 4126 ffe_ctl->hint_byte = last_ptr->window_start; 4127 ffe_ctl->use_cluster = false; 4128 } 4129 spin_unlock(&last_ptr->lock); 4130 } 4131 4132 return 0; 4133 } 4134 4135 static int prepare_allocation(struct btrfs_fs_info *fs_info, 4136 struct find_free_extent_ctl *ffe_ctl, 4137 struct btrfs_space_info *space_info, 4138 struct btrfs_key *ins) 4139 { 4140 switch (ffe_ctl->policy) { 4141 case BTRFS_EXTENT_ALLOC_CLUSTERED: 4142 return prepare_allocation_clustered(fs_info, ffe_ctl, 4143 space_info, ins); 4144 case BTRFS_EXTENT_ALLOC_ZONED: 4145 if (ffe_ctl->for_treelog) { 4146 spin_lock(&fs_info->treelog_bg_lock); 4147 if (fs_info->treelog_bg) 4148 ffe_ctl->hint_byte = fs_info->treelog_bg; 4149 spin_unlock(&fs_info->treelog_bg_lock); 4150 } 4151 if (ffe_ctl->for_data_reloc) { 4152 spin_lock(&fs_info->relocation_bg_lock); 4153 if (fs_info->data_reloc_bg) 4154 ffe_ctl->hint_byte = fs_info->data_reloc_bg; 4155 spin_unlock(&fs_info->relocation_bg_lock); 4156 } 4157 return 0; 4158 default: 4159 BUG(); 4160 } 4161 } 4162 4163 /* 4164 * walks the btree of allocated extents and finds a hole of a given size. 4165 * The key ins is changed to record the hole: 4166 * ins->objectid == start position 4167 * ins->type == BTRFS_EXTENT_ITEM_KEY 4168 * ins->offset == the size of the hole. 4169 * Any available blocks before search_start are skipped. 4170 * 4171 * If there is no suitable free space, we record the max size of the 4172 * free space extents currently available. 4173 * 4174 * The overall logic and call chain: 4175 * 4176 * find_free_extent() 4177 * |- Iterate through all block groups 4178 * | |- Get a valid block group 4179 * | |- Try to do clustered allocation in that block group 4180 * | |- Try to do unclustered allocation in that block group 4181 * | |- Check if the result is valid 4182 * | | |- If valid, then exit 4183 * | |- Jump to next block group 4184 * | 4185 * |- Push harder to find free extents 4186 * |- If not found, re-iterate all block groups 4187 */ 4188 static noinline int find_free_extent(struct btrfs_root *root, 4189 struct btrfs_key *ins, 4190 struct find_free_extent_ctl *ffe_ctl) 4191 { 4192 struct btrfs_fs_info *fs_info = root->fs_info; 4193 int ret = 0; 4194 int cache_block_group_error = 0; 4195 struct btrfs_block_group *block_group = NULL; 4196 struct btrfs_space_info *space_info; 4197 bool full_search = false; 4198 4199 WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize); 4200 4201 ffe_ctl->search_start = 0; 4202 /* For clustered allocation */ 4203 ffe_ctl->empty_cluster = 0; 4204 ffe_ctl->last_ptr = NULL; 4205 ffe_ctl->use_cluster = true; 4206 ffe_ctl->have_caching_bg = false; 4207 ffe_ctl->orig_have_caching_bg = false; 4208 ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags); 4209 ffe_ctl->loop = 0; 4210 ffe_ctl->retry_uncached = false; 4211 ffe_ctl->cached = 0; 4212 ffe_ctl->max_extent_size = 0; 4213 ffe_ctl->total_free_space = 0; 4214 ffe_ctl->found_offset = 0; 4215 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED; 4216 ffe_ctl->size_class = btrfs_calc_block_group_size_class(ffe_ctl->num_bytes); 4217 4218 if (btrfs_is_zoned(fs_info)) 4219 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED; 4220 4221 ins->type = BTRFS_EXTENT_ITEM_KEY; 4222 ins->objectid = 0; 4223 ins->offset = 0; 4224 4225 trace_find_free_extent(root, ffe_ctl); 4226 4227 space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags); 4228 if (!space_info) { 4229 btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags); 4230 return -ENOSPC; 4231 } 4232 4233 ret = prepare_allocation(fs_info, ffe_ctl,
space_info, ins); 4234 if (ret < 0) 4235 return ret; 4236 4237 ffe_ctl->search_start = max(ffe_ctl->search_start, 4238 first_logical_byte(fs_info)); 4239 ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte); 4240 if (ffe_ctl->search_start == ffe_ctl->hint_byte) { 4241 block_group = btrfs_lookup_block_group(fs_info, 4242 ffe_ctl->search_start); 4243 /* 4244 * we don't want to use the block group if it doesn't match our 4245 * allocation bits, or if it's not cached. 4246 * 4247 * However, if we are re-searching with an ideal block group 4248 * picked out then we don't care that the block group is cached. 4249 */ 4250 if (block_group && block_group_bits(block_group, ffe_ctl->flags) && 4251 block_group->cached != BTRFS_CACHE_NO) { 4252 down_read(&space_info->groups_sem); 4253 if (list_empty(&block_group->list) || 4254 block_group->ro) { 4255 /* 4256 * someone is removing this block group, 4257 * we can't jump into the have_block_group 4258 * target because our list pointers are not 4259 * valid 4260 */ 4261 btrfs_put_block_group(block_group); 4262 up_read(&space_info->groups_sem); 4263 } else { 4264 ffe_ctl->index = btrfs_bg_flags_to_raid_index( 4265 block_group->flags); 4266 btrfs_lock_block_group(block_group, 4267 ffe_ctl->delalloc); 4268 ffe_ctl->hinted = true; 4269 goto have_block_group; 4270 } 4271 } else if (block_group) { 4272 btrfs_put_block_group(block_group); 4273 } 4274 } 4275 search: 4276 trace_find_free_extent_search_loop(root, ffe_ctl); 4277 ffe_ctl->have_caching_bg = false; 4278 if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) || 4279 ffe_ctl->index == 0) 4280 full_search = true; 4281 down_read(&space_info->groups_sem); 4282 list_for_each_entry(block_group, 4283 &space_info->block_groups[ffe_ctl->index], list) { 4284 struct btrfs_block_group *bg_ret; 4285 4286 ffe_ctl->hinted = false; 4287 /* If the block group is read-only, we can skip it entirely. */ 4288 if (unlikely(block_group->ro)) { 4289 if (ffe_ctl->for_treelog) 4290 btrfs_clear_treelog_bg(block_group); 4291 if (ffe_ctl->for_data_reloc) 4292 btrfs_clear_data_reloc_bg(block_group); 4293 continue; 4294 } 4295 4296 btrfs_grab_block_group(block_group, ffe_ctl->delalloc); 4297 ffe_ctl->search_start = block_group->start; 4298 4299 /* 4300 * this can happen if we end up cycling through all the 4301 * raid types, but we want to make sure we only allocate 4302 * for the proper type. 4303 */ 4304 if (!block_group_bits(block_group, ffe_ctl->flags)) { 4305 u64 extra = BTRFS_BLOCK_GROUP_DUP | 4306 BTRFS_BLOCK_GROUP_RAID1_MASK | 4307 BTRFS_BLOCK_GROUP_RAID56_MASK | 4308 BTRFS_BLOCK_GROUP_RAID10; 4309 4310 /* 4311 * if they asked for extra copies and this block group 4312 * doesn't provide them, bail. This does allow us to 4313 * fill raid0 from raid1. 4314 */ 4315 if ((ffe_ctl->flags & extra) && !(block_group->flags & extra)) 4316 goto loop; 4317 4318 /* 4319 * This block group has different flags than we want. 4320 * It's possible that we have MIXED_GROUP flag but no 4321 * block group is mixed. Just skip such a block group.
4322 */ 4323 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4324 continue; 4325 } 4326 4327 have_block_group: 4328 trace_find_free_extent_have_block_group(root, ffe_ctl, block_group); 4329 ffe_ctl->cached = btrfs_block_group_done(block_group); 4330 if (unlikely(!ffe_ctl->cached)) { 4331 ffe_ctl->have_caching_bg = true; 4332 ret = btrfs_cache_block_group(block_group, false); 4333 4334 /* 4335 * If we get ENOMEM here or something else we want to 4336 * try other block groups, because it may not be fatal. 4337 * However, if we can't find anything else we need to 4338 * save our return here so that we return the actual 4339 * error that caused problems, not ENOSPC. 4340 */ 4341 if (ret < 0) { 4342 if (!cache_block_group_error) 4343 cache_block_group_error = ret; 4344 ret = 0; 4345 goto loop; 4346 } 4347 ret = 0; 4348 } 4349 4350 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) { 4351 if (!cache_block_group_error) 4352 cache_block_group_error = -EIO; 4353 goto loop; 4354 } 4355 4356 if (!find_free_extent_check_size_class(ffe_ctl, block_group)) 4357 goto loop; 4358 4359 bg_ret = NULL; 4360 ret = do_allocation(block_group, ffe_ctl, &bg_ret); 4361 if (ret > 0) 4362 goto loop; 4363 4364 if (bg_ret && bg_ret != block_group) { 4365 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4366 block_group = bg_ret; 4367 } 4368 4369 /* Checks */ 4370 ffe_ctl->search_start = round_up(ffe_ctl->found_offset, 4371 fs_info->stripesize); 4372 4373 /* move on to the next group */ 4374 if (ffe_ctl->search_start + ffe_ctl->num_bytes > 4375 block_group->start + block_group->length) { 4376 btrfs_add_free_space_unused(block_group, 4377 ffe_ctl->found_offset, 4378 ffe_ctl->num_bytes); 4379 goto loop; 4380 } 4381 4382 if (ffe_ctl->found_offset < ffe_ctl->search_start) 4383 btrfs_add_free_space_unused(block_group, 4384 ffe_ctl->found_offset, 4385 ffe_ctl->search_start - ffe_ctl->found_offset); 4386 4387 ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes, 4388 ffe_ctl->num_bytes, 4389 ffe_ctl->delalloc, 4390 ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS); 4391 if (ret == -EAGAIN) { 4392 btrfs_add_free_space_unused(block_group, 4393 ffe_ctl->found_offset, 4394 ffe_ctl->num_bytes); 4395 goto loop; 4396 } 4397 btrfs_inc_block_group_reservations(block_group); 4398 4399 /* we are all good, let's return */ 4400 ins->objectid = ffe_ctl->search_start; 4401 ins->offset = ffe_ctl->num_bytes; 4402 4403 trace_btrfs_reserve_extent(block_group, ffe_ctl); 4404 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4405 break; 4406 loop: 4407 if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT && 4408 !ffe_ctl->retry_uncached) { 4409 ffe_ctl->retry_uncached = true; 4410 btrfs_wait_block_group_cache_progress(block_group, 4411 ffe_ctl->num_bytes + 4412 ffe_ctl->empty_cluster + 4413 ffe_ctl->empty_size); 4414 goto have_block_group; 4415 } 4416 release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc); 4417 cond_resched(); 4418 } 4419 up_read(&space_info->groups_sem); 4420 4421 ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search); 4422 if (ret > 0) 4423 goto search; 4424 4425 if (ret == -ENOSPC && !cache_block_group_error) { 4426 /* 4427 * Use ffe_ctl->total_free_space as fallback if we can't find 4428 * any contiguous hole.
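* (With the zoned sequential allocator free space is always contiguous,
* so total_free_space reflects the real maximum there.)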
4429 */ 4430 if (!ffe_ctl->max_extent_size) 4431 ffe_ctl->max_extent_size = ffe_ctl->total_free_space; 4432 spin_lock(&space_info->lock); 4433 space_info->max_extent_size = ffe_ctl->max_extent_size; 4434 spin_unlock(&space_info->lock); 4435 ins->offset = ffe_ctl->max_extent_size; 4436 } else if (ret == -ENOSPC) { 4437 ret = cache_block_group_error; 4438 } 4439 return ret; 4440 } 4441 4442 /* 4443 * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a 4444 * hole that is at least as big as @num_bytes. 4445 * 4446 * @root - The root that will contain this extent 4447 * 4448 * @ram_bytes - The amount of space in RAM that @num_bytes takes. This 4449 * is used for accounting purposes. This value differs 4450 * from @num_bytes only in the case of compressed extents. 4451 * 4452 * @num_bytes - Number of bytes to allocate on-disk. 4453 * 4454 * @min_alloc_size - Indicates the minimum amount of space that the 4455 * allocator should try to satisfy. In some cases 4456 * @num_bytes may be larger than what is required and if 4457 * the filesystem is fragmented then the allocation fails. 4458 * However, the presence of @min_alloc_size gives a 4459 * chance to try and satisfy the smaller allocation. 4460 * 4461 * @empty_size - A hint that you plan on doing more COW. This is the 4462 * size in bytes of free space the allocator should try to find 4463 * next to the block it returns. This is just a hint and 4464 * may be ignored by the allocator. 4465 * 4466 * @hint_byte - Hint to the allocator to start searching above the byte 4467 * address passed. It might be ignored. 4468 * 4469 * @ins - This key is modified to record the found hole. It will 4470 * have the following values: 4471 * ins->objectid == start position 4472 * ins->type == BTRFS_EXTENT_ITEM_KEY 4473 * ins->offset == the size of the hole. 4474 * 4475 * @is_data - Boolean flag indicating whether an extent is 4476 * allocated for data (true) or metadata (false) 4477 * 4478 * @delalloc - Boolean flag indicating whether this allocation is for 4479 * delalloc or not. If 'true', the data_rwsem of block groups 4480 * is going to be acquired. 4481 * 4482 * 4483 * Returns 0 when an allocation succeeded or < 0 when an error occurred. In 4484 * case -ENOSPC is returned then @ins->offset will contain the size of the 4485 * largest available hole the allocator managed to find.
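* The -ENOSPC retry loop below uses that size to shrink @num_bytes down
* towards @min_alloc_size before giving up.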
4486 */ 4487 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, 4488 u64 num_bytes, u64 min_alloc_size, 4489 u64 empty_size, u64 hint_byte, 4490 struct btrfs_key *ins, int is_data, int delalloc) 4491 { 4492 struct btrfs_fs_info *fs_info = root->fs_info; 4493 struct find_free_extent_ctl ffe_ctl = {}; 4494 bool final_tried = num_bytes == min_alloc_size; 4495 u64 flags; 4496 int ret; 4497 bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 4498 bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data); 4499 4500 flags = get_alloc_profile_by_root(root, is_data); 4501 again: 4502 WARN_ON(num_bytes < fs_info->sectorsize); 4503 4504 ffe_ctl.ram_bytes = ram_bytes; 4505 ffe_ctl.num_bytes = num_bytes; 4506 ffe_ctl.min_alloc_size = min_alloc_size; 4507 ffe_ctl.empty_size = empty_size; 4508 ffe_ctl.flags = flags; 4509 ffe_ctl.delalloc = delalloc; 4510 ffe_ctl.hint_byte = hint_byte; 4511 ffe_ctl.for_treelog = for_treelog; 4512 ffe_ctl.for_data_reloc = for_data_reloc; 4513 4514 ret = find_free_extent(root, ins, &ffe_ctl); 4515 if (!ret && !is_data) { 4516 btrfs_dec_block_group_reservations(fs_info, ins->objectid); 4517 } else if (ret == -ENOSPC) { 4518 if (!final_tried && ins->offset) { 4519 num_bytes = min(num_bytes >> 1, ins->offset); 4520 num_bytes = round_down(num_bytes, 4521 fs_info->sectorsize); 4522 num_bytes = max(num_bytes, min_alloc_size); 4523 ram_bytes = num_bytes; 4524 if (num_bytes == min_alloc_size) 4525 final_tried = true; 4526 goto again; 4527 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 4528 struct btrfs_space_info *sinfo; 4529 4530 sinfo = btrfs_find_space_info(fs_info, flags); 4531 btrfs_err(fs_info, 4532 "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d", 4533 flags, num_bytes, for_treelog, for_data_reloc); 4534 if (sinfo) 4535 btrfs_dump_space_info(fs_info, sinfo, 4536 num_bytes, 1); 4537 } 4538 } 4539 4540 return ret; 4541 } 4542 4543 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, 4544 u64 start, u64 len, int delalloc) 4545 { 4546 struct btrfs_block_group *cache; 4547 4548 cache = btrfs_lookup_block_group(fs_info, start); 4549 if (!cache) { 4550 btrfs_err(fs_info, "Unable to find block group for %llu", 4551 start); 4552 return -ENOSPC; 4553 } 4554 4555 btrfs_add_free_space(cache, start, len); 4556 btrfs_free_reserved_bytes(cache, len, delalloc); 4557 trace_btrfs_reserved_extent_free(fs_info, start, len); 4558 4559 btrfs_put_block_group(cache); 4560 return 0; 4561 } 4562 4563 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, 4564 u64 len) 4565 { 4566 struct btrfs_block_group *cache; 4567 int ret = 0; 4568 4569 cache = btrfs_lookup_block_group(trans->fs_info, start); 4570 if (!cache) { 4571 btrfs_err(trans->fs_info, "unable to find block group for %llu", 4572 start); 4573 return -ENOSPC; 4574 } 4575 4576 ret = pin_down_extent(trans, cache, start, len, 1); 4577 btrfs_put_block_group(cache); 4578 return ret; 4579 } 4580 4581 static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr, 4582 u64 num_bytes) 4583 { 4584 struct btrfs_fs_info *fs_info = trans->fs_info; 4585 int ret; 4586 4587 ret = remove_from_free_space_tree(trans, bytenr, num_bytes); 4588 if (ret) 4589 return ret; 4590 4591 ret = btrfs_update_block_group(trans, bytenr, num_bytes, true); 4592 if (ret) { 4593 ASSERT(!ret); 4594 btrfs_err(fs_info, "update block group failed for %llu %llu", 4595 bytenr, num_bytes); 4596 return ret; 4597 } 4598 4599 trace_btrfs_reserved_extent_alloc(fs_info, bytenr, 
num_bytes); 4600 return 0; 4601 } 4602 4603 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 4604 u64 parent, u64 root_objectid, 4605 u64 flags, u64 owner, u64 offset, 4606 struct btrfs_key *ins, int ref_mod) 4607 { 4608 struct btrfs_fs_info *fs_info = trans->fs_info; 4609 struct btrfs_root *extent_root; 4610 int ret; 4611 struct btrfs_extent_item *extent_item; 4612 struct btrfs_extent_inline_ref *iref; 4613 struct btrfs_path *path; 4614 struct extent_buffer *leaf; 4615 int type; 4616 u32 size; 4617 4618 if (parent > 0) 4619 type = BTRFS_SHARED_DATA_REF_KEY; 4620 else 4621 type = BTRFS_EXTENT_DATA_REF_KEY; 4622 4623 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); 4624 4625 path = btrfs_alloc_path(); 4626 if (!path) 4627 return -ENOMEM; 4628 4629 extent_root = btrfs_extent_root(fs_info, ins->objectid); 4630 ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size); 4631 if (ret) { 4632 btrfs_free_path(path); 4633 return ret; 4634 } 4635 4636 leaf = path->nodes[0]; 4637 extent_item = btrfs_item_ptr(leaf, path->slots[0], 4638 struct btrfs_extent_item); 4639 btrfs_set_extent_refs(leaf, extent_item, ref_mod); 4640 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 4641 btrfs_set_extent_flags(leaf, extent_item, 4642 flags | BTRFS_EXTENT_FLAG_DATA); 4643 4644 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 4645 btrfs_set_extent_inline_ref_type(leaf, iref, type); 4646 if (parent > 0) { 4647 struct btrfs_shared_data_ref *ref; 4648 ref = (struct btrfs_shared_data_ref *)(iref + 1); 4649 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 4650 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); 4651 } else { 4652 struct btrfs_extent_data_ref *ref; 4653 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 4654 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); 4655 btrfs_set_extent_data_ref_objectid(leaf, ref, owner); 4656 btrfs_set_extent_data_ref_offset(leaf, ref, offset); 4657 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); 4658 } 4659 4660 btrfs_mark_buffer_dirty(path->nodes[0]); 4661 btrfs_free_path(path); 4662 4663 return alloc_reserved_extent(trans, ins->objectid, ins->offset); 4664 } 4665 4666 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, 4667 struct btrfs_delayed_ref_node *node, 4668 struct btrfs_delayed_extent_op *extent_op) 4669 { 4670 struct btrfs_fs_info *fs_info = trans->fs_info; 4671 struct btrfs_root *extent_root; 4672 int ret; 4673 struct btrfs_extent_item *extent_item; 4674 struct btrfs_key extent_key; 4675 struct btrfs_tree_block_info *block_info; 4676 struct btrfs_extent_inline_ref *iref; 4677 struct btrfs_path *path; 4678 struct extent_buffer *leaf; 4679 struct btrfs_delayed_tree_ref *ref; 4680 u32 size = sizeof(*extent_item) + sizeof(*iref); 4681 u64 flags = extent_op->flags_to_set; 4682 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 4683 4684 ref = btrfs_delayed_node_to_tree_ref(node); 4685 4686 extent_key.objectid = node->bytenr; 4687 if (skinny_metadata) { 4688 extent_key.offset = ref->level; 4689 extent_key.type = BTRFS_METADATA_ITEM_KEY; 4690 } else { 4691 extent_key.offset = node->num_bytes; 4692 extent_key.type = BTRFS_EXTENT_ITEM_KEY; 4693 size += sizeof(*block_info); 4694 } 4695 4696 path = btrfs_alloc_path(); 4697 if (!path) 4698 return -ENOMEM; 4699 4700 extent_root = btrfs_extent_root(fs_info, extent_key.objectid); 4701 ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key, 4702 size); 4703 if (ret) { 4704 
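/* Insertion into the extent root failed: release the path before returning the error. */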
btrfs_free_path(path); 4705 return ret; 4706 } 4707 4708 leaf = path->nodes[0]; 4709 extent_item = btrfs_item_ptr(leaf, path->slots[0], 4710 struct btrfs_extent_item); 4711 btrfs_set_extent_refs(leaf, extent_item, 1); 4712 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 4713 btrfs_set_extent_flags(leaf, extent_item, 4714 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); 4715 4716 if (skinny_metadata) { 4717 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 4718 } else { 4719 block_info = (struct btrfs_tree_block_info *)(extent_item + 1); 4720 btrfs_set_tree_block_key(leaf, block_info, &extent_op->key); 4721 btrfs_set_tree_block_level(leaf, block_info, ref->level); 4722 iref = (struct btrfs_extent_inline_ref *)(block_info + 1); 4723 } 4724 4725 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) { 4726 btrfs_set_extent_inline_ref_type(leaf, iref, 4727 BTRFS_SHARED_BLOCK_REF_KEY); 4728 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent); 4729 } else { 4730 btrfs_set_extent_inline_ref_type(leaf, iref, 4731 BTRFS_TREE_BLOCK_REF_KEY); 4732 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root); 4733 } 4734 4735 btrfs_mark_buffer_dirty(leaf); 4736 btrfs_free_path(path); 4737 4738 return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize); 4739 } 4740 4741 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 4742 struct btrfs_root *root, u64 owner, 4743 u64 offset, u64 ram_bytes, 4744 struct btrfs_key *ins) 4745 { 4746 struct btrfs_ref generic_ref = { 0 }; 4747 4748 BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 4749 4750 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, 4751 ins->objectid, ins->offset, 0); 4752 btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, 4753 offset, 0, false); 4754 btrfs_ref_tree_mod(root->fs_info, &generic_ref); 4755 4756 return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes); 4757 } 4758 4759 /* 4760 * this is used by the tree logging recovery code. It records that 4761 * an extent has been allocated and makes sure to clear the free 4762 * space cache bits as well 4763 */ 4764 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 4765 u64 root_objectid, u64 owner, u64 offset, 4766 struct btrfs_key *ins) 4767 { 4768 struct btrfs_fs_info *fs_info = trans->fs_info; 4769 int ret; 4770 struct btrfs_block_group *block_group; 4771 struct btrfs_space_info *space_info; 4772 4773 /* 4774 * Mixed block groups will exclude before processing the log so we only 4775 * need to do the exclude dance if this fs isn't mixed. 
4776 */ 4777 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { 4778 ret = __exclude_logged_extent(fs_info, ins->objectid, 4779 ins->offset); 4780 if (ret) 4781 return ret; 4782 } 4783 4784 block_group = btrfs_lookup_block_group(fs_info, ins->objectid); 4785 if (!block_group) 4786 return -EINVAL; 4787 4788 space_info = block_group->space_info; 4789 spin_lock(&space_info->lock); 4790 spin_lock(&block_group->lock); 4791 space_info->bytes_reserved += ins->offset; 4792 block_group->reserved += ins->offset; 4793 spin_unlock(&block_group->lock); 4794 spin_unlock(&space_info->lock); 4795 4796 ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner, 4797 offset, ins, 1); 4798 if (ret) 4799 btrfs_pin_extent(trans, ins->objectid, ins->offset, 1); 4800 btrfs_put_block_group(block_group); 4801 return ret; 4802 } 4803 4804 static struct extent_buffer * 4805 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4806 u64 bytenr, int level, u64 owner, 4807 enum btrfs_lock_nesting nest) 4808 { 4809 struct btrfs_fs_info *fs_info = root->fs_info; 4810 struct extent_buffer *buf; 4811 u64 lockdep_owner = owner; 4812 4813 buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level); 4814 if (IS_ERR(buf)) 4815 return buf; 4816 4817 /* 4818 * Extra safety check in case the extent tree is corrupted and extent 4819 * allocator chooses to use a tree block which is already used and 4820 * locked. 4821 */ 4822 if (buf->lock_owner == current->pid) { 4823 btrfs_err_rl(fs_info, 4824 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected", 4825 buf->start, btrfs_header_owner(buf), current->pid); 4826 free_extent_buffer(buf); 4827 return ERR_PTR(-EUCLEAN); 4828 } 4829 4830 /* 4831 * The reloc trees are just snapshots, so we need them to appear to be 4832 * just like any other fs tree WRT lockdep. 4833 * 4834 * The exception however is in replace_path() in relocation, where we 4835 * hold the lock on the original fs root and then search for the reloc 4836 * root. At that point we need to make sure any reloc root buffers are 4837 * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make 4838 * lockdep happy. 4839 */ 4840 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID && 4841 !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state)) 4842 lockdep_owner = BTRFS_FS_TREE_OBJECTID; 4843 4844 /* btrfs_clear_buffer_dirty() accesses generation field. */ 4845 btrfs_set_header_generation(buf, trans->transid); 4846 4847 /* 4848 * This needs to stay, because we could allocate a freed block from an 4849 * old tree into a new tree, so we need to make sure this new block is 4850 * set to the appropriate level and owner. 
4851 */ 4852 btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level); 4853 4854 __btrfs_tree_lock(buf, nest); 4855 btrfs_clear_buffer_dirty(trans, buf); 4856 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); 4857 clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags); 4858 4859 set_extent_buffer_uptodate(buf); 4860 4861 memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header)); 4862 btrfs_set_header_level(buf, level); 4863 btrfs_set_header_bytenr(buf, buf->start); 4864 btrfs_set_header_generation(buf, trans->transid); 4865 btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV); 4866 btrfs_set_header_owner(buf, owner); 4867 write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid); 4868 write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid); 4869 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 4870 buf->log_index = root->log_transid % 2; 4871 /* 4872 * we allow two log transactions at a time, use different 4873 * EXTENT bit to differentiate dirty pages. 4874 */ 4875 if (buf->log_index == 0) 4876 set_extent_bit(&root->dirty_log_pages, buf->start, 4877 buf->start + buf->len - 1, 4878 EXTENT_DIRTY, NULL); 4879 else 4880 set_extent_bit(&root->dirty_log_pages, buf->start, 4881 buf->start + buf->len - 1, 4882 EXTENT_NEW, NULL); 4883 } else { 4884 buf->log_index = -1; 4885 set_extent_bit(&trans->transaction->dirty_pages, buf->start, 4886 buf->start + buf->len - 1, EXTENT_DIRTY, NULL); 4887 } 4888 /* this returns a buffer locked for blocking */ 4889 return buf; 4890 } 4891 4892 /* 4893 * finds a free extent and does all the dirty work required for allocation 4894 * returns the tree buffer or an ERR_PTR on error. 4895 */ 4896 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 4897 struct btrfs_root *root, 4898 u64 parent, u64 root_objectid, 4899 const struct btrfs_disk_key *key, 4900 int level, u64 hint, 4901 u64 empty_size, 4902 enum btrfs_lock_nesting nest) 4903 { 4904 struct btrfs_fs_info *fs_info = root->fs_info; 4905 struct btrfs_key ins; 4906 struct btrfs_block_rsv *block_rsv; 4907 struct extent_buffer *buf; 4908 struct btrfs_delayed_extent_op *extent_op; 4909 struct btrfs_ref generic_ref = { 0 }; 4910 u64 flags = 0; 4911 int ret; 4912 u32 blocksize = fs_info->nodesize; 4913 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 4914 4915 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4916 if (btrfs_is_testing(fs_info)) { 4917 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, 4918 level, root_objectid, nest); 4919 if (!IS_ERR(buf)) 4920 root->alloc_bytenr += blocksize; 4921 return buf; 4922 } 4923 #endif 4924 4925 block_rsv = btrfs_use_block_rsv(trans, root, blocksize); 4926 if (IS_ERR(block_rsv)) 4927 return ERR_CAST(block_rsv); 4928 4929 ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize, 4930 empty_size, hint, &ins, 0, 0); 4931 if (ret) 4932 goto out_unuse; 4933 4934 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level, 4935 root_objectid, nest); 4936 if (IS_ERR(buf)) { 4937 ret = PTR_ERR(buf); 4938 goto out_free_reserved; 4939 } 4940 4941 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 4942 if (parent == 0) 4943 parent = ins.objectid; 4944 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 4945 } else 4946 BUG_ON(parent > 0); 4947 4948 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 4949 extent_op = btrfs_alloc_delayed_extent_op(); 4950 if (!extent_op) { 4951 ret = -ENOMEM; 4952 goto out_free_buf; 4953 } 4954 if (key) 4955 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 4956 else 4957 
memset(&extent_op->key, 0, sizeof(extent_op->key)); 4958 extent_op->flags_to_set = flags; 4959 extent_op->update_key = skinny_metadata ? false : true; 4960 extent_op->update_flags = true; 4961 extent_op->level = level; 4962 4963 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, 4964 ins.objectid, ins.offset, parent); 4965 btrfs_init_tree_ref(&generic_ref, level, root_objectid, 4966 root->root_key.objectid, false); 4967 btrfs_ref_tree_mod(fs_info, &generic_ref); 4968 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op); 4969 if (ret) 4970 goto out_free_delayed; 4971 } 4972 return buf; 4973 4974 out_free_delayed: 4975 btrfs_free_delayed_extent_op(extent_op); 4976 out_free_buf: 4977 btrfs_tree_unlock(buf); 4978 free_extent_buffer(buf); 4979 out_free_reserved: 4980 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0); 4981 out_unuse: 4982 btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize); 4983 return ERR_PTR(ret); 4984 } 4985 4986 struct walk_control { 4987 u64 refs[BTRFS_MAX_LEVEL]; 4988 u64 flags[BTRFS_MAX_LEVEL]; 4989 struct btrfs_key update_progress; 4990 struct btrfs_key drop_progress; 4991 int drop_level; 4992 int stage; 4993 int level; 4994 int shared_level; 4995 int update_ref; 4996 int keep_locks; 4997 int reada_slot; 4998 int reada_count; 4999 int restarted; 5000 }; 5001 5002 #define DROP_REFERENCE 1 5003 #define UPDATE_BACKREF 2 5004 5005 static noinline void reada_walk_down(struct btrfs_trans_handle *trans, 5006 struct btrfs_root *root, 5007 struct walk_control *wc, 5008 struct btrfs_path *path) 5009 { 5010 struct btrfs_fs_info *fs_info = root->fs_info; 5011 u64 bytenr; 5012 u64 generation; 5013 u64 refs; 5014 u64 flags; 5015 u32 nritems; 5016 struct btrfs_key key; 5017 struct extent_buffer *eb; 5018 int ret; 5019 int slot; 5020 int nread = 0; 5021 5022 if (path->slots[wc->level] < wc->reada_slot) { 5023 wc->reada_count = wc->reada_count * 2 / 3; 5024 wc->reada_count = max(wc->reada_count, 2); 5025 } else { 5026 wc->reada_count = wc->reada_count * 3 / 2; 5027 wc->reada_count = min_t(int, wc->reada_count, 5028 BTRFS_NODEPTRS_PER_BLOCK(fs_info)); 5029 } 5030 5031 eb = path->nodes[wc->level]; 5032 nritems = btrfs_header_nritems(eb); 5033 5034 for (slot = path->slots[wc->level]; slot < nritems; slot++) { 5035 if (nread >= wc->reada_count) 5036 break; 5037 5038 cond_resched(); 5039 bytenr = btrfs_node_blockptr(eb, slot); 5040 generation = btrfs_node_ptr_generation(eb, slot); 5041 5042 if (slot == path->slots[wc->level]) 5043 goto reada; 5044 5045 if (wc->stage == UPDATE_BACKREF && 5046 generation <= root->root_key.offset) 5047 continue; 5048 5049 /* We don't lock the tree block, it's OK to be racy here */ 5050 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, 5051 wc->level - 1, 1, &refs, 5052 &flags); 5053 /* We don't care about errors in readahead. 
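* If the extent info lookup fails we simply skip readahead for that node.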
*/ 5054 if (ret < 0) 5055 continue; 5056 BUG_ON(refs == 0); 5057 5058 if (wc->stage == DROP_REFERENCE) { 5059 if (refs == 1) 5060 goto reada; 5061 5062 if (wc->level == 1 && 5063 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5064 continue; 5065 if (!wc->update_ref || 5066 generation <= root->root_key.offset) 5067 continue; 5068 btrfs_node_key_to_cpu(eb, &key, slot); 5069 ret = btrfs_comp_cpu_keys(&key, 5070 &wc->update_progress); 5071 if (ret < 0) 5072 continue; 5073 } else { 5074 if (wc->level == 1 && 5075 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5076 continue; 5077 } 5078 reada: 5079 btrfs_readahead_node_child(eb, slot); 5080 nread++; 5081 } 5082 wc->reada_slot = slot; 5083 } 5084 5085 /* 5086 * helper to process tree block while walking down the tree. 5087 * 5088 * when wc->stage == UPDATE_BACKREF, this function updates 5089 * back refs for pointers in the block. 5090 * 5091 * NOTE: return value 1 means we should stop walking down. 5092 */ 5093 static noinline int walk_down_proc(struct btrfs_trans_handle *trans, 5094 struct btrfs_root *root, 5095 struct btrfs_path *path, 5096 struct walk_control *wc, int lookup_info) 5097 { 5098 struct btrfs_fs_info *fs_info = root->fs_info; 5099 int level = wc->level; 5100 struct extent_buffer *eb = path->nodes[level]; 5101 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; 5102 int ret; 5103 5104 if (wc->stage == UPDATE_BACKREF && 5105 btrfs_header_owner(eb) != root->root_key.objectid) 5106 return 1; 5107 5108 /* 5109 * when reference count of tree block is 1, it won't increase 5110 * again. once full backref flag is set, we never clear it. 5111 */ 5112 if (lookup_info && 5113 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || 5114 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { 5115 BUG_ON(!path->locks[level]); 5116 ret = btrfs_lookup_extent_info(trans, fs_info, 5117 eb->start, level, 1, 5118 &wc->refs[level], 5119 &wc->flags[level]); 5120 BUG_ON(ret == -ENOMEM); 5121 if (ret) 5122 return ret; 5123 BUG_ON(wc->refs[level] == 0); 5124 } 5125 5126 if (wc->stage == DROP_REFERENCE) { 5127 if (wc->refs[level] > 1) 5128 return 1; 5129 5130 if (path->locks[level] && !wc->keep_locks) { 5131 btrfs_tree_unlock_rw(eb, path->locks[level]); 5132 path->locks[level] = 0; 5133 } 5134 return 0; 5135 } 5136 5137 /* wc->stage == UPDATE_BACKREF */ 5138 if (!(wc->flags[level] & flag)) { 5139 BUG_ON(!path->locks[level]); 5140 ret = btrfs_inc_ref(trans, root, eb, 1); 5141 BUG_ON(ret); /* -ENOMEM */ 5142 ret = btrfs_dec_ref(trans, root, eb, 0); 5143 BUG_ON(ret); /* -ENOMEM */ 5144 ret = btrfs_set_disk_extent_flags(trans, eb, flag); 5145 BUG_ON(ret); /* -ENOMEM */ 5146 wc->flags[level] |= flag; 5147 } 5148 5149 /* 5150 * the block is shared by multiple trees, so it's not good to 5151 * keep the tree lock 5152 */ 5153 if (path->locks[level] && level > 0) { 5154 btrfs_tree_unlock_rw(eb, path->locks[level]); 5155 path->locks[level] = 0; 5156 } 5157 return 0; 5158 } 5159 5160 /* 5161 * This is used to verify a ref exists for this root to deal with a bug where we 5162 * would have a drop_progress key that hadn't been updated properly. 
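* Returns 1 if the ref exists, 0 if it does not, and a negative errno on
* lookup failure.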
5163 */ 5164 static int check_ref_exists(struct btrfs_trans_handle *trans, 5165 struct btrfs_root *root, u64 bytenr, u64 parent, 5166 int level) 5167 { 5168 struct btrfs_path *path; 5169 struct btrfs_extent_inline_ref *iref; 5170 int ret; 5171 5172 path = btrfs_alloc_path(); 5173 if (!path) 5174 return -ENOMEM; 5175 5176 ret = lookup_extent_backref(trans, path, &iref, bytenr, 5177 root->fs_info->nodesize, parent, 5178 root->root_key.objectid, level, 0); 5179 btrfs_free_path(path); 5180 if (ret == -ENOENT) 5181 return 0; 5182 if (ret < 0) 5183 return ret; 5184 return 1; 5185 } 5186 5187 /* 5188 * helper to process tree block pointer. 5189 * 5190 * when wc->stage == DROP_REFERENCE, this function checks the 5191 * reference count of the block pointed to. if the block is shared 5192 * and we need to update back refs for the subtree rooted at the 5193 * block, this function changes wc->stage to UPDATE_BACKREF. if the 5194 * block is shared and there is no need to update backrefs, this 5195 * function drops the reference to the block. 5196 * 5197 * 5198 * NOTE: return value 1 means we should stop walking down. 5199 */ 5200 static noinline int do_walk_down(struct btrfs_trans_handle *trans, 5201 struct btrfs_root *root, 5202 struct btrfs_path *path, 5203 struct walk_control *wc, int *lookup_info) 5204 { 5205 struct btrfs_fs_info *fs_info = root->fs_info; 5206 u64 bytenr; 5207 u64 generation; 5208 u64 parent; 5209 struct btrfs_tree_parent_check check = { 0 }; 5210 struct btrfs_key key; 5211 struct btrfs_ref ref = { 0 }; 5212 struct extent_buffer *next; 5213 int level = wc->level; 5214 int reada = 0; 5215 int ret = 0; 5216 bool need_account = false; 5217 5218 generation = btrfs_node_ptr_generation(path->nodes[level], 5219 path->slots[level]); 5220 /* 5221 * if the lower level block was created before the snapshot 5222 * was created, we know there is no need to update back refs 5223 * for the subtree 5224 */ 5225 if (wc->stage == UPDATE_BACKREF && 5226 generation <= root->root_key.offset) { 5227 *lookup_info = 1; 5228 return 1; 5229 } 5230 5231 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); 5232 5233 check.level = level - 1; 5234 check.transid = generation; 5235 check.owner_root = root->root_key.objectid; 5236 check.has_first_key = true; 5237 btrfs_node_key_to_cpu(path->nodes[level], &check.first_key, 5238 path->slots[level]); 5239 5240 next = find_extent_buffer(fs_info, bytenr); 5241 if (!next) { 5242 next = btrfs_find_create_tree_block(fs_info, bytenr, 5243 root->root_key.objectid, level - 1); 5244 if (IS_ERR(next)) 5245 return PTR_ERR(next); 5246 reada = 1; 5247 } 5248 btrfs_tree_lock(next); 5249 5250 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1, 5251 &wc->refs[level - 1], 5252 &wc->flags[level - 1]); 5253 if (ret < 0) 5254 goto out_unlock; 5255 5256 if (unlikely(wc->refs[level - 1] == 0)) { 5257 btrfs_err(fs_info, "Missing references."); 5258 ret = -EIO; 5259 goto out_unlock; 5260 } 5261 *lookup_info = 0; 5262 5263 if (wc->stage == DROP_REFERENCE) { 5264 if (wc->refs[level - 1] > 1) { 5265 need_account = true; 5266 if (level == 1 && 5267 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5268 goto skip; 5269 5270 if (!wc->update_ref || 5271 generation <= root->root_key.offset) 5272 goto skip; 5273 5274 btrfs_node_key_to_cpu(path->nodes[level], &key, 5275 path->slots[level]); 5276 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); 5277 if (ret < 0) 5278 goto skip; 5279 5280 wc->stage = UPDATE_BACKREF; 5281 wc->shared_level = level - 1; 5282 } 5283 } else { 5284
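/* wc->stage == UPDATE_BACKREF */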
if (level == 1 && 5285 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5286 goto skip; 5287 } 5288 5289 if (!btrfs_buffer_uptodate(next, generation, 0)) { 5290 btrfs_tree_unlock(next); 5291 free_extent_buffer(next); 5292 next = NULL; 5293 *lookup_info = 1; 5294 } 5295 5296 if (!next) { 5297 if (reada && level == 1) 5298 reada_walk_down(trans, root, wc, path); 5299 next = read_tree_block(fs_info, bytenr, &check); 5300 if (IS_ERR(next)) { 5301 return PTR_ERR(next); 5302 } else if (!extent_buffer_uptodate(next)) { 5303 free_extent_buffer(next); 5304 return -EIO; 5305 } 5306 btrfs_tree_lock(next); 5307 } 5308 5309 level--; 5310 ASSERT(level == btrfs_header_level(next)); 5311 if (level != btrfs_header_level(next)) { 5312 btrfs_err(root->fs_info, "mismatched level"); 5313 ret = -EIO; 5314 goto out_unlock; 5315 } 5316 path->nodes[level] = next; 5317 path->slots[level] = 0; 5318 path->locks[level] = BTRFS_WRITE_LOCK; 5319 wc->level = level; 5320 if (wc->level == 1) 5321 wc->reada_slot = 0; 5322 return 0; 5323 skip: 5324 wc->refs[level - 1] = 0; 5325 wc->flags[level - 1] = 0; 5326 if (wc->stage == DROP_REFERENCE) { 5327 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 5328 parent = path->nodes[level]->start; 5329 } else { 5330 ASSERT(root->root_key.objectid == 5331 btrfs_header_owner(path->nodes[level])); 5332 if (root->root_key.objectid != 5333 btrfs_header_owner(path->nodes[level])) { 5334 btrfs_err(root->fs_info, 5335 "mismatched block owner"); 5336 ret = -EIO; 5337 goto out_unlock; 5338 } 5339 parent = 0; 5340 } 5341 5342 /* 5343 * If we had a drop_progress we need to verify the refs are set 5344 * as expected. If we find our ref then we know that from here 5345 * on out everything should be correct, and we can clear the 5346 * ->restarted flag. 5347 */ 5348 if (wc->restarted) { 5349 ret = check_ref_exists(trans, root, bytenr, parent, 5350 level - 1); 5351 if (ret < 0) 5352 goto out_unlock; 5353 if (ret == 0) 5354 goto no_delete; 5355 ret = 0; 5356 wc->restarted = 0; 5357 } 5358 5359 /* 5360 * Reloc tree doesn't contribute to qgroup numbers, and we have 5361 * already accounted them at merge time (replace_path), 5362 * thus we could skip expensive subtree trace here. 5363 */ 5364 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 5365 need_account) { 5366 ret = btrfs_qgroup_trace_subtree(trans, next, 5367 generation, level - 1); 5368 if (ret) { 5369 btrfs_err_rl(fs_info, 5370 "Error %d accounting shared subtree. Quota is out of sync, rescan required.", 5371 ret); 5372 } 5373 } 5374 5375 /* 5376 * We need to update the next key in our walk control so we can 5377 * update the drop_progress key accordingly. We don't care if 5378 * find_next_key doesn't find a key because that means we're at 5379 * the end and are going to clean up now. 5380 */ 5381 wc->drop_level = level; 5382 find_next_key(path, level, &wc->drop_progress); 5383 5384 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr, 5385 fs_info->nodesize, parent); 5386 btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid, 5387 0, false); 5388 ret = btrfs_free_extent(trans, &ref); 5389 if (ret) 5390 goto out_unlock; 5391 } 5392 no_delete: 5393 *lookup_info = 1; 5394 ret = 1; 5395 5396 out_unlock: 5397 btrfs_tree_unlock(next); 5398 free_extent_buffer(next); 5399 5400 return ret; 5401 } 5402 5403 /* 5404 * helper to process tree block while walking up the tree. 5405 * 5406 * when wc->stage == DROP_REFERENCE, this function drops 5407 * reference count on the block. 
5408 * 5409 * when wc->stage == UPDATE_BACKREF, this function changes 5410 * wc->stage back to DROP_REFERENCE if we changed wc->stage 5411 * to UPDATE_BACKREF previously while processing the block. 5412 * 5413 * NOTE: return value 1 means we should stop walking up. 5414 */ 5415 static noinline int walk_up_proc(struct btrfs_trans_handle *trans, 5416 struct btrfs_root *root, 5417 struct btrfs_path *path, 5418 struct walk_control *wc) 5419 { 5420 struct btrfs_fs_info *fs_info = root->fs_info; 5421 int ret; 5422 int level = wc->level; 5423 struct extent_buffer *eb = path->nodes[level]; 5424 u64 parent = 0; 5425 5426 if (wc->stage == UPDATE_BACKREF) { 5427 BUG_ON(wc->shared_level < level); 5428 if (level < wc->shared_level) 5429 goto out; 5430 5431 ret = find_next_key(path, level + 1, &wc->update_progress); 5432 if (ret > 0) 5433 wc->update_ref = 0; 5434 5435 wc->stage = DROP_REFERENCE; 5436 wc->shared_level = -1; 5437 path->slots[level] = 0; 5438 5439 /* 5440 * check reference count again if the block isn't locked. 5441 * we should start walking down the tree again if reference 5442 * count is one. 5443 */ 5444 if (!path->locks[level]) { 5445 BUG_ON(level == 0); 5446 btrfs_tree_lock(eb); 5447 path->locks[level] = BTRFS_WRITE_LOCK; 5448 5449 ret = btrfs_lookup_extent_info(trans, fs_info, 5450 eb->start, level, 1, 5451 &wc->refs[level], 5452 &wc->flags[level]); 5453 if (ret < 0) { 5454 btrfs_tree_unlock_rw(eb, path->locks[level]); 5455 path->locks[level] = 0; 5456 return ret; 5457 } 5458 BUG_ON(wc->refs[level] == 0); 5459 if (wc->refs[level] == 1) { 5460 btrfs_tree_unlock_rw(eb, path->locks[level]); 5461 path->locks[level] = 0; 5462 return 1; 5463 } 5464 } 5465 } 5466 5467 /* wc->stage == DROP_REFERENCE */ 5468 BUG_ON(wc->refs[level] > 1 && !path->locks[level]); 5469 5470 if (wc->refs[level] == 1) { 5471 if (level == 0) { 5472 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5473 ret = btrfs_dec_ref(trans, root, eb, 1); 5474 else 5475 ret = btrfs_dec_ref(trans, root, eb, 0); 5476 BUG_ON(ret); /* -ENOMEM */ 5477 if (is_fstree(root->root_key.objectid)) { 5478 ret = btrfs_qgroup_trace_leaf_items(trans, eb); 5479 if (ret) { 5480 btrfs_err_rl(fs_info, 5481 "error %d accounting leaf items, quota is out of sync, rescan required", 5482 ret); 5483 } 5484 } 5485 } 5486 /* Make block locked assertion in btrfs_clear_buffer_dirty happy. 
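* btrfs_clear_buffer_dirty() asserts that the buffer is locked, so take
* the write lock here if we do not hold it already.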
*/ 5487 if (!path->locks[level]) { 5488 btrfs_tree_lock(eb); 5489 path->locks[level] = BTRFS_WRITE_LOCK; 5490 } 5491 btrfs_clear_buffer_dirty(trans, eb); 5492 } 5493 5494 if (eb == root->node) { 5495 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5496 parent = eb->start; 5497 else if (root->root_key.objectid != btrfs_header_owner(eb)) 5498 goto owner_mismatch; 5499 } else { 5500 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5501 parent = path->nodes[level + 1]->start; 5502 else if (root->root_key.objectid != 5503 btrfs_header_owner(path->nodes[level + 1])) 5504 goto owner_mismatch; 5505 } 5506 5507 btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent, 5508 wc->refs[level] == 1); 5509 out: 5510 wc->refs[level] = 0; 5511 wc->flags[level] = 0; 5512 return 0; 5513 5514 owner_mismatch: 5515 btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu", 5516 btrfs_header_owner(eb), root->root_key.objectid); 5517 return -EUCLEAN; 5518 } 5519 5520 static noinline int walk_down_tree(struct btrfs_trans_handle *trans, 5521 struct btrfs_root *root, 5522 struct btrfs_path *path, 5523 struct walk_control *wc) 5524 { 5525 int level = wc->level; 5526 int lookup_info = 1; 5527 int ret = 0; 5528 5529 while (level >= 0) { 5530 ret = walk_down_proc(trans, root, path, wc, lookup_info); 5531 if (ret) 5532 break; 5533 5534 if (level == 0) 5535 break; 5536 5537 if (path->slots[level] >= 5538 btrfs_header_nritems(path->nodes[level])) 5539 break; 5540 5541 ret = do_walk_down(trans, root, path, wc, &lookup_info); 5542 if (ret > 0) { 5543 path->slots[level]++; 5544 continue; 5545 } else if (ret < 0) 5546 break; 5547 level = wc->level; 5548 } 5549 return (ret == 1) ? 0 : ret; 5550 } 5551 5552 static noinline int walk_up_tree(struct btrfs_trans_handle *trans, 5553 struct btrfs_root *root, 5554 struct btrfs_path *path, 5555 struct walk_control *wc, int max_level) 5556 { 5557 int level = wc->level; 5558 int ret; 5559 5560 path->slots[level] = btrfs_header_nritems(path->nodes[level]); 5561 while (level < max_level && path->nodes[level]) { 5562 wc->level = level; 5563 if (path->slots[level] + 1 < 5564 btrfs_header_nritems(path->nodes[level])) { 5565 path->slots[level]++; 5566 return 0; 5567 } else { 5568 ret = walk_up_proc(trans, root, path, wc); 5569 if (ret > 0) 5570 return 0; 5571 if (ret < 0) 5572 return ret; 5573 5574 if (path->locks[level]) { 5575 btrfs_tree_unlock_rw(path->nodes[level], 5576 path->locks[level]); 5577 path->locks[level] = 0; 5578 } 5579 free_extent_buffer(path->nodes[level]); 5580 path->nodes[level] = NULL; 5581 level++; 5582 } 5583 } 5584 return 1; 5585 } 5586 5587 /* 5588 * drop a subvolume tree. 5589 * 5590 * this function traverses the tree, freeing any blocks that are only 5591 * referenced by the tree. 5592 * 5593 * when a shared tree block is found, this function decreases its 5594 * reference count by one. if update_ref is true, this function 5595 * also makes sure backrefs for the shared block and all lower level 5596 * blocks are properly updated.
5597 * 5598 * If called with for_reloc == 0, may exit early with -EAGAIN 5599 */ 5600 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) 5601 { 5602 const bool is_reloc_root = (root->root_key.objectid == 5603 BTRFS_TREE_RELOC_OBJECTID); 5604 struct btrfs_fs_info *fs_info = root->fs_info; 5605 struct btrfs_path *path; 5606 struct btrfs_trans_handle *trans; 5607 struct btrfs_root *tree_root = fs_info->tree_root; 5608 struct btrfs_root_item *root_item = &root->root_item; 5609 struct walk_control *wc; 5610 struct btrfs_key key; 5611 int err = 0; 5612 int ret; 5613 int level; 5614 bool root_dropped = false; 5615 bool unfinished_drop = false; 5616 5617 btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid); 5618 5619 path = btrfs_alloc_path(); 5620 if (!path) { 5621 err = -ENOMEM; 5622 goto out; 5623 } 5624 5625 wc = kzalloc(sizeof(*wc), GFP_NOFS); 5626 if (!wc) { 5627 btrfs_free_path(path); 5628 err = -ENOMEM; 5629 goto out; 5630 } 5631 5632 /* 5633 * Use join to avoid potential EINTR from transaction start. See 5634 * wait_reserve_ticket and the whole reservation callchain. 5635 */ 5636 if (for_reloc) 5637 trans = btrfs_join_transaction(tree_root); 5638 else 5639 trans = btrfs_start_transaction(tree_root, 0); 5640 if (IS_ERR(trans)) { 5641 err = PTR_ERR(trans); 5642 goto out_free; 5643 } 5644 5645 err = btrfs_run_delayed_items(trans); 5646 if (err) 5647 goto out_end_trans; 5648 5649 /* 5650 * This will help us catch people modifying the fs tree while we're 5651 * dropping it. It is unsafe to mess with the fs tree while it's being 5652 * dropped as we unlock the root node and parent nodes as we walk down 5653 * the tree, assuming nothing will change. If something does change 5654 * then we'll have stale information and drop references to blocks we've 5655 * already dropped. 
5656 */ 5657 set_bit(BTRFS_ROOT_DELETING, &root->state); 5658 unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state); 5659 5660 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 5661 level = btrfs_header_level(root->node); 5662 path->nodes[level] = btrfs_lock_root_node(root); 5663 path->slots[level] = 0; 5664 path->locks[level] = BTRFS_WRITE_LOCK; 5665 memset(&wc->update_progress, 0, 5666 sizeof(wc->update_progress)); 5667 } else { 5668 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 5669 memcpy(&wc->update_progress, &key, 5670 sizeof(wc->update_progress)); 5671 5672 level = btrfs_root_drop_level(root_item); 5673 BUG_ON(level == 0); 5674 path->lowest_level = level; 5675 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5676 path->lowest_level = 0; 5677 if (ret < 0) { 5678 err = ret; 5679 goto out_end_trans; 5680 } 5681 WARN_ON(ret > 0); 5682 5683 /* 5684 * unlock our path, this is safe because only this 5685 * function is allowed to delete this snapshot 5686 */ 5687 btrfs_unlock_up_safe(path, 0); 5688 5689 level = btrfs_header_level(root->node); 5690 while (1) { 5691 btrfs_tree_lock(path->nodes[level]); 5692 path->locks[level] = BTRFS_WRITE_LOCK; 5693 5694 ret = btrfs_lookup_extent_info(trans, fs_info, 5695 path->nodes[level]->start, 5696 level, 1, &wc->refs[level], 5697 &wc->flags[level]); 5698 if (ret < 0) { 5699 err = ret; 5700 goto out_end_trans; 5701 } 5702 BUG_ON(wc->refs[level] == 0); 5703 5704 if (level == btrfs_root_drop_level(root_item)) 5705 break; 5706 5707 btrfs_tree_unlock(path->nodes[level]); 5708 path->locks[level] = 0; 5709 WARN_ON(wc->refs[level] != 1); 5710 level--; 5711 } 5712 } 5713 5714 wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state); 5715 wc->level = level; 5716 wc->shared_level = -1; 5717 wc->stage = DROP_REFERENCE; 5718 wc->update_ref = update_ref; 5719 wc->keep_locks = 0; 5720 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); 5721 5722 while (1) { 5723 5724 ret = walk_down_tree(trans, root, path, wc); 5725 if (ret < 0) { 5726 btrfs_abort_transaction(trans, ret); 5727 err = ret; 5728 break; 5729 } 5730 5731 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); 5732 if (ret < 0) { 5733 btrfs_abort_transaction(trans, ret); 5734 err = ret; 5735 break; 5736 } 5737 5738 if (ret > 0) { 5739 BUG_ON(wc->stage != DROP_REFERENCE); 5740 break; 5741 } 5742 5743 if (wc->stage == DROP_REFERENCE) { 5744 wc->drop_level = wc->level; 5745 btrfs_node_key_to_cpu(path->nodes[wc->drop_level], 5746 &wc->drop_progress, 5747 path->slots[wc->drop_level]); 5748 } 5749 btrfs_cpu_key_to_disk(&root_item->drop_progress, 5750 &wc->drop_progress); 5751 btrfs_set_root_drop_level(root_item, wc->drop_level); 5752 5753 BUG_ON(wc->level == 0); 5754 if (btrfs_should_end_transaction(trans) || 5755 (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) { 5756 ret = btrfs_update_root(trans, tree_root, 5757 &root->root_key, 5758 root_item); 5759 if (ret) { 5760 btrfs_abort_transaction(trans, ret); 5761 err = ret; 5762 goto out_end_trans; 5763 } 5764 5765 if (!is_reloc_root) 5766 btrfs_set_last_root_drop_gen(fs_info, trans->transid); 5767 5768 btrfs_end_transaction_throttle(trans); 5769 if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) { 5770 btrfs_debug(fs_info, 5771 "drop snapshot early exit"); 5772 err = -EAGAIN; 5773 goto out_free; 5774 } 5775 5776 /* 5777 * Use join to avoid potential EINTR from transaction 5778 * start. See wait_reserve_ticket and the whole 5779 * reservation callchain. 
	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			wc->drop_level = wc->level;
			btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
					      &wc->drop_progress,
					      path->slots[wc->drop_level]);
		}
		btrfs_cpu_key_to_disk(&root_item->drop_progress,
				      &wc->drop_progress);
		btrfs_set_root_drop_level(root_item, wc->drop_level);

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				err = ret;
				goto out_end_trans;
			}

			if (!is_reloc_root)
				btrfs_set_last_root_drop_gen(fs_info, trans->transid);

			btrfs_end_transaction_throttle(trans);
			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
				btrfs_debug(fs_info,
					    "drop snapshot early exit");
				err = -EAGAIN;
				goto out_free;
			}

			/*
			 * Use join to avoid potential EINTR from transaction
			 * start. See wait_reserve_ticket and the whole
			 * reservation callchain.
			 */
			if (for_reloc)
				trans = btrfs_join_transaction(tree_root);
			else
				trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		err = ret;
		goto out_end_trans;
	}

	if (!is_reloc_root) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/*
			 * If we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	/*
	 * This subvolume is going to be completely dropped, and won't be
	 * recorded as dirty roots, thus the pertrans meta rsv will not be
	 * freed at transaction commit time.  So free it here manually.
	 */
	btrfs_qgroup_convert_reserved_meta(root, INT_MAX);
	btrfs_qgroup_free_meta_all_pertrans(root);

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
		btrfs_add_dropped_root(trans, root);
	else
		btrfs_put_root(root);
	root_dropped = true;
out_end_trans:
	if (!is_reloc_root)
		btrfs_set_last_root_drop_gen(fs_info, trans->transid);

	btrfs_end_transaction_throttle(trans);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * We were an unfinished drop root; check to see if there are any
	 * other unfinished drops pending, and if not, clear the bit and
	 * wake up any waiters.
	 */
	if (!err && unfinished_drop)
		btrfs_maybe_wake_unfinished_drop(fs_info);

	/*
	 * If we had to stop dropping the snapshot for whatever reason, make
	 * sure to add it back to the dead root list so that we keep trying
	 * to do the work later.  This also cleans up roots that are not in
	 * the radix tree (like when we recover after a power failure or
	 * unmount), so we don't leak memory.
	 */
	if (!for_reloc && !root_dropped)
		btrfs_add_dead_root(root);
	return err;
}
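
/*
 * Illustrative sketch (not taken from this file): the usual caller of
 * btrfs_drop_snapshot() is the cleaner thread, which pops entries off
 * fs_info->dead_roots and lets interrupted drops resume, roughly:
 *
 *	root = ...next entry from fs_info->dead_roots...;
 *	ret = btrfs_drop_snapshot(root, update_ref, 0);
 *
 * On -EAGAIN the root is put back on the dead root list via
 * btrfs_add_dead_root() in the exit path above, so the drop continues
 * the next time the cleaner runs.
 */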

/*
 * Drop the subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * It is only used by the relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_write_locked(parent);
	parent_level = btrfs_header_level(parent);
	atomic_inc(&parent->refs);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_write_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
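
/*
 * Note the contrast with btrfs_drop_snapshot() above: btrfs_drop_subtree()
 * runs entirely within the caller's transaction, so there is no
 * drop_progress bookkeeping and no transaction restart.  The walk is set
 * up with wc->update_ref = 0 and wc->keep_locks = 1, 'parent' is preloaded
 * into the path with BTRFS_BLOCK_FLAG_FULL_BACKREF, and walk_up_tree() is
 * bounded by parent_level rather than BTRFS_MAX_LEVEL.
 */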

int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end)
{
	return unpin_extent_range(fs_info, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space.  Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time.  We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released or
 * allocated in the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses.  For that, we need to take a reference to the
 * transaction and hold the commit root sem.  We only need to hold
 * it while performing the free space search since we have already
 * held back allocations.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
	u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
	int ret;

	*trimmed = 0;

	/* Discard not supported = nothing to do. */
	if (!bdev_max_discard_sectors(device->bdev))
		return 0;

	/* Not writable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			break;

		find_first_clear_extent_bit(&device->alloc_state, start,
					    &start, &end,
					    CHUNK_TRIMMED | CHUNK_ALLOCATED);

		/* Check if there are any CHUNK_* bits left */
		if (start > device->total_bytes) {
			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
			btrfs_warn_in_rcu(fs_info,
"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
					  start, end - start + 1,
					  btrfs_dev_name(device),
					  device->total_bytes);
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		/* Ensure we skip the reserved space on each device. */
		start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);

		/*
		 * If find_first_clear_extent_bit() finds a range that spans
		 * the end of the device, it will set end to -1; in that case
		 * it's up to the caller to trim the value to the size of the
		 * device.
		 */
		end = min(end, device->total_bytes - 1);

		len = end - start + 1;

		/* We didn't find any extents */
		if (!len) {
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len,
					  &bytes);
		if (!ret)
			set_extent_bit(&device->alloc_state, start,
				       start + bytes - 1, CHUNK_TRIMMED, NULL);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
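
/*
 * Note that btrfs_trim_free_extents() above is naturally idempotent:
 * every successfully discarded range is marked CHUNK_TRIMMED in
 * device->alloc_state, and find_first_clear_extent_bit() skips ranges
 * marked CHUNK_TRIMMED or CHUNK_ALLOCATED, so repeated trim runs do not
 * re-discard the same unallocated space.
 */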

/*
 * Trim the whole filesystem by:
 * 1) trimming the free space in each block group
 * 2) trimming the unallocated space on each device
 *
 * This will also continue trimming even if a block group or device encounters
 * an error.  The return value will be the last error, or 0 if nothing bad
 * happens.
 */
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_block_group *cache = NULL;
	struct btrfs_device *device;
	u64 group_trimmed;
	u64 range_end = U64_MAX;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 bg_failed = 0;
	u64 dev_failed = 0;
	int bg_ret = 0;
	int dev_ret = 0;
	int ret = 0;

	if (range->start == U64_MAX)
		return -EINVAL;

	/*
	 * Check range overflow if range->len is set.
	 * The default range->len is U64_MAX.
	 */
	if (range->len != U64_MAX &&
	    check_add_overflow(range->start, range->len, &range_end))
		return -EINVAL;

	cache = btrfs_lookup_first_block_group(fs_info, range->start);
	for (; cache; cache = btrfs_next_block_group(cache)) {
		if (cache->start >= range_end) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->start);
		end = min(range_end, cache->start + cache->length);

		if (end - start >= range->minlen) {
			if (!btrfs_block_group_done(cache)) {
				ret = btrfs_cache_block_group(cache, true);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				bg_failed++;
				bg_ret = ret;
				continue;
			}
		}
	}

	if (bg_failed)
		btrfs_warn(fs_info,
			   "failed to trim %llu block group(s), last error %d",
			   bg_failed, bg_ret);

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
			continue;

		ret = btrfs_trim_free_extents(device, &group_trimmed);
		if (ret) {
			dev_failed++;
			dev_ret = ret;
			break;
		}

		trimmed += group_trimmed;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (dev_failed)
		btrfs_warn(fs_info,
			   "failed to trim %llu device(s), last error %d",
			   dev_failed, dev_ret);
	range->len = trimmed;
	if (bg_ret)
		return bg_ret;
	return dev_ret;
}
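
/*
 * Illustrative userspace usage (assumed, not part of this file):
 * btrfs_trim_fs() is reached through the generic FITRIM ioctl, e.g.:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.minlen = 0,
 *	};
 *
 *	ioctl(fd, FITRIM, &range);
 *
 * On return, range.len is overwritten with the total number of bytes
 * trimmed, matching the range->len = trimmed assignment above.
 */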