// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "ctree.h"
#include "extent-tree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
#include "discard.h"
#include "rcu-string.h"
#include "zoned.h"
#include "dev-replace.h"
#include "fs.h"
#include "accessors.h"
#include "root-tree.h"
#include "file-item.h"
#include "orphan.h"
#include "tree-checker.h"

#undef SCRAMBLE_DELAYED_REFS


static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);

static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	struct btrfs_root *root = btrfs_extent_root(fs_info, start);
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check what
 * the reference count and extent flags would be once all of the queued
 * delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_root *extent_root;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	extent_root = btrfs_extent_root(fs_info, bytenr);
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
			ret = -EUCLEAN;
			btrfs_err(fs_info,
			"unexpected extent item size, has %u expect >= %zu",
				  item_size, sizeof(*ei));
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(fs_info, ret, NULL);

			goto out_free;
		}

		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

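/*
 * Illustrative example (values made up, not from the original source): if
 * the extent item on disk records 2 references and the delayed ref head for
 * that bytenr has a queued ref_mod of -1, btrfs_lookup_extent_info() above
 * reports *refs == 1, i.e. the value the extent will have once the delayed
 * refs are run.
 */
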
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized for
 * pointers in non-shared tree blocks.  For a given pointer in a block, back
 * refs of this kind provide information about the block's owner tree and
 * the pointer's key.  This information allows us to find the block by
 * searching the b-tree.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees; the location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead: every time a tree block
 * gets COWed, we have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for the
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the block's
 * owner tree.  In this case, full back refs are used for the pointers in
 * the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * the pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of the tree
 * leaf.
 *
 * When a file extent is allocated, implicit back refs are used and the
 * fields are filled in as:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for implicit back refs is the
 * objectid of the block's owner tree.  The key offset for full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */

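/*
 * Illustrative key layouts (example values, not from the original source):
 *
 *   Implicit back ref for a data extent at bytenr 12582912, referenced by
 *   file inode 257 in subvolume 5 at file offset 0:
 *
 *     (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 *   Full (shared) back ref for the same extent when it is only reachable
 *   through a parent leaf at bytenr 30408704:
 *
 *     (12582912, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 */
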
375 */ 376 if (offset && 377 IS_ALIGNED(offset, eb->fs_info->sectorsize)) 378 return type; 379 } 380 } else { 381 ASSERT(is_data == BTRFS_REF_TYPE_ANY); 382 return type; 383 } 384 } 385 386 WARN_ON(1); 387 btrfs_print_leaf(eb); 388 btrfs_err(eb->fs_info, 389 "eb %llu iref 0x%lx invalid extent inline ref type %d", 390 eb->start, (unsigned long)iref, type); 391 392 return BTRFS_REF_TYPE_INVALID; 393 } 394 395 u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset) 396 { 397 u32 high_crc = ~(u32)0; 398 u32 low_crc = ~(u32)0; 399 __le64 lenum; 400 401 lenum = cpu_to_le64(root_objectid); 402 high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum)); 403 lenum = cpu_to_le64(owner); 404 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); 405 lenum = cpu_to_le64(offset); 406 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); 407 408 return ((u64)high_crc << 31) ^ (u64)low_crc; 409 } 410 411 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf, 412 struct btrfs_extent_data_ref *ref) 413 { 414 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref), 415 btrfs_extent_data_ref_objectid(leaf, ref), 416 btrfs_extent_data_ref_offset(leaf, ref)); 417 } 418 419 static int match_extent_data_ref(struct extent_buffer *leaf, 420 struct btrfs_extent_data_ref *ref, 421 u64 root_objectid, u64 owner, u64 offset) 422 { 423 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid || 424 btrfs_extent_data_ref_objectid(leaf, ref) != owner || 425 btrfs_extent_data_ref_offset(leaf, ref) != offset) 426 return 0; 427 return 1; 428 } 429 430 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans, 431 struct btrfs_path *path, 432 u64 bytenr, u64 parent, 433 u64 root_objectid, 434 u64 owner, u64 offset) 435 { 436 struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); 437 struct btrfs_key key; 438 struct btrfs_extent_data_ref *ref; 439 struct extent_buffer *leaf; 440 u32 nritems; 441 int ret; 442 int recow; 443 int err = -ENOENT; 444 445 key.objectid = bytenr; 446 if (parent) { 447 key.type = BTRFS_SHARED_DATA_REF_KEY; 448 key.offset = parent; 449 } else { 450 key.type = BTRFS_EXTENT_DATA_REF_KEY; 451 key.offset = hash_extent_data_ref(root_objectid, 452 owner, offset); 453 } 454 again: 455 recow = 0; 456 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 457 if (ret < 0) { 458 err = ret; 459 goto fail; 460 } 461 462 if (parent) { 463 if (!ret) 464 return 0; 465 goto fail; 466 } 467 468 leaf = path->nodes[0]; 469 nritems = btrfs_header_nritems(leaf); 470 while (1) { 471 if (path->slots[0] >= nritems) { 472 ret = btrfs_next_leaf(root, path); 473 if (ret < 0) 474 err = ret; 475 if (ret) 476 goto fail; 477 478 leaf = path->nodes[0]; 479 nritems = btrfs_header_nritems(leaf); 480 recow = 1; 481 } 482 483 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 484 if (key.objectid != bytenr || 485 key.type != BTRFS_EXTENT_DATA_REF_KEY) 486 goto fail; 487 488 ref = btrfs_item_ptr(leaf, path->slots[0], 489 struct btrfs_extent_data_ref); 490 491 if (match_extent_data_ref(leaf, ref, root_objectid, 492 owner, offset)) { 493 if (recow) { 494 btrfs_release_path(path); 495 goto again; 496 } 497 err = 0; 498 break; 499 } 500 path->slots[0]++; 501 } 502 fail: 503 return err; 504 } 505 506 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, 507 struct btrfs_path *path, 508 u64 bytenr, u64 parent, 509 u64 root_objectid, u64 owner, 510 u64 offset, int refs_to_add) 511 { 512 struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); 
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

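/*
 * Because different (root, owner, offset) tuples can hash to the same key
 * offset, the lookup below walks forward through neighbouring
 * BTRFS_EXTENT_DATA_REF_KEY items and compares the stored fields with
 * match_extent_data_ref().  Inserts resolve an -EEXIST collision by bumping
 * key.offset and retrying (see insert_extent_data_ref()).
 */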
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;

		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(trans, leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		btrfs_err(trans->fs_info,
			  "unrecognized backref key (%llu %u %llu)",
			  key.objectid, key.type, key.offset);
		btrfs_abort_transaction(trans, -EUCLEAN);
		return -EUCLEAN;
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
		btrfs_mark_buffer_dirty(trans, leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

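/*
 * For reference, an extent item in the extent tree is laid out as:
 *
 *   [ btrfs_extent_item ]
 *   [ btrfs_tree_block_info ]   (non-skinny metadata extents only)
 *   [ inline ref ][ inline ref ] ...
 *
 * lookup_inline_extent_backref() below walks the inline refs between
 * (ei + 1), adjusted past the tree block info where present, and the end
 * of the item.
 */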
/*
 * look for inline back ref.  if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->search_for_extension = 1;
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our level, so we can just add one to get the level for the
	 * block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
			  bytenr, num_bytes, parent, root_objectid, owner,
			  offset);
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		err = -EUCLEAN;
		btrfs_err(fs_info,
			  "unexpected extent item size, has %llu expect >= %zu",
			  item_size, sizeof(*ei));
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			if (ptr > end) {
				err = -EUCLEAN;
				btrfs_print_leaf(path->nodes[0]);
				btrfs_crit(fs_info,
"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
					   path->slots[0], root_objectid, owner, offset, parent);
			}
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;

			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure that
		 * there is no corresponding back ref item.  For simplicity,
		 * we just do not add a new inline back ref if there is any
		 * kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		path->search_for_extension = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

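/*
 * Sketch of what setup_inline_extent_backref() below does to the item
 * (example layout, assuming the new ref sorts between refs A and B):
 *
 *   before: [ extent_item ][ ref A ][ ref B ]
 *   after:  [ extent_item ][ ref A ][ new ref ][ ref B ]
 *
 * btrfs_extend_item() grows the item by the size of the new ref, then the
 * memmove shifts everything from the insertion point onwards to open a gap
 * that the new ref is written into.
 */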
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(trans, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;

		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;

		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(trans, leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack int update_inline_extent_backref(
				  struct btrfs_trans_handle *trans,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) {
		struct btrfs_key key;
		u32 extent_size;

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type == BTRFS_METADATA_ITEM_KEY)
			extent_size = fs_info->nodesize;
		else
			extent_size = key.offset;
		btrfs_print_leaf(leaf);
		btrfs_err(fs_info,
	"invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
			  key.objectid, extent_size, refs_to_mod, refs);
		return -EUCLEAN;
	}
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	/*
	 * Function btrfs_get_extent_inline_ref_type() has already printed
	 * error messages.
	 */
	if (unlikely(type == BTRFS_REF_TYPE_INVALID))
		return -EUCLEAN;

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		/*
		 * For a tree block we can only drop one ref, and tree blocks
		 * should not have refs > 1.
		 *
		 * Furthermore if we're inserting a new inline backref, we
		 * won't reach this path either.  That would be
		 * setup_inline_extent_backref().
		 */
		if (unlikely(refs_to_mod != -1)) {
			struct btrfs_key key;

			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

			btrfs_print_leaf(leaf);
			btrfs_err(fs_info,
			"invalid refs_to_mod for tree block %llu, has %d expect -1",
				  key.objectid, refs_to_mod);
			return -EUCLEAN;
		}
	}

	if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) {
		struct btrfs_key key;
		u32 extent_size;

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type == BTRFS_METADATA_ITEM_KEY)
			extent_size = fs_info->nodesize;
		else
			extent_size = key.offset;
		btrfs_print_leaf(leaf);
		btrfs_err(fs_info,
"invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
			  (unsigned long)iref, key.objectid, extent_size,
			  refs_to_mod, refs);
		return -EUCLEAN;
	}
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(trans, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(trans, leaf);
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		/*
		 * We're adding refs to a tree block we already own, this
		 * should not happen at all.
		 */
		if (owner < BTRFS_FIRST_FREE_OBJECTID) {
			btrfs_print_leaf(path->nodes[0]);
			btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu slot %u",
				   bytenr, num_bytes, root_objectid, path->slots[0]);
			return -EUCLEAN;
		}
		ret = update_inline_extent_backref(trans, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref)
		ret = update_inline_extent_backref(trans, path, iref,
						   -refs_to_drop, NULL);
	else if (is_data)
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	else
		ret = btrfs_del_item(trans, root, path);
	return ret;
}

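/*
 * Worked example for the superblock skipping below (made-up numbers): asked
 * to discard [60MiB, 80MiB) on a device with a superblock mirror at 64MiB,
 * btrfs_issue_discard() issues one discard for [60MiB, 64MiB), skips the
 * BTRFS_SUPER_INFO_SIZE bytes of the superblock, and then continues from
 * the end of the superblock towards 80MiB.
 */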
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << SECTOR_SHIFT);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
						   size >> SECTOR_SHIFT,
						   GFP_NOFS);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					   bytes_left >> SECTOR_SHIFT,
					   GFP_NOFS);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}

static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
{
	struct btrfs_device *dev = stripe->dev;
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 phys = stripe->physical;
	u64 len = stripe->length;
	u64 discarded = 0;
	int ret = 0;

	/* Zone reset on a zoned filesystem */
	if (btrfs_can_zone_reset(dev, phys, len)) {
		u64 src_disc;

		ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
		if (ret)
			goto out;

		if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
		    dev != dev_replace->srcdev)
			goto out;

		src_disc = discarded;

		/* Send to replace target as well */
		ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
					      &discarded);
		discarded += src_disc;
	} else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
		ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
	} else {
		ret = 0;
		*bytes = 0;
	}

out:
	*bytes = discarded;
	return ret;
}

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret = 0;
	u64 discarded_bytes = 0;
	u64 end = bytenr + num_bytes;
	u64 cur = bytenr;

	/*
	 * Avoid races with device replace and make sure the devices in the
	 * stripes don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	while (cur < end) {
		struct btrfs_discard_stripe *stripes;
		unsigned int num_stripes;
		int i;

		num_bytes = end - cur;
		stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
		if (IS_ERR(stripes)) {
			ret = PTR_ERR(stripes);
			if (ret == -EOPNOTSUPP)
				ret = 0;
			break;
		}

		for (i = 0; i < num_stripes; i++) {
			struct btrfs_discard_stripe *stripe = stripes + i;
			u64 bytes;

			if (!stripe->dev->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}

			if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
				      &stripe->dev->dev_state))
				continue;

			ret = do_discard_extent(stripe, &bytes);
			if (ret) {
				/*
				 * Keep going if discard is not supported by
				 * the device.
				 */
				if (ret != -EOPNOTSUPP)
					break;
				ret = 0;
			} else {
				discarded_bytes += bytes;
			}
		}
		kfree(stripes);
		if (ret)
			break;
		cur += num_bytes;
	}
	btrfs_bio_counter_dec(fs_info);
	if (actual_bytes)
		*actual_bytes = discarded_bytes;
	return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
	       generic_ref->action);
	BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
	       generic_ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID);

	if (generic_ref->type == BTRFS_REF_METADATA)
		ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
	else
		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);

	btrfs_ref_tree_mod(fs_info, generic_ref);

	return ret;
}

/*
 * __btrfs_inc_extent_ref - insert backreference for a given extent
 *
 * The counterpart is in __btrfs_free_extent(), with examples and more
 * details on how it works.
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for the
 *		    extent whose references are incremented.
 *
 * @parent:	    If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *		    BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *		    bytenr of the parent block. Since new extents are always
 *		    created with indirect references, this will only be the
 *		    case when relocating a shared extent. In that case,
 *		    root_objectid will be BTRFS_TREE_RELOC_OBJECTID.
 *		    Otherwise, parent must be 0.
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *		    this can be either one of the well-known metadata trees or
 *		    the subvolume id which references this extent.
 *
 * @owner:	    For data extents it is the inode number of the owning file.
 *		    For metadata extents this parameter holds the level in the
 *		    tree of the extent.
 *
 * @offset:	    For metadata extents the offset is ignored and is currently
 *		    always passed as 0. For data extents it is the file offset
 *		    this extent belongs to.
 *
 * @refs_to_add:    Number of references to add
 *
 * @extent_op:	    Pointer to a structure, holding information necessary when
 *		    updating a tree block's flags
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* this will set up the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   parent, root_objectid, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_release_path(path);

	/* now insert the actual backref */
	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		ret = insert_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	else
		ret = insert_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset,
					     refs_to_add);

	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				bool insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, parent, ref_root,
						 flags, ref->objectid,
						 ref->offset, &ins,
						 node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->objectid, ref->offset,
					     node->ref_mod, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);

	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;

		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = 1;

	if (TRANS_ABORTED(trans))
		return 0;

	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

	root = btrfs_extent_root(fs_info, key.objectid);
again:
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EUCLEAN;
			btrfs_err(fs_info,
		  "missing extent item for extent %llu num_bytes %llu level %d",
				  head->bytenr, head->num_bytes, extent_op->level);
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);

	if (unlikely(item_size < sizeof(*ei))) {
		err = -EUCLEAN;
		btrfs_err(fs_info,
			  "unexpected extent item size, has %u expect >= %zu",
			  item_size, sizeof(*ei));
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(trans, leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				bool insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	u64 parent = 0;
	u64 ref_root = 0;

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (unlikely(node->ref_mod != 1)) {
		btrfs_err(trans->fs_info,
	"btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
			  node->bytenr, node->ref_mod, node->action, ref_root,
			  parent);
		return -EUCLEAN;
	}
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, node, extent_op);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       bool insert_reserved)
{
	int ret = 0;

	if (TRANS_ABORTED(trans)) {
		if (insert_reserved)
			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, node, extent_op,
					   insert_reserved);
	else
		BUG();
	if (ret && insert_reserved)
		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
	if (ret < 0)
		btrfs_err(trans->fs_info,
"failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
			  node->bytenr, node->num_bytes, node->type,
			  node->action, node->ref_mod, ret);
	return ret;
}

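/*
 * Example of why ADD refs must be selected before DROP refs (illustrative
 * scenario): an extent with one reference on disk has both a queued drop and
 * a queued add pending.  Running the drop first would bring the count to
 * zero and delete the extent item, and the add would then fail to find it.
 * select_delayed_ref() therefore always prefers entries on ref_add_list.
 */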
1777 */ 1778 if (!list_empty(&head->ref_add_list)) 1779 return list_first_entry(&head->ref_add_list, 1780 struct btrfs_delayed_ref_node, add_list); 1781 1782 ref = rb_entry(rb_first_cached(&head->ref_tree), 1783 struct btrfs_delayed_ref_node, ref_node); 1784 ASSERT(list_empty(&ref->add_list)); 1785 return ref; 1786 } 1787 1788 static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, 1789 struct btrfs_delayed_ref_head *head) 1790 { 1791 spin_lock(&delayed_refs->lock); 1792 head->processing = false; 1793 delayed_refs->num_heads_ready++; 1794 spin_unlock(&delayed_refs->lock); 1795 btrfs_delayed_ref_unlock(head); 1796 } 1797 1798 static struct btrfs_delayed_extent_op *cleanup_extent_op( 1799 struct btrfs_delayed_ref_head *head) 1800 { 1801 struct btrfs_delayed_extent_op *extent_op = head->extent_op; 1802 1803 if (!extent_op) 1804 return NULL; 1805 1806 if (head->must_insert_reserved) { 1807 head->extent_op = NULL; 1808 btrfs_free_delayed_extent_op(extent_op); 1809 return NULL; 1810 } 1811 return extent_op; 1812 } 1813 1814 static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans, 1815 struct btrfs_delayed_ref_head *head) 1816 { 1817 struct btrfs_delayed_extent_op *extent_op; 1818 int ret; 1819 1820 extent_op = cleanup_extent_op(head); 1821 if (!extent_op) 1822 return 0; 1823 head->extent_op = NULL; 1824 spin_unlock(&head->lock); 1825 ret = run_delayed_extent_op(trans, head, extent_op); 1826 btrfs_free_delayed_extent_op(extent_op); 1827 return ret ? ret : 1; 1828 } 1829 1830 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, 1831 struct btrfs_delayed_ref_root *delayed_refs, 1832 struct btrfs_delayed_ref_head *head) 1833 { 1834 int nr_items = 1; /* Dropping this ref head update. */ 1835 1836 /* 1837 * We had csum deletions accounted for in our delayed refs rsv, we need 1838 * to drop the csum leaves for this update from our delayed_refs_rsv. 1839 */ 1840 if (head->total_ref_mod < 0 && head->is_data) { 1841 spin_lock(&delayed_refs->lock); 1842 delayed_refs->pending_csums -= head->num_bytes; 1843 spin_unlock(&delayed_refs->lock); 1844 nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes); 1845 } 1846 1847 btrfs_delayed_refs_rsv_release(fs_info, nr_items); 1848 } 1849 1850 static int cleanup_ref_head(struct btrfs_trans_handle *trans, 1851 struct btrfs_delayed_ref_head *head) 1852 { 1853 1854 struct btrfs_fs_info *fs_info = trans->fs_info; 1855 struct btrfs_delayed_ref_root *delayed_refs; 1856 int ret; 1857 1858 delayed_refs = &trans->transaction->delayed_refs; 1859 1860 ret = run_and_cleanup_extent_op(trans, head); 1861 if (ret < 0) { 1862 unselect_delayed_ref_head(delayed_refs, head); 1863 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret); 1864 return ret; 1865 } else if (ret) { 1866 return ret; 1867 } 1868 1869 /* 1870 * Need to drop our head ref lock and re-acquire the delayed ref lock 1871 * and then re-check to make sure nobody got added. 
1872 */ 1873 spin_unlock(&head->lock); 1874 spin_lock(&delayed_refs->lock); 1875 spin_lock(&head->lock); 1876 if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) { 1877 spin_unlock(&head->lock); 1878 spin_unlock(&delayed_refs->lock); 1879 return 1; 1880 } 1881 btrfs_delete_ref_head(delayed_refs, head); 1882 spin_unlock(&head->lock); 1883 spin_unlock(&delayed_refs->lock); 1884 1885 if (head->must_insert_reserved) { 1886 btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1); 1887 if (head->is_data) { 1888 struct btrfs_root *csum_root; 1889 1890 csum_root = btrfs_csum_root(fs_info, head->bytenr); 1891 ret = btrfs_del_csums(trans, csum_root, head->bytenr, 1892 head->num_bytes); 1893 } 1894 } 1895 1896 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); 1897 1898 trace_run_delayed_ref_head(fs_info, head, 0); 1899 btrfs_delayed_ref_unlock(head); 1900 btrfs_put_delayed_ref_head(head); 1901 return ret; 1902 } 1903 1904 static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head( 1905 struct btrfs_trans_handle *trans) 1906 { 1907 struct btrfs_delayed_ref_root *delayed_refs = 1908 &trans->transaction->delayed_refs; 1909 struct btrfs_delayed_ref_head *head = NULL; 1910 int ret; 1911 1912 spin_lock(&delayed_refs->lock); 1913 head = btrfs_select_ref_head(delayed_refs); 1914 if (!head) { 1915 spin_unlock(&delayed_refs->lock); 1916 return head; 1917 } 1918 1919 /* 1920 * Grab the lock that says we are going to process all the refs for 1921 * this head 1922 */ 1923 ret = btrfs_delayed_ref_lock(delayed_refs, head); 1924 spin_unlock(&delayed_refs->lock); 1925 1926 /* 1927 * We may have dropped the spin lock to get the head mutex lock, and 1928 * that might have given someone else time to free the head. If that's 1929 * true, it has been removed from our list and we can move on. 1930 */ 1931 if (ret == -EAGAIN) 1932 head = ERR_PTR(-EAGAIN); 1933 1934 return head; 1935 } 1936 1937 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, 1938 struct btrfs_delayed_ref_head *locked_ref) 1939 { 1940 struct btrfs_fs_info *fs_info = trans->fs_info; 1941 struct btrfs_delayed_ref_root *delayed_refs; 1942 struct btrfs_delayed_extent_op *extent_op; 1943 struct btrfs_delayed_ref_node *ref; 1944 bool must_insert_reserved; 1945 int ret; 1946 1947 delayed_refs = &trans->transaction->delayed_refs; 1948 1949 lockdep_assert_held(&locked_ref->mutex); 1950 lockdep_assert_held(&locked_ref->lock); 1951 1952 while ((ref = select_delayed_ref(locked_ref))) { 1953 if (ref->seq && 1954 btrfs_check_delayed_seq(fs_info, ref->seq)) { 1955 spin_unlock(&locked_ref->lock); 1956 unselect_delayed_ref_head(delayed_refs, locked_ref); 1957 return -EAGAIN; 1958 } 1959 1960 rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree); 1961 RB_CLEAR_NODE(&ref->ref_node); 1962 if (!list_empty(&ref->add_list)) 1963 list_del(&ref->add_list); 1964 /* 1965 * When we play the delayed ref, also correct the ref_mod on 1966 * head 1967 */ 1968 switch (ref->action) { 1969 case BTRFS_ADD_DELAYED_REF: 1970 case BTRFS_ADD_DELAYED_EXTENT: 1971 locked_ref->ref_mod -= ref->ref_mod; 1972 break; 1973 case BTRFS_DROP_DELAYED_REF: 1974 locked_ref->ref_mod += ref->ref_mod; 1975 break; 1976 default: 1977 WARN_ON(1); 1978 } 1979 atomic_dec(&delayed_refs->num_entries); 1980 1981 /* 1982 * Record the must_insert_reserved flag before we drop the 1983 * spin lock. 
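 * The flag is consumed exactly once: we clear it on the head here and
 * hand it to run_one_delayed_ref(), which either inserts the reserved
 * extent or, on failure, pins it back down.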
1984 */ 1985 must_insert_reserved = locked_ref->must_insert_reserved; 1986 locked_ref->must_insert_reserved = false; 1987 1988 extent_op = locked_ref->extent_op; 1989 locked_ref->extent_op = NULL; 1990 spin_unlock(&locked_ref->lock); 1991 1992 ret = run_one_delayed_ref(trans, ref, extent_op, 1993 must_insert_reserved); 1994 1995 btrfs_free_delayed_extent_op(extent_op); 1996 if (ret) { 1997 unselect_delayed_ref_head(delayed_refs, locked_ref); 1998 btrfs_put_delayed_ref(ref); 1999 return ret; 2000 } 2001 2002 btrfs_put_delayed_ref(ref); 2003 cond_resched(); 2004 2005 spin_lock(&locked_ref->lock); 2006 btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); 2007 } 2008 2009 return 0; 2010 } 2011 2012 /* 2013 * Returns 0 on success or if called with an already aborted transaction. 2014 * Returns -ENOMEM or -EIO on failure and will abort the transaction. 2015 */ 2016 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2017 unsigned long nr) 2018 { 2019 struct btrfs_fs_info *fs_info = trans->fs_info; 2020 struct btrfs_delayed_ref_root *delayed_refs; 2021 struct btrfs_delayed_ref_head *locked_ref = NULL; 2022 int ret; 2023 unsigned long count = 0; 2024 2025 delayed_refs = &trans->transaction->delayed_refs; 2026 do { 2027 if (!locked_ref) { 2028 locked_ref = btrfs_obtain_ref_head(trans); 2029 if (IS_ERR_OR_NULL(locked_ref)) { 2030 if (PTR_ERR(locked_ref) == -EAGAIN) { 2031 continue; 2032 } else { 2033 break; 2034 } 2035 } 2036 count++; 2037 } 2038 /* 2039 * We need to try and merge add/drops of the same ref since we 2040 * can run into issues with relocate dropping the implicit ref 2041 * and then it being added back again before the drop can 2042 * finish. If we merged anything we need to re-loop so we can 2043 * get a good ref. 2044 * Or we can get node references of the same type that weren't 2045 * merged when created due to bumps in the tree mod seq, and 2046 * we need to merge them to prevent adding an inline extent 2047 * backref before dropping it (triggering a BUG_ON at 2048 * insert_inline_extent_backref()). 2049 */ 2050 spin_lock(&locked_ref->lock); 2051 btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); 2052 2053 ret = btrfs_run_delayed_refs_for_head(trans, locked_ref); 2054 if (ret < 0 && ret != -EAGAIN) { 2055 /* 2056 * Error, btrfs_run_delayed_refs_for_head already 2057 * unlocked everything so just bail out 2058 */ 2059 return ret; 2060 } else if (!ret) { 2061 /* 2062 * Success, perform the usual cleanup of a processed 2063 * head 2064 */ 2065 ret = cleanup_ref_head(trans, locked_ref); 2066 if (ret > 0 ) { 2067 /* We dropped our lock, we need to loop. */ 2068 ret = 0; 2069 continue; 2070 } else if (ret) { 2071 return ret; 2072 } 2073 } 2074 2075 /* 2076 * Either success case or btrfs_run_delayed_refs_for_head 2077 * returned -EAGAIN, meaning we need to select another head 2078 */ 2079 2080 locked_ref = NULL; 2081 cond_resched(); 2082 } while ((nr != -1 && count < nr) || locked_ref); 2083 2084 return 0; 2085 } 2086 2087 #ifdef SCRAMBLE_DELAYED_REFS 2088 /* 2089 * Normally delayed refs get processed in ascending bytenr order. This 2090 * correlates in most cases to the order added. 
To expose dependencies on this 2091 * order, we start to process the tree in the middle instead of the beginning 2092 */ 2093 static u64 find_middle(struct rb_root *root) 2094 { 2095 struct rb_node *n = root->rb_node; 2096 struct btrfs_delayed_ref_node *entry; 2097 int alt = 1; 2098 u64 middle; 2099 u64 first = 0, last = 0; 2100 2101 n = rb_first(root); 2102 if (n) { 2103 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2104 first = entry->bytenr; 2105 } 2106 n = rb_last(root); 2107 if (n) { 2108 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2109 last = entry->bytenr; 2110 } 2111 n = root->rb_node; 2112 2113 while (n) { 2114 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2115 WARN_ON(!entry->in_tree); 2116 2117 middle = entry->bytenr; 2118 2119 if (alt) 2120 n = n->rb_left; 2121 else 2122 n = n->rb_right; 2123 2124 alt = 1 - alt; 2125 } 2126 return middle; 2127 } 2128 #endif 2129 2130 /* 2131 * this starts processing the delayed reference count updates and 2132 * extent insertions we have queued up so far. count can be 2133 * 0, which means to process everything in the tree at the start 2134 * of the run (but not newly added entries), or it can be some target 2135 * number you'd like to process. 2136 * 2137 * Returns 0 on success or if called with an aborted transaction 2138 * Returns <0 on error and aborts the transaction 2139 */ 2140 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2141 unsigned long count) 2142 { 2143 struct btrfs_fs_info *fs_info = trans->fs_info; 2144 struct rb_node *node; 2145 struct btrfs_delayed_ref_root *delayed_refs; 2146 struct btrfs_delayed_ref_head *head; 2147 int ret; 2148 int run_all = count == (unsigned long)-1; 2149 2150 /* We'll clean this up in btrfs_cleanup_transaction */ 2151 if (TRANS_ABORTED(trans)) 2152 return 0; 2153 2154 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) 2155 return 0; 2156 2157 delayed_refs = &trans->transaction->delayed_refs; 2158 if (count == 0) 2159 count = delayed_refs->num_heads_ready; 2160 2161 again: 2162 #ifdef SCRAMBLE_DELAYED_REFS 2163 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); 2164 #endif 2165 ret = __btrfs_run_delayed_refs(trans, count); 2166 if (ret < 0) { 2167 btrfs_abort_transaction(trans, ret); 2168 return ret; 2169 } 2170 2171 if (run_all) { 2172 btrfs_create_pending_block_groups(trans); 2173 2174 spin_lock(&delayed_refs->lock); 2175 node = rb_first_cached(&delayed_refs->href_root); 2176 if (!node) { 2177 spin_unlock(&delayed_refs->lock); 2178 goto out; 2179 } 2180 head = rb_entry(node, struct btrfs_delayed_ref_head, 2181 href_node); 2182 refcount_inc(&head->refs); 2183 spin_unlock(&delayed_refs->lock); 2184 2185 /* Mutex was contended, block until it's released and retry. 
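 * Taking and immediately dropping head->mutex only waits for whoever
 * currently owns the head to finish running its refs before we loop
 * back to the "again" label.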
*/ 2186 mutex_lock(&head->mutex); 2187 mutex_unlock(&head->mutex); 2188 2189 btrfs_put_delayed_ref_head(head); 2190 cond_resched(); 2191 goto again; 2192 } 2193 out: 2194 return 0; 2195 } 2196 2197 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2198 struct extent_buffer *eb, u64 flags) 2199 { 2200 struct btrfs_delayed_extent_op *extent_op; 2201 int level = btrfs_header_level(eb); 2202 int ret; 2203 2204 extent_op = btrfs_alloc_delayed_extent_op(); 2205 if (!extent_op) 2206 return -ENOMEM; 2207 2208 extent_op->flags_to_set = flags; 2209 extent_op->update_flags = true; 2210 extent_op->update_key = false; 2211 extent_op->level = level; 2212 2213 ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op); 2214 if (ret) 2215 btrfs_free_delayed_extent_op(extent_op); 2216 return ret; 2217 } 2218 2219 static noinline int check_delayed_ref(struct btrfs_root *root, 2220 struct btrfs_path *path, 2221 u64 objectid, u64 offset, u64 bytenr) 2222 { 2223 struct btrfs_delayed_ref_head *head; 2224 struct btrfs_delayed_ref_node *ref; 2225 struct btrfs_delayed_data_ref *data_ref; 2226 struct btrfs_delayed_ref_root *delayed_refs; 2227 struct btrfs_transaction *cur_trans; 2228 struct rb_node *node; 2229 int ret = 0; 2230 2231 spin_lock(&root->fs_info->trans_lock); 2232 cur_trans = root->fs_info->running_transaction; 2233 if (cur_trans) 2234 refcount_inc(&cur_trans->use_count); 2235 spin_unlock(&root->fs_info->trans_lock); 2236 if (!cur_trans) 2237 return 0; 2238 2239 delayed_refs = &cur_trans->delayed_refs; 2240 spin_lock(&delayed_refs->lock); 2241 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); 2242 if (!head) { 2243 spin_unlock(&delayed_refs->lock); 2244 btrfs_put_transaction(cur_trans); 2245 return 0; 2246 } 2247 2248 if (!mutex_trylock(&head->mutex)) { 2249 if (path->nowait) { 2250 spin_unlock(&delayed_refs->lock); 2251 btrfs_put_transaction(cur_trans); 2252 return -EAGAIN; 2253 } 2254 2255 refcount_inc(&head->refs); 2256 spin_unlock(&delayed_refs->lock); 2257 2258 btrfs_release_path(path); 2259 2260 /* 2261 * Mutex was contended, block until it's released and let 2262 * caller try again 2263 */ 2264 mutex_lock(&head->mutex); 2265 mutex_unlock(&head->mutex); 2266 btrfs_put_delayed_ref_head(head); 2267 btrfs_put_transaction(cur_trans); 2268 return -EAGAIN; 2269 } 2270 spin_unlock(&delayed_refs->lock); 2271 2272 spin_lock(&head->lock); 2273 /* 2274 * XXX: We should replace this with a proper search function in the 2275 * future. 2276 */ 2277 for (node = rb_first_cached(&head->ref_tree); node; 2278 node = rb_next(node)) { 2279 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); 2280 /* If it's a shared ref we know a cross reference exists */ 2281 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { 2282 ret = 1; 2283 break; 2284 } 2285 2286 data_ref = btrfs_delayed_node_to_data_ref(ref); 2287 2288 /* 2289 * If our ref doesn't match the one we're currently looking at 2290 * then we have a cross reference. 
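 * For example, a queued data ref for the same bytenr from a different
 * root (say, a reflinked copy in another subvolume) means the extent is
 * about to become shared even though the committed extent tree does not
 * show that yet.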
2291 */ 2292 if (data_ref->root != root->root_key.objectid || 2293 data_ref->objectid != objectid || 2294 data_ref->offset != offset) { 2295 ret = 1; 2296 break; 2297 } 2298 } 2299 spin_unlock(&head->lock); 2300 mutex_unlock(&head->mutex); 2301 btrfs_put_transaction(cur_trans); 2302 return ret; 2303 } 2304 2305 static noinline int check_committed_ref(struct btrfs_root *root, 2306 struct btrfs_path *path, 2307 u64 objectid, u64 offset, u64 bytenr, 2308 bool strict) 2309 { 2310 struct btrfs_fs_info *fs_info = root->fs_info; 2311 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); 2312 struct extent_buffer *leaf; 2313 struct btrfs_extent_data_ref *ref; 2314 struct btrfs_extent_inline_ref *iref; 2315 struct btrfs_extent_item *ei; 2316 struct btrfs_key key; 2317 u32 item_size; 2318 int type; 2319 int ret; 2320 2321 key.objectid = bytenr; 2322 key.offset = (u64)-1; 2323 key.type = BTRFS_EXTENT_ITEM_KEY; 2324 2325 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2326 if (ret < 0) 2327 goto out; 2328 BUG_ON(ret == 0); /* Corruption */ 2329 2330 ret = -ENOENT; 2331 if (path->slots[0] == 0) 2332 goto out; 2333 2334 path->slots[0]--; 2335 leaf = path->nodes[0]; 2336 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2337 2338 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) 2339 goto out; 2340 2341 ret = 1; 2342 item_size = btrfs_item_size(leaf, path->slots[0]); 2343 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2344 2345 /* If extent item has more than 1 inline ref then it's shared */ 2346 if (item_size != sizeof(*ei) + 2347 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) 2348 goto out; 2349 2350 /* 2351 * If extent created before last snapshot => it's shared unless the 2352 * snapshot has been deleted. Use the heuristic if strict is false. 
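 * This is deliberately conservative: with strict == false an extent
 * whose generation predates the last snapshot is reported as shared
 * even if that snapshot has since been deleted, trading accuracy for a
 * cheap generation comparison.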
2353 */ 2354 if (!strict && 2355 (btrfs_extent_generation(leaf, ei) <= 2356 btrfs_root_last_snapshot(&root->root_item))) 2357 goto out; 2358 2359 iref = (struct btrfs_extent_inline_ref *)(ei + 1); 2360 2361 /* If this extent has SHARED_DATA_REF then it's shared */ 2362 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); 2363 if (type != BTRFS_EXTENT_DATA_REF_KEY) 2364 goto out; 2365 2366 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 2367 if (btrfs_extent_refs(leaf, ei) != 2368 btrfs_extent_data_ref_count(leaf, ref) || 2369 btrfs_extent_data_ref_root(leaf, ref) != 2370 root->root_key.objectid || 2371 btrfs_extent_data_ref_objectid(leaf, ref) != objectid || 2372 btrfs_extent_data_ref_offset(leaf, ref) != offset) 2373 goto out; 2374 2375 ret = 0; 2376 out: 2377 return ret; 2378 } 2379 2380 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, 2381 u64 bytenr, bool strict, struct btrfs_path *path) 2382 { 2383 int ret; 2384 2385 do { 2386 ret = check_committed_ref(root, path, objectid, 2387 offset, bytenr, strict); 2388 if (ret && ret != -ENOENT) 2389 goto out; 2390 2391 ret = check_delayed_ref(root, path, objectid, offset, bytenr); 2392 } while (ret == -EAGAIN); 2393 2394 out: 2395 btrfs_release_path(path); 2396 if (btrfs_is_data_reloc_root(root)) 2397 WARN_ON(ret > 0); 2398 return ret; 2399 } 2400 2401 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, 2402 struct btrfs_root *root, 2403 struct extent_buffer *buf, 2404 int full_backref, int inc) 2405 { 2406 struct btrfs_fs_info *fs_info = root->fs_info; 2407 u64 bytenr; 2408 u64 num_bytes; 2409 u64 parent; 2410 u64 ref_root; 2411 u32 nritems; 2412 struct btrfs_key key; 2413 struct btrfs_file_extent_item *fi; 2414 struct btrfs_ref generic_ref = { 0 }; 2415 bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC); 2416 int i; 2417 int action; 2418 int level; 2419 int ret = 0; 2420 2421 if (btrfs_is_testing(fs_info)) 2422 return 0; 2423 2424 ref_root = btrfs_header_owner(buf); 2425 nritems = btrfs_header_nritems(buf); 2426 level = btrfs_header_level(buf); 2427 2428 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) 2429 return 0; 2430 2431 if (full_backref) 2432 parent = buf->start; 2433 else 2434 parent = 0; 2435 if (inc) 2436 action = BTRFS_ADD_DELAYED_REF; 2437 else 2438 action = BTRFS_DROP_DELAYED_REF; 2439 2440 for (i = 0; i < nritems; i++) { 2441 if (level == 0) { 2442 btrfs_item_key_to_cpu(buf, &key, i); 2443 if (key.type != BTRFS_EXTENT_DATA_KEY) 2444 continue; 2445 fi = btrfs_item_ptr(buf, i, 2446 struct btrfs_file_extent_item); 2447 if (btrfs_file_extent_type(buf, fi) == 2448 BTRFS_FILE_EXTENT_INLINE) 2449 continue; 2450 bytenr = btrfs_file_extent_disk_bytenr(buf, fi); 2451 if (bytenr == 0) 2452 continue; 2453 2454 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); 2455 key.offset -= btrfs_file_extent_offset(buf, fi); 2456 btrfs_init_generic_ref(&generic_ref, action, bytenr, 2457 num_bytes, parent); 2458 btrfs_init_data_ref(&generic_ref, ref_root, key.objectid, 2459 key.offset, root->root_key.objectid, 2460 for_reloc); 2461 if (inc) 2462 ret = btrfs_inc_extent_ref(trans, &generic_ref); 2463 else 2464 ret = btrfs_free_extent(trans, &generic_ref); 2465 if (ret) 2466 goto fail; 2467 } else { 2468 bytenr = btrfs_node_blockptr(buf, i); 2469 num_bytes = fs_info->nodesize; 2470 btrfs_init_generic_ref(&generic_ref, action, bytenr, 2471 num_bytes, parent); 2472 btrfs_init_tree_ref(&generic_ref, level - 1, ref_root, 2473 root->root_key.objectid, for_reloc); 
2474 if (inc) 2475 ret = btrfs_inc_extent_ref(trans, &generic_ref); 2476 else 2477 ret = btrfs_free_extent(trans, &generic_ref); 2478 if (ret) 2479 goto fail; 2480 } 2481 } 2482 return 0; 2483 fail: 2484 return ret; 2485 } 2486 2487 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2488 struct extent_buffer *buf, int full_backref) 2489 { 2490 return __btrfs_mod_ref(trans, root, buf, full_backref, 1); 2491 } 2492 2493 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2494 struct extent_buffer *buf, int full_backref) 2495 { 2496 return __btrfs_mod_ref(trans, root, buf, full_backref, 0); 2497 } 2498 2499 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) 2500 { 2501 struct btrfs_fs_info *fs_info = root->fs_info; 2502 u64 flags; 2503 u64 ret; 2504 2505 if (data) 2506 flags = BTRFS_BLOCK_GROUP_DATA; 2507 else if (root == fs_info->chunk_root) 2508 flags = BTRFS_BLOCK_GROUP_SYSTEM; 2509 else 2510 flags = BTRFS_BLOCK_GROUP_METADATA; 2511 2512 ret = btrfs_get_alloc_profile(fs_info, flags); 2513 return ret; 2514 } 2515 2516 static u64 first_logical_byte(struct btrfs_fs_info *fs_info) 2517 { 2518 struct rb_node *leftmost; 2519 u64 bytenr = 0; 2520 2521 read_lock(&fs_info->block_group_cache_lock); 2522 /* Get the block group with the lowest logical start address. */ 2523 leftmost = rb_first_cached(&fs_info->block_group_cache_tree); 2524 if (leftmost) { 2525 struct btrfs_block_group *bg; 2526 2527 bg = rb_entry(leftmost, struct btrfs_block_group, cache_node); 2528 bytenr = bg->start; 2529 } 2530 read_unlock(&fs_info->block_group_cache_lock); 2531 2532 return bytenr; 2533 } 2534 2535 static int pin_down_extent(struct btrfs_trans_handle *trans, 2536 struct btrfs_block_group *cache, 2537 u64 bytenr, u64 num_bytes, int reserved) 2538 { 2539 struct btrfs_fs_info *fs_info = cache->fs_info; 2540 2541 spin_lock(&cache->space_info->lock); 2542 spin_lock(&cache->lock); 2543 cache->pinned += num_bytes; 2544 btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info, 2545 num_bytes); 2546 if (reserved) { 2547 cache->reserved -= num_bytes; 2548 cache->space_info->bytes_reserved -= num_bytes; 2549 } 2550 spin_unlock(&cache->lock); 2551 spin_unlock(&cache->space_info->lock); 2552 2553 set_extent_bit(&trans->transaction->pinned_extents, bytenr, 2554 bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); 2555 return 0; 2556 } 2557 2558 int btrfs_pin_extent(struct btrfs_trans_handle *trans, 2559 u64 bytenr, u64 num_bytes, int reserved) 2560 { 2561 struct btrfs_block_group *cache; 2562 2563 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); 2564 BUG_ON(!cache); /* Logic error */ 2565 2566 pin_down_extent(trans, cache, bytenr, num_bytes, reserved); 2567 2568 btrfs_put_block_group(cache); 2569 return 0; 2570 } 2571 2572 /* 2573 * this function must be called within transaction 2574 */ 2575 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, 2576 u64 bytenr, u64 num_bytes) 2577 { 2578 struct btrfs_block_group *cache; 2579 int ret; 2580 2581 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); 2582 if (!cache) 2583 return -EINVAL; 2584 2585 /* 2586 * Fully cache the free space first so that our pin removes the free space 2587 * from the cache. 
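 * Passing true for the wait argument of btrfs_cache_block_group() below
 * blocks until caching has finished, so btrfs_remove_free_space()
 * operates on the complete free space map rather than a partially
 * loaded one.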
2588 */ 2589 ret = btrfs_cache_block_group(cache, true); 2590 if (ret) 2591 goto out; 2592 2593 pin_down_extent(trans, cache, bytenr, num_bytes, 0); 2594 2595 /* remove us from the free space cache (if we're there at all) */ 2596 ret = btrfs_remove_free_space(cache, bytenr, num_bytes); 2597 out: 2598 btrfs_put_block_group(cache); 2599 return ret; 2600 } 2601 2602 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, 2603 u64 start, u64 num_bytes) 2604 { 2605 int ret; 2606 struct btrfs_block_group *block_group; 2607 2608 block_group = btrfs_lookup_block_group(fs_info, start); 2609 if (!block_group) 2610 return -EINVAL; 2611 2612 ret = btrfs_cache_block_group(block_group, true); 2613 if (ret) 2614 goto out; 2615 2616 ret = btrfs_remove_free_space(block_group, start, num_bytes); 2617 out: 2618 btrfs_put_block_group(block_group); 2619 return ret; 2620 } 2621 2622 int btrfs_exclude_logged_extents(struct extent_buffer *eb) 2623 { 2624 struct btrfs_fs_info *fs_info = eb->fs_info; 2625 struct btrfs_file_extent_item *item; 2626 struct btrfs_key key; 2627 int found_type; 2628 int i; 2629 int ret = 0; 2630 2631 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) 2632 return 0; 2633 2634 for (i = 0; i < btrfs_header_nritems(eb); i++) { 2635 btrfs_item_key_to_cpu(eb, &key, i); 2636 if (key.type != BTRFS_EXTENT_DATA_KEY) 2637 continue; 2638 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); 2639 found_type = btrfs_file_extent_type(eb, item); 2640 if (found_type == BTRFS_FILE_EXTENT_INLINE) 2641 continue; 2642 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 2643 continue; 2644 key.objectid = btrfs_file_extent_disk_bytenr(eb, item); 2645 key.offset = btrfs_file_extent_disk_num_bytes(eb, item); 2646 ret = __exclude_logged_extent(fs_info, key.objectid, key.offset); 2647 if (ret) 2648 break; 2649 } 2650 2651 return ret; 2652 } 2653 2654 static void 2655 btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) 2656 { 2657 atomic_inc(&bg->reservations); 2658 } 2659 2660 /* 2661 * Returns the free cluster for the given space info and sets empty_cluster to 2662 * what it should be based on the mount options. 
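 *
 * As a quick recap of the mapping implemented below:
 *
 *	metadata + ssd		-> meta_alloc_cluster, empty_cluster 2M
 *	metadata		-> meta_alloc_cluster, empty_cluster 64K
 *	data + ssd_spread	-> data_alloc_cluster, empty_cluster 2M
 *	data / mixed		-> no cluster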
2663 */ 2664 static struct btrfs_free_cluster * 2665 fetch_cluster_info(struct btrfs_fs_info *fs_info, 2666 struct btrfs_space_info *space_info, u64 *empty_cluster) 2667 { 2668 struct btrfs_free_cluster *ret = NULL; 2669 2670 *empty_cluster = 0; 2671 if (btrfs_mixed_space_info(space_info)) 2672 return ret; 2673 2674 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 2675 ret = &fs_info->meta_alloc_cluster; 2676 if (btrfs_test_opt(fs_info, SSD)) 2677 *empty_cluster = SZ_2M; 2678 else 2679 *empty_cluster = SZ_64K; 2680 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && 2681 btrfs_test_opt(fs_info, SSD_SPREAD)) { 2682 *empty_cluster = SZ_2M; 2683 ret = &fs_info->data_alloc_cluster; 2684 } 2685 2686 return ret; 2687 } 2688 2689 static int unpin_extent_range(struct btrfs_fs_info *fs_info, 2690 u64 start, u64 end, 2691 const bool return_free_space) 2692 { 2693 struct btrfs_block_group *cache = NULL; 2694 struct btrfs_space_info *space_info; 2695 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 2696 struct btrfs_free_cluster *cluster = NULL; 2697 u64 len; 2698 u64 total_unpinned = 0; 2699 u64 empty_cluster = 0; 2700 bool readonly; 2701 2702 while (start <= end) { 2703 readonly = false; 2704 if (!cache || 2705 start >= cache->start + cache->length) { 2706 if (cache) 2707 btrfs_put_block_group(cache); 2708 total_unpinned = 0; 2709 cache = btrfs_lookup_block_group(fs_info, start); 2710 BUG_ON(!cache); /* Logic error */ 2711 2712 cluster = fetch_cluster_info(fs_info, 2713 cache->space_info, 2714 &empty_cluster); 2715 empty_cluster <<= 1; 2716 } 2717 2718 len = cache->start + cache->length - start; 2719 len = min(len, end + 1 - start); 2720 2721 if (return_free_space) 2722 btrfs_add_free_space(cache, start, len); 2723 2724 start += len; 2725 total_unpinned += len; 2726 space_info = cache->space_info; 2727 2728 /* 2729 * If this space cluster has been marked as fragmented and we've 2730 * unpinned enough in this block group to potentially allow a 2731 * cluster to be created inside of it go ahead and clear the 2732 * fragmented check. 
2733 */ 2734 if (cluster && cluster->fragmented && 2735 total_unpinned > empty_cluster) { 2736 spin_lock(&cluster->lock); 2737 cluster->fragmented = 0; 2738 spin_unlock(&cluster->lock); 2739 } 2740 2741 spin_lock(&space_info->lock); 2742 spin_lock(&cache->lock); 2743 cache->pinned -= len; 2744 btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len); 2745 space_info->max_extent_size = 0; 2746 if (cache->ro) { 2747 space_info->bytes_readonly += len; 2748 readonly = true; 2749 } else if (btrfs_is_zoned(fs_info)) { 2750 /* Need reset before reusing in a zoned block group */ 2751 space_info->bytes_zone_unusable += len; 2752 readonly = true; 2753 } 2754 spin_unlock(&cache->lock); 2755 if (!readonly && return_free_space && 2756 global_rsv->space_info == space_info) { 2757 spin_lock(&global_rsv->lock); 2758 if (!global_rsv->full) { 2759 u64 to_add = min(len, global_rsv->size - 2760 global_rsv->reserved); 2761 2762 global_rsv->reserved += to_add; 2763 btrfs_space_info_update_bytes_may_use(fs_info, 2764 space_info, to_add); 2765 if (global_rsv->reserved >= global_rsv->size) 2766 global_rsv->full = 1; 2767 len -= to_add; 2768 } 2769 spin_unlock(&global_rsv->lock); 2770 } 2771 /* Add to any tickets we may have */ 2772 if (!readonly && return_free_space && len) 2773 btrfs_try_granting_tickets(fs_info, space_info); 2774 spin_unlock(&space_info->lock); 2775 } 2776 2777 if (cache) 2778 btrfs_put_block_group(cache); 2779 return 0; 2780 } 2781 2782 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) 2783 { 2784 struct btrfs_fs_info *fs_info = trans->fs_info; 2785 struct btrfs_block_group *block_group, *tmp; 2786 struct list_head *deleted_bgs; 2787 struct extent_io_tree *unpin; 2788 u64 start; 2789 u64 end; 2790 int ret; 2791 2792 unpin = &trans->transaction->pinned_extents; 2793 2794 while (!TRANS_ABORTED(trans)) { 2795 struct extent_state *cached_state = NULL; 2796 2797 mutex_lock(&fs_info->unused_bg_unpin_mutex); 2798 if (!find_first_extent_bit(unpin, 0, &start, &end, 2799 EXTENT_DIRTY, &cached_state)) { 2800 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 2801 break; 2802 } 2803 2804 if (btrfs_test_opt(fs_info, DISCARD_SYNC)) 2805 ret = btrfs_discard_extent(fs_info, start, 2806 end + 1 - start, NULL); 2807 2808 clear_extent_dirty(unpin, start, end, &cached_state); 2809 unpin_extent_range(fs_info, start, end, true); 2810 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 2811 free_extent_state(cached_state); 2812 cond_resched(); 2813 } 2814 2815 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { 2816 btrfs_discard_calc_delay(&fs_info->discard_ctl); 2817 btrfs_discard_schedule_work(&fs_info->discard_ctl, true); 2818 } 2819 2820 /* 2821 * Transaction is finished. We don't need the lock anymore. We 2822 * do need to clean up the block groups in case of a transaction 2823 * abort. 
2824 */ 2825 deleted_bgs = &trans->transaction->deleted_bgs; 2826 list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) { 2827 u64 trimmed = 0; 2828 2829 ret = -EROFS; 2830 if (!TRANS_ABORTED(trans)) 2831 ret = btrfs_discard_extent(fs_info, 2832 block_group->start, 2833 block_group->length, 2834 &trimmed); 2835 2836 list_del_init(&block_group->bg_list); 2837 btrfs_unfreeze_block_group(block_group); 2838 btrfs_put_block_group(block_group); 2839 2840 if (ret) { 2841 const char *errstr = btrfs_decode_error(ret); 2842 btrfs_warn(fs_info, 2843 "discard failed while removing blockgroup: errno=%d %s", 2844 ret, errstr); 2845 } 2846 } 2847 2848 return 0; 2849 } 2850 2851 static int do_free_extent_accounting(struct btrfs_trans_handle *trans, 2852 u64 bytenr, u64 num_bytes, bool is_data) 2853 { 2854 int ret; 2855 2856 if (is_data) { 2857 struct btrfs_root *csum_root; 2858 2859 csum_root = btrfs_csum_root(trans->fs_info, bytenr); 2860 ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes); 2861 if (ret) { 2862 btrfs_abort_transaction(trans, ret); 2863 return ret; 2864 } 2865 } 2866 2867 ret = add_to_free_space_tree(trans, bytenr, num_bytes); 2868 if (ret) { 2869 btrfs_abort_transaction(trans, ret); 2870 return ret; 2871 } 2872 2873 ret = btrfs_update_block_group(trans, bytenr, num_bytes, false); 2874 if (ret) 2875 btrfs_abort_transaction(trans, ret); 2876 2877 return ret; 2878 } 2879 2880 #define abort_and_dump(trans, path, fmt, args...) \ 2881 ({ \ 2882 btrfs_abort_transaction(trans, -EUCLEAN); \ 2883 btrfs_print_leaf(path->nodes[0]); \ 2884 btrfs_crit(trans->fs_info, fmt, ##args); \ 2885 }) 2886 2887 /* 2888 * Drop one or more refs of @node. 2889 * 2890 * 1. Locate the extent refs. 2891 * It's either inline in EXTENT/METADATA_ITEM or in keyed SHARED_* item. 2892 * Locate it, then reduce the refs number or remove the ref line completely. 2893 * 2894 * 2. Update the refs count in EXTENT/METADATA_ITEM 2895 * 2896 * Inline backref case: 2897 * 2898 * in the extent tree we have: 2899 * 2900 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82 2901 * refs 2 gen 6 flags DATA 2902 * extent data backref root FS_TREE objectid 258 offset 0 count 1 2903 * extent data backref root FS_TREE objectid 257 offset 0 count 1 2904 * 2905 * This function gets called with: 2906 * 2907 * node->bytenr = 13631488 2908 * node->num_bytes = 1048576 2909 * root_objectid = FS_TREE 2910 * owner_objectid = 257 2911 * owner_offset = 0 2912 * refs_to_drop = 1 2913 * 2914 * Then we should get something like: 2915 * 2916 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82 2917 * refs 1 gen 6 flags DATA 2918 * extent data backref root FS_TREE objectid 258 offset 0 count 1 2919 * 2920 * Keyed backref case: 2921 * 2922 * in the extent tree we have: 2923 * 2924 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24 2925 * refs 754 gen 6 flags DATA 2926 * [...] 2927 * item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28 2928 * extent data backref root FS_TREE objectid 866 offset 0 count 1 2929 * 2930 * This function gets called with: 2931 * 2932 * node->bytenr = 13631488 2933 * node->num_bytes = 1048576 2934 * root_objectid = FS_TREE 2935 * owner_objectid = 866 2936 * owner_offset = 0 2937 * refs_to_drop = 1 2938 * 2939 * Then we should get something like: 2940 * 2941 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24 2942 * refs 753 gen 6 flags DATA 2943 * 2944 * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed. 
2945 */ 2946 static int __btrfs_free_extent(struct btrfs_trans_handle *trans, 2947 struct btrfs_delayed_ref_node *node, u64 parent, 2948 u64 root_objectid, u64 owner_objectid, 2949 u64 owner_offset, int refs_to_drop, 2950 struct btrfs_delayed_extent_op *extent_op) 2951 { 2952 struct btrfs_fs_info *info = trans->fs_info; 2953 struct btrfs_key key; 2954 struct btrfs_path *path; 2955 struct btrfs_root *extent_root; 2956 struct extent_buffer *leaf; 2957 struct btrfs_extent_item *ei; 2958 struct btrfs_extent_inline_ref *iref; 2959 int ret; 2960 int is_data; 2961 int extent_slot = 0; 2962 int found_extent = 0; 2963 int num_to_del = 1; 2964 u32 item_size; 2965 u64 refs; 2966 u64 bytenr = node->bytenr; 2967 u64 num_bytes = node->num_bytes; 2968 bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA); 2969 2970 extent_root = btrfs_extent_root(info, bytenr); 2971 ASSERT(extent_root); 2972 2973 path = btrfs_alloc_path(); 2974 if (!path) 2975 return -ENOMEM; 2976 2977 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID; 2978 2979 if (!is_data && refs_to_drop != 1) { 2980 btrfs_crit(info, 2981 "invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u", 2982 node->bytenr, refs_to_drop); 2983 ret = -EINVAL; 2984 btrfs_abort_transaction(trans, ret); 2985 goto out; 2986 } 2987 2988 if (is_data) 2989 skinny_metadata = false; 2990 2991 ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes, 2992 parent, root_objectid, owner_objectid, 2993 owner_offset); 2994 if (ret == 0) { 2995 /* 2996 * Either the inline backref or the SHARED_DATA_REF/ 2997 * SHARED_BLOCK_REF is found 2998 * 2999 * Here is a quick path to locate EXTENT/METADATA_ITEM. 3000 * It's possible the EXTENT/METADATA_ITEM is near current slot. 3001 */ 3002 extent_slot = path->slots[0]; 3003 while (extent_slot >= 0) { 3004 btrfs_item_key_to_cpu(path->nodes[0], &key, 3005 extent_slot); 3006 if (key.objectid != bytenr) 3007 break; 3008 if (key.type == BTRFS_EXTENT_ITEM_KEY && 3009 key.offset == num_bytes) { 3010 found_extent = 1; 3011 break; 3012 } 3013 if (key.type == BTRFS_METADATA_ITEM_KEY && 3014 key.offset == owner_objectid) { 3015 found_extent = 1; 3016 break; 3017 } 3018 3019 /* Quick path didn't find the EXTENT/METADATA_ITEM */ 3020 if (path->slots[0] - extent_slot > 5) 3021 break; 3022 extent_slot--; 3023 } 3024 3025 if (!found_extent) { 3026 if (iref) { 3027 abort_and_dump(trans, path, 3028 "invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref", 3029 path->slots[0]); 3030 ret = -EUCLEAN; 3031 goto out; 3032 } 3033 /* Must be SHARED_* item, remove the backref first */ 3034 ret = remove_extent_backref(trans, extent_root, path, 3035 NULL, refs_to_drop, is_data); 3036 if (ret) { 3037 btrfs_abort_transaction(trans, ret); 3038 goto out; 3039 } 3040 btrfs_release_path(path); 3041 3042 /* Slow path to locate EXTENT/METADATA_ITEM */ 3043 key.objectid = bytenr; 3044 key.type = BTRFS_EXTENT_ITEM_KEY; 3045 key.offset = num_bytes; 3046 3047 if (!is_data && skinny_metadata) { 3048 key.type = BTRFS_METADATA_ITEM_KEY; 3049 key.offset = owner_objectid; 3050 } 3051 3052 ret = btrfs_search_slot(trans, extent_root, 3053 &key, path, -1, 1); 3054 if (ret > 0 && skinny_metadata && path->slots[0]) { 3055 /* 3056 * Couldn't find our skinny metadata item, 3057 * see if we have ye olde extent item. 
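 * (Skinny metadata keys a tree block as (bytenr, METADATA_ITEM, level)
 * while the old format uses (bytenr, EXTENT_ITEM, nodesize), hence the
 * fallback search with the second key form below.)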
3058 */ 3059 path->slots[0]--; 3060 btrfs_item_key_to_cpu(path->nodes[0], &key, 3061 path->slots[0]); 3062 if (key.objectid == bytenr && 3063 key.type == BTRFS_EXTENT_ITEM_KEY && 3064 key.offset == num_bytes) 3065 ret = 0; 3066 } 3067 3068 if (ret > 0 && skinny_metadata) { 3069 skinny_metadata = false; 3070 key.objectid = bytenr; 3071 key.type = BTRFS_EXTENT_ITEM_KEY; 3072 key.offset = num_bytes; 3073 btrfs_release_path(path); 3074 ret = btrfs_search_slot(trans, extent_root, 3075 &key, path, -1, 1); 3076 } 3077 3078 if (ret) { 3079 if (ret > 0) 3080 btrfs_print_leaf(path->nodes[0]); 3081 btrfs_err(info, 3082 "umm, got %d back from search, was looking for %llu, slot %d", 3083 ret, bytenr, path->slots[0]); 3084 } 3085 if (ret < 0) { 3086 btrfs_abort_transaction(trans, ret); 3087 goto out; 3088 } 3089 extent_slot = path->slots[0]; 3090 } 3091 } else if (WARN_ON(ret == -ENOENT)) { 3092 abort_and_dump(trans, path, 3093 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d", 3094 bytenr, parent, root_objectid, owner_objectid, 3095 owner_offset, path->slots[0]); 3096 goto out; 3097 } else { 3098 btrfs_abort_transaction(trans, ret); 3099 goto out; 3100 } 3101 3102 leaf = path->nodes[0]; 3103 item_size = btrfs_item_size(leaf, extent_slot); 3104 if (unlikely(item_size < sizeof(*ei))) { 3105 ret = -EUCLEAN; 3106 btrfs_err(trans->fs_info, 3107 "unexpected extent item size, has %u expect >= %zu", 3108 item_size, sizeof(*ei)); 3109 btrfs_abort_transaction(trans, ret); 3110 goto out; 3111 } 3112 ei = btrfs_item_ptr(leaf, extent_slot, 3113 struct btrfs_extent_item); 3114 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID && 3115 key.type == BTRFS_EXTENT_ITEM_KEY) { 3116 struct btrfs_tree_block_info *bi; 3117 3118 if (item_size < sizeof(*ei) + sizeof(*bi)) { 3119 abort_and_dump(trans, path, 3120 "invalid extent item size for key (%llu, %u, %llu) slot %u owner %llu, has %u expect >= %zu", 3121 key.objectid, key.type, key.offset, 3122 path->slots[0], owner_objectid, item_size, 3123 sizeof(*ei) + sizeof(*bi)); 3124 ret = -EUCLEAN; 3125 goto out; 3126 } 3127 bi = (struct btrfs_tree_block_info *)(ei + 1); 3128 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); 3129 } 3130 3131 refs = btrfs_extent_refs(leaf, ei); 3132 if (refs < refs_to_drop) { 3133 abort_and_dump(trans, path, 3134 "trying to drop %d refs but we only have %llu for bytenr %llu slot %u", 3135 refs_to_drop, refs, bytenr, path->slots[0]); 3136 ret = -EUCLEAN; 3137 goto out; 3138 } 3139 refs -= refs_to_drop; 3140 3141 if (refs > 0) { 3142 if (extent_op) 3143 __run_delayed_extent_op(extent_op, leaf, ei); 3144 /* 3145 * In the case of inline back ref, reference count will 3146 * be updated by remove_extent_backref 3147 */ 3148 if (iref) { 3149 if (!found_extent) { 3150 abort_and_dump(trans, path, 3151 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u", 3152 path->slots[0]); 3153 ret = -EUCLEAN; 3154 goto out; 3155 } 3156 } else { 3157 btrfs_set_extent_refs(leaf, ei, refs); 3158 btrfs_mark_buffer_dirty(trans, leaf); 3159 } 3160 if (found_extent) { 3161 ret = remove_extent_backref(trans, extent_root, path, 3162 iref, refs_to_drop, is_data); 3163 if (ret) { 3164 btrfs_abort_transaction(trans, ret); 3165 goto out; 3166 } 3167 } 3168 } else { 3169 /* In this branch refs == 1 */ 3170 if (found_extent) { 3171 if (is_data && refs_to_drop != 3172 extent_data_ref_count(path, iref)) { 3173 abort_and_dump(trans, path, 3174 "invalid refs_to_drop, current refs %u refs_to_drop %u slot %u", 3175 
extent_data_ref_count(path, iref), 3176 refs_to_drop, path->slots[0]); 3177 ret = -EUCLEAN; 3178 goto out; 3179 } 3180 if (iref) { 3181 if (path->slots[0] != extent_slot) { 3182 abort_and_dump(trans, path, 3183 "invalid iref, extent item key (%llu %u %llu) slot %u doesn't have wanted iref", 3184 key.objectid, key.type, 3185 key.offset, path->slots[0]); 3186 ret = -EUCLEAN; 3187 goto out; 3188 } 3189 } else { 3190 /* 3191 * No inline ref, we must be at SHARED_* item, 3192 * and since it's a single ref, it must be: 3193 * | extent_slot ||extent_slot + 1| 3194 * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ] 3195 */ 3196 if (path->slots[0] != extent_slot + 1) { 3197 abort_and_dump(trans, path, 3198 "invalid SHARED_* item slot %u, previous item is not EXTENT/METADATA_ITEM", 3199 path->slots[0]); 3200 ret = -EUCLEAN; 3201 goto out; 3202 } 3203 path->slots[0] = extent_slot; 3204 num_to_del = 2; 3205 } 3206 } 3207 3208 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], 3209 num_to_del); 3210 if (ret) { 3211 btrfs_abort_transaction(trans, ret); 3212 goto out; 3213 } 3214 btrfs_release_path(path); 3215 3216 ret = do_free_extent_accounting(trans, bytenr, num_bytes, is_data); 3217 } 3218 btrfs_release_path(path); 3219 3220 out: 3221 btrfs_free_path(path); 3222 return ret; 3223 } 3224 3225 /* 3226 * when we free a block, it is possible (and likely) that we free the last 3227 * delayed ref for that extent as well. This searches the delayed ref tree for 3228 * a given extent, and if there are no other delayed refs to be processed, it 3229 * removes it from the tree. 3230 */ 3231 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, 3232 u64 bytenr) 3233 { 3234 struct btrfs_delayed_ref_head *head; 3235 struct btrfs_delayed_ref_root *delayed_refs; 3236 int ret = 0; 3237 3238 delayed_refs = &trans->transaction->delayed_refs; 3239 spin_lock(&delayed_refs->lock); 3240 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); 3241 if (!head) 3242 goto out_delayed_unlock; 3243 3244 spin_lock(&head->lock); 3245 if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root)) 3246 goto out; 3247 3248 if (cleanup_extent_op(head) != NULL) 3249 goto out; 3250 3251 /* 3252 * waiting for the lock here would deadlock. 
If someone else has it 3253 * locked they are already in the process of dropping it anyway 3254 */ 3255 if (!mutex_trylock(&head->mutex)) 3256 goto out; 3257 3258 btrfs_delete_ref_head(delayed_refs, head); 3259 head->processing = false; 3260 3261 spin_unlock(&head->lock); 3262 spin_unlock(&delayed_refs->lock); 3263 3264 BUG_ON(head->extent_op); 3265 if (head->must_insert_reserved) 3266 ret = 1; 3267 3268 btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); 3269 mutex_unlock(&head->mutex); 3270 btrfs_put_delayed_ref_head(head); 3271 return ret; 3272 out: 3273 spin_unlock(&head->lock); 3274 3275 out_delayed_unlock: 3276 spin_unlock(&delayed_refs->lock); 3277 return 0; 3278 } 3279 3280 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 3281 u64 root_id, 3282 struct extent_buffer *buf, 3283 u64 parent, int last_ref) 3284 { 3285 struct btrfs_fs_info *fs_info = trans->fs_info; 3286 struct btrfs_ref generic_ref = { 0 }; 3287 int ret; 3288 3289 btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, 3290 buf->start, buf->len, parent); 3291 btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 3292 root_id, 0, false); 3293 3294 if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3295 btrfs_ref_tree_mod(fs_info, &generic_ref); 3296 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL); 3297 BUG_ON(ret); /* -ENOMEM */ 3298 } 3299 3300 if (last_ref && btrfs_header_generation(buf) == trans->transid) { 3301 struct btrfs_block_group *cache; 3302 bool must_pin = false; 3303 3304 if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3305 ret = check_ref_cleanup(trans, buf->start); 3306 if (!ret) { 3307 btrfs_redirty_list_add(trans->transaction, buf); 3308 goto out; 3309 } 3310 } 3311 3312 cache = btrfs_lookup_block_group(fs_info, buf->start); 3313 3314 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 3315 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3316 btrfs_put_block_group(cache); 3317 goto out; 3318 } 3319 3320 /* 3321 * If there are tree mod log users we may have recorded mod log 3322 * operations for this node. If we re-allocate this node we 3323 * could replay operations on this node that happened when it 3324 * existed in a completely different root. For example if it 3325 * was part of root A, then was reallocated to root B, and we 3326 * are doing a btrfs_old_search_slot(root b), we could replay 3327 * operations that happened when the block was part of root A, 3328 * giving us an inconsistent view of the btree. 3329 * 3330 * We are safe from races here because at this point no other 3331 * node or root points to this extent buffer, so if after this 3332 * check a new tree mod log user joins we will not have an 3333 * existing log of operations on this node that we have to 3334 * contend with. 3335 */ 3336 if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) 3337 must_pin = true; 3338 3339 if (must_pin || btrfs_is_zoned(fs_info)) { 3340 btrfs_redirty_list_add(trans->transaction, buf); 3341 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3342 btrfs_put_block_group(cache); 3343 goto out; 3344 } 3345 3346 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 3347 3348 btrfs_add_free_space(cache, buf->start, buf->len); 3349 btrfs_free_reserved_bytes(cache, buf->len, 0); 3350 btrfs_put_block_group(cache); 3351 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len); 3352 } 3353 out: 3354 if (last_ref) { 3355 /* 3356 * Deleting the buffer, clear the corrupt flag since it doesn't 3357 * matter anymore. 
3358 */ 3359 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); 3360 } 3361 } 3362 3363 /* Can return -ENOMEM */ 3364 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) 3365 { 3366 struct btrfs_fs_info *fs_info = trans->fs_info; 3367 int ret; 3368 3369 if (btrfs_is_testing(fs_info)) 3370 return 0; 3371 3372 /* 3373 * tree log blocks never actually go into the extent allocation 3374 * tree, just update pinning info and exit early. 3375 */ 3376 if ((ref->type == BTRFS_REF_METADATA && 3377 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || 3378 (ref->type == BTRFS_REF_DATA && 3379 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)) { 3380 /* unlocks the pinned mutex */ 3381 btrfs_pin_extent(trans, ref->bytenr, ref->len, 1); 3382 ret = 0; 3383 } else if (ref->type == BTRFS_REF_METADATA) { 3384 ret = btrfs_add_delayed_tree_ref(trans, ref, NULL); 3385 } else { 3386 ret = btrfs_add_delayed_data_ref(trans, ref, 0); 3387 } 3388 3389 if (!((ref->type == BTRFS_REF_METADATA && 3390 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || 3391 (ref->type == BTRFS_REF_DATA && 3392 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID))) 3393 btrfs_ref_tree_mod(fs_info, ref); 3394 3395 return ret; 3396 } 3397 3398 enum btrfs_loop_type { 3399 /* 3400 * Start caching block groups but do not wait for progress or for them 3401 * to be done. 3402 */ 3403 LOOP_CACHING_NOWAIT, 3404 3405 /* 3406 * Wait for the block group free_space >= the space we're waiting for if 3407 * the block group isn't cached. 3408 */ 3409 LOOP_CACHING_WAIT, 3410 3411 /* 3412 * Allow allocations to happen from block groups that do not yet have a 3413 * size classification. 3414 */ 3415 LOOP_UNSET_SIZE_CLASS, 3416 3417 /* 3418 * Allocate a chunk and then retry the allocation. 3419 */ 3420 LOOP_ALLOC_CHUNK, 3421 3422 /* 3423 * Ignore the size class restrictions for this allocation. 3424 */ 3425 LOOP_WRONG_SIZE_CLASS, 3426 3427 /* 3428 * Ignore the empty size, only try to allocate the number of bytes 3429 * needed for this allocation. 3430 */ 3431 LOOP_NO_EMPTY_SIZE, 3432 }; 3433 3434 static inline void 3435 btrfs_lock_block_group(struct btrfs_block_group *cache, 3436 int delalloc) 3437 { 3438 if (delalloc) 3439 down_read(&cache->data_rwsem); 3440 } 3441 3442 static inline void btrfs_grab_block_group(struct btrfs_block_group *cache, 3443 int delalloc) 3444 { 3445 btrfs_get_block_group(cache); 3446 if (delalloc) 3447 down_read(&cache->data_rwsem); 3448 } 3449 3450 static struct btrfs_block_group *btrfs_lock_cluster( 3451 struct btrfs_block_group *block_group, 3452 struct btrfs_free_cluster *cluster, 3453 int delalloc) 3454 __acquires(&cluster->refill_lock) 3455 { 3456 struct btrfs_block_group *used_bg = NULL; 3457 3458 spin_lock(&cluster->refill_lock); 3459 while (1) { 3460 used_bg = cluster->block_group; 3461 if (!used_bg) 3462 return NULL; 3463 3464 if (used_bg == block_group) 3465 return used_bg; 3466 3467 btrfs_get_block_group(used_bg); 3468 3469 if (!delalloc) 3470 return used_bg; 3471 3472 if (down_read_trylock(&used_bg->data_rwsem)) 3473 return used_bg; 3474 3475 spin_unlock(&cluster->refill_lock); 3476 3477 /* We should only have one-level nested. 
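 * down_read_nested() with SINGLE_DEPTH_NESTING tells lockdep that this
 * is a deliberate acquisition of a second, different block group's
 * data_rwsem, not a recursive lock on the same rwsem.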
*/ 3478 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING); 3479 3480 spin_lock(&cluster->refill_lock); 3481 if (used_bg == cluster->block_group) 3482 return used_bg; 3483 3484 up_read(&used_bg->data_rwsem); 3485 btrfs_put_block_group(used_bg); 3486 } 3487 } 3488 3489 static inline void 3490 btrfs_release_block_group(struct btrfs_block_group *cache, 3491 int delalloc) 3492 { 3493 if (delalloc) 3494 up_read(&cache->data_rwsem); 3495 btrfs_put_block_group(cache); 3496 } 3497 3498 /* 3499 * Helper function for find_free_extent(). 3500 * 3501 * Return -ENOENT to inform caller that we need to fall back to unclustered mode. 3502 * Return >0 to inform caller that we found nothing 3503 * Return 0 means we have found a location and set ffe_ctl->found_offset. 3504 */ 3505 static int find_free_extent_clustered(struct btrfs_block_group *bg, 3506 struct find_free_extent_ctl *ffe_ctl, 3507 struct btrfs_block_group **cluster_bg_ret) 3508 { 3509 struct btrfs_block_group *cluster_bg; 3510 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3511 u64 aligned_cluster; 3512 u64 offset; 3513 int ret; 3514 3515 cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc); 3516 if (!cluster_bg) 3517 goto refill_cluster; 3518 if (cluster_bg != bg && (cluster_bg->ro || 3519 !block_group_bits(cluster_bg, ffe_ctl->flags))) 3520 goto release_cluster; 3521 3522 offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr, 3523 ffe_ctl->num_bytes, cluster_bg->start, 3524 &ffe_ctl->max_extent_size); 3525 if (offset) { 3526 /* We have a block, we're done */ 3527 spin_unlock(&last_ptr->refill_lock); 3528 trace_btrfs_reserve_extent_cluster(cluster_bg, ffe_ctl); 3529 *cluster_bg_ret = cluster_bg; 3530 ffe_ctl->found_offset = offset; 3531 return 0; 3532 } 3533 WARN_ON(last_ptr->block_group != cluster_bg); 3534 3535 release_cluster: 3536 /* 3537 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so 3538 * let's just skip it and let the allocator find whatever block it can 3539 * find. If we reach this point, we will have tried the cluster 3540 * allocator plenty of times and not have found anything, so we are 3541 * likely way too fragmented for the clustering stuff to find anything. 3542 * 3543 * However, if the cluster is taken from the current block group, 3544 * release the cluster first, so that we stand a better chance of 3545 * succeeding in the unclustered allocation. 
3546 */ 3547 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) { 3548 spin_unlock(&last_ptr->refill_lock); 3549 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); 3550 return -ENOENT; 3551 } 3552 3553 /* This cluster didn't work out, free it and start over */ 3554 btrfs_return_cluster_to_free_space(NULL, last_ptr); 3555 3556 if (cluster_bg != bg) 3557 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); 3558 3559 refill_cluster: 3560 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) { 3561 spin_unlock(&last_ptr->refill_lock); 3562 return -ENOENT; 3563 } 3564 3565 aligned_cluster = max_t(u64, 3566 ffe_ctl->empty_cluster + ffe_ctl->empty_size, 3567 bg->full_stripe_len); 3568 ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start, 3569 ffe_ctl->num_bytes, aligned_cluster); 3570 if (ret == 0) { 3571 /* Now pull our allocation out of this cluster */ 3572 offset = btrfs_alloc_from_cluster(bg, last_ptr, 3573 ffe_ctl->num_bytes, ffe_ctl->search_start, 3574 &ffe_ctl->max_extent_size); 3575 if (offset) { 3576 /* We found one, proceed */ 3577 spin_unlock(&last_ptr->refill_lock); 3578 ffe_ctl->found_offset = offset; 3579 trace_btrfs_reserve_extent_cluster(bg, ffe_ctl); 3580 return 0; 3581 } 3582 } 3583 /* 3584 * At this point we either didn't find a cluster or we weren't able to 3585 * allocate a block from our cluster. Free the cluster we've been 3586 * trying to use, and go to the next block group. 3587 */ 3588 btrfs_return_cluster_to_free_space(NULL, last_ptr); 3589 spin_unlock(&last_ptr->refill_lock); 3590 return 1; 3591 } 3592 3593 /* 3594 * Return >0 to inform caller that we found nothing 3595 * Return 0 when we find a free extent and set ffe_ctl->found_offset 3596 */ 3597 static int find_free_extent_unclustered(struct btrfs_block_group *bg, 3598 struct find_free_extent_ctl *ffe_ctl) 3599 { 3600 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3601 u64 offset; 3602 3603 /* 3604 * We are doing an unclustered allocation, set the fragmented flag so 3605 * we don't bother trying to set up a cluster again until we get more 3606 * space. 
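 * The flag is cleared again in unpin_extent_range() once enough space
 * has been unpinned in a block group to make setting up a cluster
 * plausible.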
3607 */ 3608 if (unlikely(last_ptr)) { 3609 spin_lock(&last_ptr->lock); 3610 last_ptr->fragmented = 1; 3611 spin_unlock(&last_ptr->lock); 3612 } 3613 if (ffe_ctl->cached) { 3614 struct btrfs_free_space_ctl *free_space_ctl; 3615 3616 free_space_ctl = bg->free_space_ctl; 3617 spin_lock(&free_space_ctl->tree_lock); 3618 if (free_space_ctl->free_space < 3619 ffe_ctl->num_bytes + ffe_ctl->empty_cluster + 3620 ffe_ctl->empty_size) { 3621 ffe_ctl->total_free_space = max_t(u64, 3622 ffe_ctl->total_free_space, 3623 free_space_ctl->free_space); 3624 spin_unlock(&free_space_ctl->tree_lock); 3625 return 1; 3626 } 3627 spin_unlock(&free_space_ctl->tree_lock); 3628 } 3629 3630 offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start, 3631 ffe_ctl->num_bytes, ffe_ctl->empty_size, 3632 &ffe_ctl->max_extent_size); 3633 if (!offset) 3634 return 1; 3635 ffe_ctl->found_offset = offset; 3636 return 0; 3637 } 3638 3639 static int do_allocation_clustered(struct btrfs_block_group *block_group, 3640 struct find_free_extent_ctl *ffe_ctl, 3641 struct btrfs_block_group **bg_ret) 3642 { 3643 int ret; 3644 3645 /* We want to try and use the cluster allocator, so let's look there */ 3646 if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) { 3647 ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret); 3648 if (ret >= 0) 3649 return ret; 3650 /* ret == -ENOENT case falls through */ 3651 } 3652 3653 return find_free_extent_unclustered(block_group, ffe_ctl); 3654 } 3655 3656 /* 3657 * Tree-log block group locking 3658 * ============================ 3659 * 3660 * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which 3661 * indicates the starting address of a block group, which is reserved only 3662 * for tree-log metadata. 3663 * 3664 * Lock nesting 3665 * ============ 3666 * 3667 * space_info::lock 3668 * block_group::lock 3669 * fs_info::treelog_bg_lock 3670 */ 3671 3672 /* 3673 * Simple allocator for a sequential-only block group. It only allows sequential 3674 * allocation. No need to play with trees. This function also reserves the 3675 * bytes as in btrfs_add_reserved_bytes. 3676 */ 3677 static int do_allocation_zoned(struct btrfs_block_group *block_group, 3678 struct find_free_extent_ctl *ffe_ctl, 3679 struct btrfs_block_group **bg_ret) 3680 { 3681 struct btrfs_fs_info *fs_info = block_group->fs_info; 3682 struct btrfs_space_info *space_info = block_group->space_info; 3683 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3684 u64 start = block_group->start; 3685 u64 num_bytes = ffe_ctl->num_bytes; 3686 u64 avail; 3687 u64 bytenr = block_group->start; 3688 u64 log_bytenr; 3689 u64 data_reloc_bytenr; 3690 int ret = 0; 3691 bool skip = false; 3692 3693 ASSERT(btrfs_is_zoned(block_group->fs_info)); 3694 3695 /* 3696 * Do not allow non-tree-log blocks in the dedicated tree-log block 3697 * group, and vice versa. 3698 */ 3699 spin_lock(&fs_info->treelog_bg_lock); 3700 log_bytenr = fs_info->treelog_bg; 3701 if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) || 3702 (!ffe_ctl->for_treelog && bytenr == log_bytenr))) 3703 skip = true; 3704 spin_unlock(&fs_info->treelog_bg_lock); 3705 if (skip) 3706 return 1; 3707 3708 /* 3709 * Do not allow non-relocation blocks in the dedicated relocation block 3710 * group, and vice versa. 
3711 */ 3712 spin_lock(&fs_info->relocation_bg_lock); 3713 data_reloc_bytenr = fs_info->data_reloc_bg; 3714 if (data_reloc_bytenr && 3715 ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) || 3716 (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr))) 3717 skip = true; 3718 spin_unlock(&fs_info->relocation_bg_lock); 3719 if (skip) 3720 return 1; 3721 3722 /* Check RO and no space case before trying to activate it */ 3723 spin_lock(&block_group->lock); 3724 if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) { 3725 ret = 1; 3726 /* 3727 * May need to clear fs_info->{treelog,data_reloc}_bg. 3728 * Return the error after taking the locks. 3729 */ 3730 } 3731 spin_unlock(&block_group->lock); 3732 3733 /* Metadata block group is activated at write time. */ 3734 if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) && 3735 !btrfs_zone_activate(block_group)) { 3736 ret = 1; 3737 /* 3738 * May need to clear fs_info->{treelog,data_reloc}_bg. 3739 * Return the error after taking the locks. 3740 */ 3741 } 3742 3743 spin_lock(&space_info->lock); 3744 spin_lock(&block_group->lock); 3745 spin_lock(&fs_info->treelog_bg_lock); 3746 spin_lock(&fs_info->relocation_bg_lock); 3747 3748 if (ret) 3749 goto out; 3750 3751 ASSERT(!ffe_ctl->for_treelog || 3752 block_group->start == fs_info->treelog_bg || 3753 fs_info->treelog_bg == 0); 3754 ASSERT(!ffe_ctl->for_data_reloc || 3755 block_group->start == fs_info->data_reloc_bg || 3756 fs_info->data_reloc_bg == 0); 3757 3758 if (block_group->ro || 3759 (!ffe_ctl->for_data_reloc && 3760 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) { 3761 ret = 1; 3762 goto out; 3763 } 3764 3765 /* 3766 * Do not allow a currently used block group to become the dedicated 3767 * tree-log block group. 3768 */ 3769 if (ffe_ctl->for_treelog && !fs_info->treelog_bg && 3770 (block_group->used || block_group->reserved)) { 3771 ret = 1; 3772 goto out; 3773 } 3774 3775 /* 3776 * Do not allow a currently used block group to become the dedicated 3777 * data relocation block group. 3778 */ 3779 if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg && 3780 (block_group->used || block_group->reserved)) { 3781 ret = 1; 3782 goto out; 3783 } 3784 3785 WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity); 3786 avail = block_group->zone_capacity - block_group->alloc_offset; 3787 if (avail < num_bytes) { 3788 if (ffe_ctl->max_extent_size < avail) { 3789 /* 3790 * With sequential allocator, free space is always 3791 * contiguous 3792 */ 3793 ffe_ctl->max_extent_size = avail; 3794 ffe_ctl->total_free_space = avail; 3795 } 3796 ret = 1; 3797 goto out; 3798 } 3799 3800 if (ffe_ctl->for_treelog && !fs_info->treelog_bg) 3801 fs_info->treelog_bg = block_group->start; 3802 3803 if (ffe_ctl->for_data_reloc) { 3804 if (!fs_info->data_reloc_bg) 3805 fs_info->data_reloc_bg = block_group->start; 3806 /* 3807 * Do not allow allocations from this block group, unless it is 3808 * for data relocation. Compared to increasing the ->ro, setting 3809 * the ->zoned_data_reloc_ongoing flag still allows nocow 3810 * writers to come in. See btrfs_inc_nocow_writers(). 3811 * 3812 * We need to disable allocations to avoid allocating a regular 3813 * (non-relocation) data extent here. With a mix of relocation 3814 * extents and regular extents, we can dispatch WRITE commands 3815 * (for relocation extents) and ZONE APPEND commands (for 3816 * regular extents) at the same time to the same zone, which 3817 * easily breaks the write pointer. 
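 * (A regular WRITE must target the zone's current write pointer
 * exactly, while a ZONE APPEND lands wherever the pointer happens to
 * be, so interleaving the two leaves the WRITE aimed at a stale
 * position.)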
3818 * 3819 * Also, this flag avoids this block group to be zone finished. 3820 */ 3821 set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags); 3822 } 3823 3824 ffe_ctl->found_offset = start + block_group->alloc_offset; 3825 block_group->alloc_offset += num_bytes; 3826 spin_lock(&ctl->tree_lock); 3827 ctl->free_space -= num_bytes; 3828 spin_unlock(&ctl->tree_lock); 3829 3830 /* 3831 * We do not check if found_offset is aligned to stripesize. The 3832 * address is anyway rewritten when using zone append writing. 3833 */ 3834 3835 ffe_ctl->search_start = ffe_ctl->found_offset; 3836 3837 out: 3838 if (ret && ffe_ctl->for_treelog) 3839 fs_info->treelog_bg = 0; 3840 if (ret && ffe_ctl->for_data_reloc) 3841 fs_info->data_reloc_bg = 0; 3842 spin_unlock(&fs_info->relocation_bg_lock); 3843 spin_unlock(&fs_info->treelog_bg_lock); 3844 spin_unlock(&block_group->lock); 3845 spin_unlock(&space_info->lock); 3846 return ret; 3847 } 3848 3849 static int do_allocation(struct btrfs_block_group *block_group, 3850 struct find_free_extent_ctl *ffe_ctl, 3851 struct btrfs_block_group **bg_ret) 3852 { 3853 switch (ffe_ctl->policy) { 3854 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3855 return do_allocation_clustered(block_group, ffe_ctl, bg_ret); 3856 case BTRFS_EXTENT_ALLOC_ZONED: 3857 return do_allocation_zoned(block_group, ffe_ctl, bg_ret); 3858 default: 3859 BUG(); 3860 } 3861 } 3862 3863 static void release_block_group(struct btrfs_block_group *block_group, 3864 struct find_free_extent_ctl *ffe_ctl, 3865 int delalloc) 3866 { 3867 switch (ffe_ctl->policy) { 3868 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3869 ffe_ctl->retry_uncached = false; 3870 break; 3871 case BTRFS_EXTENT_ALLOC_ZONED: 3872 /* Nothing to do */ 3873 break; 3874 default: 3875 BUG(); 3876 } 3877 3878 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) != 3879 ffe_ctl->index); 3880 btrfs_release_block_group(block_group, delalloc); 3881 } 3882 3883 static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl, 3884 struct btrfs_key *ins) 3885 { 3886 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3887 3888 if (!ffe_ctl->use_cluster && last_ptr) { 3889 spin_lock(&last_ptr->lock); 3890 last_ptr->window_start = ins->objectid; 3891 spin_unlock(&last_ptr->lock); 3892 } 3893 } 3894 3895 static void found_extent(struct find_free_extent_ctl *ffe_ctl, 3896 struct btrfs_key *ins) 3897 { 3898 switch (ffe_ctl->policy) { 3899 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3900 found_extent_clustered(ffe_ctl, ins); 3901 break; 3902 case BTRFS_EXTENT_ALLOC_ZONED: 3903 /* Nothing to do */ 3904 break; 3905 default: 3906 BUG(); 3907 } 3908 } 3909 3910 static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info, 3911 struct find_free_extent_ctl *ffe_ctl) 3912 { 3913 /* Block group's activeness is not a requirement for METADATA block groups. */ 3914 if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)) 3915 return 0; 3916 3917 /* If we can activate new zone, just allocate a chunk and use it */ 3918 if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags)) 3919 return 0; 3920 3921 /* 3922 * We already reached the max active zones. Try to finish one block 3923 * group to make a room for a new block group. This is only possible 3924 * for a data block group because btrfs_zone_finish() may need to wait 3925 * for a running transaction which can cause a deadlock for metadata 3926 * allocation. 
3927 */ 3928 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { 3929 int ret = btrfs_zone_finish_one_bg(fs_info); 3930 3931 if (ret == 1) 3932 return 0; 3933 else if (ret < 0) 3934 return ret; 3935 } 3936 3937 /* 3938 * If we have enough free space left in an already active block group 3939 * and we can't activate any other zone now, do not allow allocating a 3940 * new chunk and let find_free_extent() retry with a smaller size. 3941 */ 3942 if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size) 3943 return -ENOSPC; 3944 3945 /* 3946 * Not even min_alloc_size is left in any block group. Since we cannot 3947 * activate a new block group, allocating one may not help. Let's tell 3948 * the caller to try again and hope it makes progress by writing out 3949 * some parts of the region. That is only possible for data block 3950 * groups, where a part of the region can be written. 3951 */ 3952 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) 3953 return -EAGAIN; 3954 3955 /* 3956 * We cannot activate a new block group and there is not enough space 3957 * left in any block group. Allocating a new block group may not help, 3958 * but there is nothing else to do anyway, so let's go with it. 3959 */ 3960 return 0; 3961 } 3962 3963 static int can_allocate_chunk(struct btrfs_fs_info *fs_info, 3964 struct find_free_extent_ctl *ffe_ctl) 3965 { 3966 switch (ffe_ctl->policy) { 3967 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3968 return 0; 3969 case BTRFS_EXTENT_ALLOC_ZONED: 3970 return can_allocate_chunk_zoned(fs_info, ffe_ctl); 3971 default: 3972 BUG(); 3973 } 3974 } 3975 3976 /* 3977 * Return >0 means the caller needs to re-search for a free extent. 3978 * Return 0 means we have the needed free extent. 3979 * Return <0 means we failed to locate any free extent. 3980 */ 3981 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, 3982 struct btrfs_key *ins, 3983 struct find_free_extent_ctl *ffe_ctl, 3984 bool full_search) 3985 { 3986 struct btrfs_root *root = fs_info->chunk_root; 3987 int ret; 3988 3989 if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) && 3990 ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg) 3991 ffe_ctl->orig_have_caching_bg = true; 3992 3993 if (ins->objectid) { 3994 found_extent(ffe_ctl, ins); 3995 return 0; 3996 } 3997 3998 if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg) 3999 return 1; 4000 4001 ffe_ctl->index++; 4002 if (ffe_ctl->index < BTRFS_NR_RAID_TYPES) 4003 return 1; 4004 4005 /* See the comments for btrfs_loop_type for an explanation of the phases. */ 4006 if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) { 4007 ffe_ctl->index = 0; 4008 /* 4009 * We want to skip the LOOP_CACHING_WAIT step if we don't have 4010 * any uncached bgs and we've already done a full search 4011 * through. 4012 */ 4013 if (ffe_ctl->loop == LOOP_CACHING_NOWAIT && 4014 (!ffe_ctl->orig_have_caching_bg && full_search)) 4015 ffe_ctl->loop++; 4016 ffe_ctl->loop++; 4017 4018 if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) { 4019 struct btrfs_trans_handle *trans; 4020 int exist = 0; 4021 4022 /* Check if the allocation policy allows creating a new chunk */ 4023 ret = can_allocate_chunk(fs_info, ffe_ctl); 4024 if (ret) 4025 return ret; 4026 4027 trans = current->journal_info; 4028 if (trans) 4029 exist = 1; 4030 else 4031 trans = btrfs_join_transaction(root); 4032 4033 if (IS_ERR(trans)) { 4034 ret = PTR_ERR(trans); 4035 return ret; 4036 } 4037 4038 ret = btrfs_chunk_alloc(trans, ffe_ctl->flags, 4039 CHUNK_ALLOC_FORCE_FOR_EXTENT); 4040 4041 /* Do not bail out on ENOSPC since we can do more.
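* The later loop stages below can still drop empty_size and empty_cluster and retry the search with relaxed constraints before we finally give up.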
*/ 4042 if (ret == -ENOSPC) { 4043 ret = 0; 4044 ffe_ctl->loop++; 4045 } else if (ret < 0) 4047 btrfs_abort_transaction(trans, ret); 4048 else 4049 ret = 0; 4050 if (!exist) 4051 btrfs_end_transaction(trans); 4052 if (ret) 4053 return ret; 4054 } 4055 4056 if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) { 4057 if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED) 4058 return -ENOSPC; 4059 4060 /* 4061 * Don't loop again if we already have no empty_size and 4062 * no empty_cluster. 4063 */ 4064 if (ffe_ctl->empty_size == 0 && 4065 ffe_ctl->empty_cluster == 0) 4066 return -ENOSPC; 4067 ffe_ctl->empty_size = 0; 4068 ffe_ctl->empty_cluster = 0; 4069 } 4070 return 1; 4071 } 4072 return -ENOSPC; 4073 } 4074 4075 static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl, 4076 struct btrfs_block_group *bg) 4077 { 4078 if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) 4079 return true; 4080 if (!btrfs_block_group_should_use_size_class(bg)) 4081 return true; 4082 if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) 4083 return true; 4084 if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && 4085 bg->size_class == BTRFS_BG_SZ_NONE) 4086 return true; 4087 return ffe_ctl->size_class == bg->size_class; 4088 } 4089 4090 static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, 4091 struct find_free_extent_ctl *ffe_ctl, 4092 struct btrfs_space_info *space_info, 4093 struct btrfs_key *ins) 4094 { 4095 /* 4096 * If our free space is heavily fragmented we may not be able to make 4097 * big contiguous allocations, so instead of doing the expensive search 4098 * for free space, simply return ENOSPC with our max_extent_size so we 4099 * can go ahead and search for a more manageable chunk. 4100 * 4101 * If our max_extent_size is large enough for our allocation simply 4102 * disable clustering since we will likely not be able to find enough 4103 * space to create a cluster and induce latency trying. 4104 */ 4105 if (space_info->max_extent_size) { 4106 spin_lock(&space_info->lock); 4107 if (space_info->max_extent_size && 4108 ffe_ctl->num_bytes > space_info->max_extent_size) { 4109 ins->offset = space_info->max_extent_size; 4110 spin_unlock(&space_info->lock); 4111 return -ENOSPC; 4112 } else if (space_info->max_extent_size) { 4113 ffe_ctl->use_cluster = false; 4114 } 4115 spin_unlock(&space_info->lock); 4116 } 4117 4118 ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info, 4119 &ffe_ctl->empty_cluster); 4120 if (ffe_ctl->last_ptr) { 4121 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 4122 4123 spin_lock(&last_ptr->lock); 4124 if (last_ptr->block_group) 4125 ffe_ctl->hint_byte = last_ptr->window_start; 4126 if (last_ptr->fragmented) { 4127 /* 4128 * We still set window_start so we can keep track of the 4129 * last place we found an allocation to try and save 4130 * some time.
4131 */ 4132 ffe_ctl->hint_byte = last_ptr->window_start; 4133 ffe_ctl->use_cluster = false; 4134 } 4135 spin_unlock(&last_ptr->lock); 4136 } 4137 4138 return 0; 4139 } 4140 4141 static int prepare_allocation(struct btrfs_fs_info *fs_info, 4142 struct find_free_extent_ctl *ffe_ctl, 4143 struct btrfs_space_info *space_info, 4144 struct btrfs_key *ins) 4145 { 4146 switch (ffe_ctl->policy) { 4147 case BTRFS_EXTENT_ALLOC_CLUSTERED: 4148 return prepare_allocation_clustered(fs_info, ffe_ctl, 4149 space_info, ins); 4150 case BTRFS_EXTENT_ALLOC_ZONED: 4151 if (ffe_ctl->for_treelog) { 4152 spin_lock(&fs_info->treelog_bg_lock); 4153 if (fs_info->treelog_bg) 4154 ffe_ctl->hint_byte = fs_info->treelog_bg; 4155 spin_unlock(&fs_info->treelog_bg_lock); 4156 } 4157 if (ffe_ctl->for_data_reloc) { 4158 spin_lock(&fs_info->relocation_bg_lock); 4159 if (fs_info->data_reloc_bg) 4160 ffe_ctl->hint_byte = fs_info->data_reloc_bg; 4161 spin_unlock(&fs_info->relocation_bg_lock); 4162 } 4163 return 0; 4164 default: 4165 BUG(); 4166 } 4167 } 4168 4169 /* 4170 * Walks the btree of allocated extents and finds a hole of a given size. 4171 * The key ins is changed to record the hole: 4172 * ins->objectid == start position 4173 * ins->type == BTRFS_EXTENT_ITEM_KEY 4174 * ins->offset == the size of the hole. 4175 * Any available blocks before search_start are skipped. 4176 * 4177 * If there is no suitable free space, we will record the max size of 4178 * the currently available free space extent. 4179 * 4180 * The overall logic and call chain: 4181 * 4182 * find_free_extent() 4183 * |- Iterate through all block groups 4184 * | |- Get a valid block group 4185 * | |- Try to do clustered allocation in that block group 4186 * | |- Try to do unclustered allocation in that block group 4187 * | |- Check if the result is valid 4188 * | | |- If valid, then exit 4189 * | |- Jump to next block group 4190 * | 4191 * |- Push harder to find free extents 4192 * |- If not found, re-iterate all block groups 4193 */ 4194 static noinline int find_free_extent(struct btrfs_root *root, 4195 struct btrfs_key *ins, 4196 struct find_free_extent_ctl *ffe_ctl) 4197 { 4198 struct btrfs_fs_info *fs_info = root->fs_info; 4199 int ret = 0; 4200 int cache_block_group_error = 0; 4201 struct btrfs_block_group *block_group = NULL; 4202 struct btrfs_space_info *space_info; 4203 bool full_search = false; 4204 4205 WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize); 4206 4207 ffe_ctl->search_start = 0; 4208 /* For clustered allocation */ 4209 ffe_ctl->empty_cluster = 0; 4210 ffe_ctl->last_ptr = NULL; 4211 ffe_ctl->use_cluster = true; 4212 ffe_ctl->have_caching_bg = false; 4213 ffe_ctl->orig_have_caching_bg = false; 4214 ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags); 4215 ffe_ctl->loop = 0; 4216 ffe_ctl->retry_uncached = false; 4217 ffe_ctl->cached = 0; 4218 ffe_ctl->max_extent_size = 0; 4219 ffe_ctl->total_free_space = 0; 4220 ffe_ctl->found_offset = 0; 4221 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED; 4222 ffe_ctl->size_class = btrfs_calc_block_group_size_class(ffe_ctl->num_bytes); 4223 4224 if (btrfs_is_zoned(fs_info)) 4225 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED; 4226 4227 ins->type = BTRFS_EXTENT_ITEM_KEY; 4228 ins->objectid = 0; 4229 ins->offset = 0; 4230 4231 trace_find_free_extent(root, ffe_ctl); 4232 4233 space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags); 4234 if (!space_info) { 4235 btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags); 4236 return -ENOSPC; 4237 } 4238 4239 ret = prepare_allocation(fs_info, ffe_ctl,
space_info, ins); 4240 if (ret < 0) 4241 return ret; 4242 4243 ffe_ctl->search_start = max(ffe_ctl->search_start, 4244 first_logical_byte(fs_info)); 4245 ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte); 4246 if (ffe_ctl->search_start == ffe_ctl->hint_byte) { 4247 block_group = btrfs_lookup_block_group(fs_info, 4248 ffe_ctl->search_start); 4249 /* 4250 * we don't want to use the block group if it doesn't match our 4251 * allocation bits, or if it's not cached. 4252 * 4253 * However if we are re-searching with an ideal block group 4254 * picked out then we don't care that the block group is cached. 4255 */ 4256 if (block_group && block_group_bits(block_group, ffe_ctl->flags) && 4257 block_group->cached != BTRFS_CACHE_NO) { 4258 down_read(&space_info->groups_sem); 4259 if (list_empty(&block_group->list) || 4260 block_group->ro) { 4261 /* 4262 * someone is removing this block group, 4263 * we can't jump into the have_block_group 4264 * target because our list pointers are not 4265 * valid 4266 */ 4267 btrfs_put_block_group(block_group); 4268 up_read(&space_info->groups_sem); 4269 } else { 4270 ffe_ctl->index = btrfs_bg_flags_to_raid_index( 4271 block_group->flags); 4272 btrfs_lock_block_group(block_group, 4273 ffe_ctl->delalloc); 4274 ffe_ctl->hinted = true; 4275 goto have_block_group; 4276 } 4277 } else if (block_group) { 4278 btrfs_put_block_group(block_group); 4279 } 4280 } 4281 search: 4282 trace_find_free_extent_search_loop(root, ffe_ctl); 4283 ffe_ctl->have_caching_bg = false; 4284 if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) || 4285 ffe_ctl->index == 0) 4286 full_search = true; 4287 down_read(&space_info->groups_sem); 4288 list_for_each_entry(block_group, 4289 &space_info->block_groups[ffe_ctl->index], list) { 4290 struct btrfs_block_group *bg_ret; 4291 4292 ffe_ctl->hinted = false; 4293 /* If the block group is read-only, we can skip it entirely. */ 4294 if (unlikely(block_group->ro)) { 4295 if (ffe_ctl->for_treelog) 4296 btrfs_clear_treelog_bg(block_group); 4297 if (ffe_ctl->for_data_reloc) 4298 btrfs_clear_data_reloc_bg(block_group); 4299 continue; 4300 } 4301 4302 btrfs_grab_block_group(block_group, ffe_ctl->delalloc); 4303 ffe_ctl->search_start = block_group->start; 4304 4305 /* 4306 * this can happen if we end up cycling through all the 4307 * raid types, but we want to make sure we only allocate 4308 * for the proper type. 4309 */ 4310 if (!block_group_bits(block_group, ffe_ctl->flags)) { 4311 u64 extra = BTRFS_BLOCK_GROUP_DUP | 4312 BTRFS_BLOCK_GROUP_RAID1_MASK | 4313 BTRFS_BLOCK_GROUP_RAID56_MASK | 4314 BTRFS_BLOCK_GROUP_RAID10; 4315 4316 /* 4317 * if they asked for extra copies and this block group 4318 * doesn't provide them, bail. This does allow us to 4319 * fill raid0 from raid1. 4320 */ 4321 if ((ffe_ctl->flags & extra) && !(block_group->flags & extra)) 4322 goto loop; 4323 4324 /* 4325 * This block group has different flags than we want. 4326 * It's possible that we have MIXED_GROUP flag but no 4327 * block group is mixed. Just skip such a block group.
4328 */ 4329 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4330 continue; 4331 } 4332 4333 have_block_group: 4334 trace_find_free_extent_have_block_group(root, ffe_ctl, block_group); 4335 ffe_ctl->cached = btrfs_block_group_done(block_group); 4336 if (unlikely(!ffe_ctl->cached)) { 4337 ffe_ctl->have_caching_bg = true; 4338 ret = btrfs_cache_block_group(block_group, false); 4339 4340 /* 4341 * If we get ENOMEM here or something else we want to 4342 * try other block groups, because it may not be fatal. 4343 * However if we can't find anything else we need to 4344 * save our return here so that we return the actual 4345 * error that caused problems, not ENOSPC. 4346 */ 4347 if (ret < 0) { 4348 if (!cache_block_group_error) 4349 cache_block_group_error = ret; 4350 ret = 0; 4351 goto loop; 4352 } 4353 ret = 0; 4354 } 4355 4356 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) { 4357 if (!cache_block_group_error) 4358 cache_block_group_error = -EIO; 4359 goto loop; 4360 } 4361 4362 if (!find_free_extent_check_size_class(ffe_ctl, block_group)) 4363 goto loop; 4364 4365 bg_ret = NULL; 4366 ret = do_allocation(block_group, ffe_ctl, &bg_ret); 4367 if (ret > 0) 4368 goto loop; 4369 4370 if (bg_ret && bg_ret != block_group) { 4371 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4372 block_group = bg_ret; 4373 } 4374 4375 /* Checks */ 4376 ffe_ctl->search_start = round_up(ffe_ctl->found_offset, 4377 fs_info->stripesize); 4378 4379 /* move on to the next group */ 4380 if (ffe_ctl->search_start + ffe_ctl->num_bytes > 4381 block_group->start + block_group->length) { 4382 btrfs_add_free_space_unused(block_group, 4383 ffe_ctl->found_offset, 4384 ffe_ctl->num_bytes); 4385 goto loop; 4386 } 4387 4388 if (ffe_ctl->found_offset < ffe_ctl->search_start) 4389 btrfs_add_free_space_unused(block_group, 4390 ffe_ctl->found_offset, 4391 ffe_ctl->search_start - ffe_ctl->found_offset); 4392 4393 ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes, 4394 ffe_ctl->num_bytes, 4395 ffe_ctl->delalloc, 4396 ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS); 4397 if (ret == -EAGAIN) { 4398 btrfs_add_free_space_unused(block_group, 4399 ffe_ctl->found_offset, 4400 ffe_ctl->num_bytes); 4401 goto loop; 4402 } 4403 btrfs_inc_block_group_reservations(block_group); 4404 4405 /* we are all good, let's return */ 4406 ins->objectid = ffe_ctl->search_start; 4407 ins->offset = ffe_ctl->num_bytes; 4408 4409 trace_btrfs_reserve_extent(block_group, ffe_ctl); 4410 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4411 break; 4412 loop: 4413 if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT && 4414 !ffe_ctl->retry_uncached) { 4415 ffe_ctl->retry_uncached = true; 4416 btrfs_wait_block_group_cache_progress(block_group, 4417 ffe_ctl->num_bytes + 4418 ffe_ctl->empty_cluster + 4419 ffe_ctl->empty_size); 4420 goto have_block_group; 4421 } 4422 release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc); 4423 cond_resched(); 4424 } 4425 up_read(&space_info->groups_sem); 4426 4427 ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search); 4428 if (ret > 0) 4429 goto search; 4430 4431 if (ret == -ENOSPC && !cache_block_group_error) { 4432 /* 4433 * Use ffe_ctl->total_free_space as fallback if we can't find 4434 * any contiguous hole.
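* total_free_space tracks the largest amount of free space seen in any single block group during the search, even when that space was too fragmented to yield a contiguous hole of num_bytes.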
4435 */ 4436 if (!ffe_ctl->max_extent_size) 4437 ffe_ctl->max_extent_size = ffe_ctl->total_free_space; 4438 spin_lock(&space_info->lock); 4439 space_info->max_extent_size = ffe_ctl->max_extent_size; 4440 spin_unlock(&space_info->lock); 4441 ins->offset = ffe_ctl->max_extent_size; 4442 } else if (ret == -ENOSPC) { 4443 ret = cache_block_group_error; 4444 } 4445 return ret; 4446 } 4447 4448 /* 4449 * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a 4450 * hole that is at least as big as @num_bytes. 4451 * 4452 * @root - The root that will contain this extent 4453 * 4454 * @ram_bytes - The amount of space in RAM that @num_bytes takes. This 4455 * is used for accounting purposes. This value differs 4456 * from @num_bytes only in the case of compressed extents. 4457 * 4458 * @num_bytes - Number of bytes to allocate on-disk. 4459 * 4460 * @min_alloc_size - Indicates the minimum amount of space that the 4461 * allocator should try to satisfy. In some cases 4462 * @num_bytes may be larger than what is required and if 4463 * the filesystem is fragmented then allocation fails. 4464 * However, the presence of @min_alloc_size gives a 4465 * chance to try and satisfy the smaller allocation. 4466 * 4467 * @empty_size - A hint that you plan on doing more COW. This is the 4468 * size in bytes the allocator should try to find free 4469 * next to the block it returns. This is just a hint and 4470 * may be ignored by the allocator. 4471 * 4472 * @hint_byte - Hint to the allocator to start searching above the byte 4473 * address passed. It might be ignored. 4474 * 4475 * @ins - This key is modified to record the found hole. It will 4476 * have the following values: 4477 * ins->objectid == start position 4478 * ins->type == BTRFS_EXTENT_ITEM_KEY 4479 * ins->offset == the size of the hole. 4480 * 4481 * @is_data - Boolean flag indicating whether an extent is 4482 * allocated for data (true) or metadata (false) 4483 * 4484 * @delalloc - Boolean flag indicating whether this allocation is for 4485 * delalloc or not. If 'true' data_rwsem of block groups 4486 * is going to be acquired. 4487 * 4488 * 4489 * Returns 0 when an allocation succeeded or < 0 when an error occurred. In 4490 * case -ENOSPC is returned then @ins->offset will contain the size of the 4491 * largest available hole the allocator managed to find.
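* * Illustrative sketch of a caller (hypothetical sizes; not an actual call site in this file, and it assumes local variables "root" and "fs_info" are in scope): * *	struct btrfs_key ins; *	int ret; * *	/* Try a 1 MiB data extent, shrinking down to one sector on ENOSPC. */ *	ret = btrfs_reserve_extent(root, SZ_1M, SZ_1M, fs_info->sectorsize, *				   0, 0, &ins, 1, 0); *	if (ret == -ENOSPC) *		btrfs_debug(fs_info, "largest free hole: %llu", ins.offset);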
4492 */ 4493 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, 4494 u64 num_bytes, u64 min_alloc_size, 4495 u64 empty_size, u64 hint_byte, 4496 struct btrfs_key *ins, int is_data, int delalloc) 4497 { 4498 struct btrfs_fs_info *fs_info = root->fs_info; 4499 struct find_free_extent_ctl ffe_ctl = {}; 4500 bool final_tried = num_bytes == min_alloc_size; 4501 u64 flags; 4502 int ret; 4503 bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 4504 bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data); 4505 4506 flags = get_alloc_profile_by_root(root, is_data); 4507 again: 4508 WARN_ON(num_bytes < fs_info->sectorsize); 4509 4510 ffe_ctl.ram_bytes = ram_bytes; 4511 ffe_ctl.num_bytes = num_bytes; 4512 ffe_ctl.min_alloc_size = min_alloc_size; 4513 ffe_ctl.empty_size = empty_size; 4514 ffe_ctl.flags = flags; 4515 ffe_ctl.delalloc = delalloc; 4516 ffe_ctl.hint_byte = hint_byte; 4517 ffe_ctl.for_treelog = for_treelog; 4518 ffe_ctl.for_data_reloc = for_data_reloc; 4519 4520 ret = find_free_extent(root, ins, &ffe_ctl); 4521 if (!ret && !is_data) { 4522 btrfs_dec_block_group_reservations(fs_info, ins->objectid); 4523 } else if (ret == -ENOSPC) { 4524 if (!final_tried && ins->offset) { 4525 num_bytes = min(num_bytes >> 1, ins->offset); 4526 num_bytes = round_down(num_bytes, 4527 fs_info->sectorsize); 4528 num_bytes = max(num_bytes, min_alloc_size); 4529 ram_bytes = num_bytes; 4530 if (num_bytes == min_alloc_size) 4531 final_tried = true; 4532 goto again; 4533 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 4534 struct btrfs_space_info *sinfo; 4535 4536 sinfo = btrfs_find_space_info(fs_info, flags); 4537 btrfs_err(fs_info, 4538 "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d", 4539 flags, num_bytes, for_treelog, for_data_reloc); 4540 if (sinfo) 4541 btrfs_dump_space_info(fs_info, sinfo, 4542 num_bytes, 1); 4543 } 4544 } 4545 4546 return ret; 4547 } 4548 4549 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, 4550 u64 start, u64 len, int delalloc) 4551 { 4552 struct btrfs_block_group *cache; 4553 4554 cache = btrfs_lookup_block_group(fs_info, start); 4555 if (!cache) { 4556 btrfs_err(fs_info, "Unable to find block group for %llu", 4557 start); 4558 return -ENOSPC; 4559 } 4560 4561 btrfs_add_free_space(cache, start, len); 4562 btrfs_free_reserved_bytes(cache, len, delalloc); 4563 trace_btrfs_reserved_extent_free(fs_info, start, len); 4564 4565 btrfs_put_block_group(cache); 4566 return 0; 4567 } 4568 4569 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, 4570 u64 len) 4571 { 4572 struct btrfs_block_group *cache; 4573 int ret = 0; 4574 4575 cache = btrfs_lookup_block_group(trans->fs_info, start); 4576 if (!cache) { 4577 btrfs_err(trans->fs_info, "unable to find block group for %llu", 4578 start); 4579 return -ENOSPC; 4580 } 4581 4582 ret = pin_down_extent(trans, cache, start, len, 1); 4583 btrfs_put_block_group(cache); 4584 return ret; 4585 } 4586 4587 static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr, 4588 u64 num_bytes) 4589 { 4590 struct btrfs_fs_info *fs_info = trans->fs_info; 4591 int ret; 4592 4593 ret = remove_from_free_space_tree(trans, bytenr, num_bytes); 4594 if (ret) 4595 return ret; 4596 4597 ret = btrfs_update_block_group(trans, bytenr, num_bytes, true); 4598 if (ret) { 4599 ASSERT(!ret); 4600 btrfs_err(fs_info, "update block group failed for %llu %llu", 4601 bytenr, num_bytes); 4602 return ret; 4603 } 4604 4605 trace_btrfs_reserved_extent_alloc(fs_info, bytenr, 
num_bytes); 4606 return 0; 4607 } 4608 4609 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 4610 u64 parent, u64 root_objectid, 4611 u64 flags, u64 owner, u64 offset, 4612 struct btrfs_key *ins, int ref_mod) 4613 { 4614 struct btrfs_fs_info *fs_info = trans->fs_info; 4615 struct btrfs_root *extent_root; 4616 int ret; 4617 struct btrfs_extent_item *extent_item; 4618 struct btrfs_extent_inline_ref *iref; 4619 struct btrfs_path *path; 4620 struct extent_buffer *leaf; 4621 int type; 4622 u32 size; 4623 4624 if (parent > 0) 4625 type = BTRFS_SHARED_DATA_REF_KEY; 4626 else 4627 type = BTRFS_EXTENT_DATA_REF_KEY; 4628 4629 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); 4630 4631 path = btrfs_alloc_path(); 4632 if (!path) 4633 return -ENOMEM; 4634 4635 extent_root = btrfs_extent_root(fs_info, ins->objectid); 4636 ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size); 4637 if (ret) { 4638 btrfs_free_path(path); 4639 return ret; 4640 } 4641 4642 leaf = path->nodes[0]; 4643 extent_item = btrfs_item_ptr(leaf, path->slots[0], 4644 struct btrfs_extent_item); 4645 btrfs_set_extent_refs(leaf, extent_item, ref_mod); 4646 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 4647 btrfs_set_extent_flags(leaf, extent_item, 4648 flags | BTRFS_EXTENT_FLAG_DATA); 4649 4650 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 4651 btrfs_set_extent_inline_ref_type(leaf, iref, type); 4652 if (parent > 0) { 4653 struct btrfs_shared_data_ref *ref; 4654 ref = (struct btrfs_shared_data_ref *)(iref + 1); 4655 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 4656 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); 4657 } else { 4658 struct btrfs_extent_data_ref *ref; 4659 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 4660 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); 4661 btrfs_set_extent_data_ref_objectid(leaf, ref, owner); 4662 btrfs_set_extent_data_ref_offset(leaf, ref, offset); 4663 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); 4664 } 4665 4666 btrfs_mark_buffer_dirty(trans, path->nodes[0]); 4667 btrfs_free_path(path); 4668 4669 return alloc_reserved_extent(trans, ins->objectid, ins->offset); 4670 } 4671 4672 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, 4673 struct btrfs_delayed_ref_node *node, 4674 struct btrfs_delayed_extent_op *extent_op) 4675 { 4676 struct btrfs_fs_info *fs_info = trans->fs_info; 4677 struct btrfs_root *extent_root; 4678 int ret; 4679 struct btrfs_extent_item *extent_item; 4680 struct btrfs_key extent_key; 4681 struct btrfs_tree_block_info *block_info; 4682 struct btrfs_extent_inline_ref *iref; 4683 struct btrfs_path *path; 4684 struct extent_buffer *leaf; 4685 struct btrfs_delayed_tree_ref *ref; 4686 u32 size = sizeof(*extent_item) + sizeof(*iref); 4687 u64 flags = extent_op->flags_to_set; 4688 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 4689 4690 ref = btrfs_delayed_node_to_tree_ref(node); 4691 4692 extent_key.objectid = node->bytenr; 4693 if (skinny_metadata) { 4694 extent_key.offset = ref->level; 4695 extent_key.type = BTRFS_METADATA_ITEM_KEY; 4696 } else { 4697 extent_key.offset = node->num_bytes; 4698 extent_key.type = BTRFS_EXTENT_ITEM_KEY; 4699 size += sizeof(*block_info); 4700 } 4701 4702 path = btrfs_alloc_path(); 4703 if (!path) 4704 return -ENOMEM; 4705 4706 extent_root = btrfs_extent_root(fs_info, extent_key.objectid); 4707 ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key, 4708 size); 4709 if (ret) { 4710 
btrfs_free_path(path); 4711 return ret; 4712 } 4713 4714 leaf = path->nodes[0]; 4715 extent_item = btrfs_item_ptr(leaf, path->slots[0], 4716 struct btrfs_extent_item); 4717 btrfs_set_extent_refs(leaf, extent_item, 1); 4718 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 4719 btrfs_set_extent_flags(leaf, extent_item, 4720 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); 4721 4722 if (skinny_metadata) { 4723 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 4724 } else { 4725 block_info = (struct btrfs_tree_block_info *)(extent_item + 1); 4726 btrfs_set_tree_block_key(leaf, block_info, &extent_op->key); 4727 btrfs_set_tree_block_level(leaf, block_info, ref->level); 4728 iref = (struct btrfs_extent_inline_ref *)(block_info + 1); 4729 } 4730 4731 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) { 4732 btrfs_set_extent_inline_ref_type(leaf, iref, 4733 BTRFS_SHARED_BLOCK_REF_KEY); 4734 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent); 4735 } else { 4736 btrfs_set_extent_inline_ref_type(leaf, iref, 4737 BTRFS_TREE_BLOCK_REF_KEY); 4738 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root); 4739 } 4740 4741 btrfs_mark_buffer_dirty(trans, leaf); 4742 btrfs_free_path(path); 4743 4744 return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize); 4745 } 4746 4747 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 4748 struct btrfs_root *root, u64 owner, 4749 u64 offset, u64 ram_bytes, 4750 struct btrfs_key *ins) 4751 { 4752 struct btrfs_ref generic_ref = { 0 }; 4753 4754 BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 4755 4756 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, 4757 ins->objectid, ins->offset, 0); 4758 btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, 4759 offset, 0, false); 4760 btrfs_ref_tree_mod(root->fs_info, &generic_ref); 4761 4762 return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes); 4763 } 4764 4765 /* 4766 * this is used by the tree logging recovery code. It records that 4767 * an extent has been allocated and makes sure to clear the free 4768 * space cache bits as well 4769 */ 4770 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 4771 u64 root_objectid, u64 owner, u64 offset, 4772 struct btrfs_key *ins) 4773 { 4774 struct btrfs_fs_info *fs_info = trans->fs_info; 4775 int ret; 4776 struct btrfs_block_group *block_group; 4777 struct btrfs_space_info *space_info; 4778 4779 /* 4780 * Mixed block groups will exclude before processing the log so we only 4781 * need to do the exclude dance if this fs isn't mixed. 
4782 */ 4783 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { 4784 ret = __exclude_logged_extent(fs_info, ins->objectid, 4785 ins->offset); 4786 if (ret) 4787 return ret; 4788 } 4789 4790 block_group = btrfs_lookup_block_group(fs_info, ins->objectid); 4791 if (!block_group) 4792 return -EINVAL; 4793 4794 space_info = block_group->space_info; 4795 spin_lock(&space_info->lock); 4796 spin_lock(&block_group->lock); 4797 space_info->bytes_reserved += ins->offset; 4798 block_group->reserved += ins->offset; 4799 spin_unlock(&block_group->lock); 4800 spin_unlock(&space_info->lock); 4801 4802 ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner, 4803 offset, ins, 1); 4804 if (ret) 4805 btrfs_pin_extent(trans, ins->objectid, ins->offset, 1); 4806 btrfs_put_block_group(block_group); 4807 return ret; 4808 } 4809 4810 static struct extent_buffer * 4811 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4812 u64 bytenr, int level, u64 owner, 4813 enum btrfs_lock_nesting nest) 4814 { 4815 struct btrfs_fs_info *fs_info = root->fs_info; 4816 struct extent_buffer *buf; 4817 u64 lockdep_owner = owner; 4818 4819 buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level); 4820 if (IS_ERR(buf)) 4821 return buf; 4822 4823 /* 4824 * Extra safety check in case the extent tree is corrupted and extent 4825 * allocator chooses to use a tree block which is already used and 4826 * locked. 4827 */ 4828 if (buf->lock_owner == current->pid) { 4829 btrfs_err_rl(fs_info, 4830 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected", 4831 buf->start, btrfs_header_owner(buf), current->pid); 4832 free_extent_buffer(buf); 4833 return ERR_PTR(-EUCLEAN); 4834 } 4835 4836 /* 4837 * The reloc trees are just snapshots, so we need them to appear to be 4838 * just like any other fs tree WRT lockdep. 4839 * 4840 * The exception however is in replace_path() in relocation, where we 4841 * hold the lock on the original fs root and then search for the reloc 4842 * root. At that point we need to make sure any reloc root buffers are 4843 * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make 4844 * lockdep happy. 4845 */ 4846 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID && 4847 !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state)) 4848 lockdep_owner = BTRFS_FS_TREE_OBJECTID; 4849 4850 /* btrfs_clear_buffer_dirty() accesses generation field. */ 4851 btrfs_set_header_generation(buf, trans->transid); 4852 4853 /* 4854 * This needs to stay, because we could allocate a freed block from an 4855 * old tree into a new tree, so we need to make sure this new block is 4856 * set to the appropriate level and owner. 
4857 */ 4858 btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level); 4859 4860 __btrfs_tree_lock(buf, nest); 4861 btrfs_clear_buffer_dirty(trans, buf); 4862 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); 4863 clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags); 4864 4865 set_extent_buffer_uptodate(buf); 4866 4867 memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header)); 4868 btrfs_set_header_level(buf, level); 4869 btrfs_set_header_bytenr(buf, buf->start); 4870 btrfs_set_header_generation(buf, trans->transid); 4871 btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV); 4872 btrfs_set_header_owner(buf, owner); 4873 write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid); 4874 write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid); 4875 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 4876 buf->log_index = root->log_transid % 2; 4877 /* 4878 * we allow two log transactions at a time, use different 4879 * EXTENT bit to differentiate dirty pages. 4880 */ 4881 if (buf->log_index == 0) 4882 set_extent_bit(&root->dirty_log_pages, buf->start, 4883 buf->start + buf->len - 1, 4884 EXTENT_DIRTY, NULL); 4885 else 4886 set_extent_bit(&root->dirty_log_pages, buf->start, 4887 buf->start + buf->len - 1, 4888 EXTENT_NEW, NULL); 4889 } else { 4890 buf->log_index = -1; 4891 set_extent_bit(&trans->transaction->dirty_pages, buf->start, 4892 buf->start + buf->len - 1, EXTENT_DIRTY, NULL); 4893 } 4894 /* this returns a buffer locked for blocking */ 4895 return buf; 4896 } 4897 4898 /* 4899 * finds a free extent and does all the dirty work required for allocation 4900 * returns the tree buffer or an ERR_PTR on error. 4901 */ 4902 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 4903 struct btrfs_root *root, 4904 u64 parent, u64 root_objectid, 4905 const struct btrfs_disk_key *key, 4906 int level, u64 hint, 4907 u64 empty_size, 4908 enum btrfs_lock_nesting nest) 4909 { 4910 struct btrfs_fs_info *fs_info = root->fs_info; 4911 struct btrfs_key ins; 4912 struct btrfs_block_rsv *block_rsv; 4913 struct extent_buffer *buf; 4914 struct btrfs_delayed_extent_op *extent_op; 4915 struct btrfs_ref generic_ref = { 0 }; 4916 u64 flags = 0; 4917 int ret; 4918 u32 blocksize = fs_info->nodesize; 4919 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 4920 4921 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4922 if (btrfs_is_testing(fs_info)) { 4923 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, 4924 level, root_objectid, nest); 4925 if (!IS_ERR(buf)) 4926 root->alloc_bytenr += blocksize; 4927 return buf; 4928 } 4929 #endif 4930 4931 block_rsv = btrfs_use_block_rsv(trans, root, blocksize); 4932 if (IS_ERR(block_rsv)) 4933 return ERR_CAST(block_rsv); 4934 4935 ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize, 4936 empty_size, hint, &ins, 0, 0); 4937 if (ret) 4938 goto out_unuse; 4939 4940 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level, 4941 root_objectid, nest); 4942 if (IS_ERR(buf)) { 4943 ret = PTR_ERR(buf); 4944 goto out_free_reserved; 4945 } 4946 4947 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 4948 if (parent == 0) 4949 parent = ins.objectid; 4950 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 4951 } else 4952 BUG_ON(parent > 0); 4953 4954 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 4955 extent_op = btrfs_alloc_delayed_extent_op(); 4956 if (!extent_op) { 4957 ret = -ENOMEM; 4958 goto out_free_buf; 4959 } 4960 if (key) 4961 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 4962 else 4963 
memset(&extent_op->key, 0, sizeof(extent_op->key)); 4964 extent_op->flags_to_set = flags; 4965 extent_op->update_key = skinny_metadata ? false : true; 4966 extent_op->update_flags = true; 4967 extent_op->level = level; 4968 4969 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, 4970 ins.objectid, ins.offset, parent); 4971 btrfs_init_tree_ref(&generic_ref, level, root_objectid, 4972 root->root_key.objectid, false); 4973 btrfs_ref_tree_mod(fs_info, &generic_ref); 4974 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op); 4975 if (ret) 4976 goto out_free_delayed; 4977 } 4978 return buf; 4979 4980 out_free_delayed: 4981 btrfs_free_delayed_extent_op(extent_op); 4982 out_free_buf: 4983 btrfs_tree_unlock(buf); 4984 free_extent_buffer(buf); 4985 out_free_reserved: 4986 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0); 4987 out_unuse: 4988 btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize); 4989 return ERR_PTR(ret); 4990 } 4991 4992 struct walk_control { 4993 u64 refs[BTRFS_MAX_LEVEL]; 4994 u64 flags[BTRFS_MAX_LEVEL]; 4995 struct btrfs_key update_progress; 4996 struct btrfs_key drop_progress; 4997 int drop_level; 4998 int stage; 4999 int level; 5000 int shared_level; 5001 int update_ref; 5002 int keep_locks; 5003 int reada_slot; 5004 int reada_count; 5005 int restarted; 5006 }; 5007 5008 #define DROP_REFERENCE 1 5009 #define UPDATE_BACKREF 2 5010 5011 static noinline void reada_walk_down(struct btrfs_trans_handle *trans, 5012 struct btrfs_root *root, 5013 struct walk_control *wc, 5014 struct btrfs_path *path) 5015 { 5016 struct btrfs_fs_info *fs_info = root->fs_info; 5017 u64 bytenr; 5018 u64 generation; 5019 u64 refs; 5020 u64 flags; 5021 u32 nritems; 5022 struct btrfs_key key; 5023 struct extent_buffer *eb; 5024 int ret; 5025 int slot; 5026 int nread = 0; 5027 5028 if (path->slots[wc->level] < wc->reada_slot) { 5029 wc->reada_count = wc->reada_count * 2 / 3; 5030 wc->reada_count = max(wc->reada_count, 2); 5031 } else { 5032 wc->reada_count = wc->reada_count * 3 / 2; 5033 wc->reada_count = min_t(int, wc->reada_count, 5034 BTRFS_NODEPTRS_PER_BLOCK(fs_info)); 5035 } 5036 5037 eb = path->nodes[wc->level]; 5038 nritems = btrfs_header_nritems(eb); 5039 5040 for (slot = path->slots[wc->level]; slot < nritems; slot++) { 5041 if (nread >= wc->reada_count) 5042 break; 5043 5044 cond_resched(); 5045 bytenr = btrfs_node_blockptr(eb, slot); 5046 generation = btrfs_node_ptr_generation(eb, slot); 5047 5048 if (slot == path->slots[wc->level]) 5049 goto reada; 5050 5051 if (wc->stage == UPDATE_BACKREF && 5052 generation <= root->root_key.offset) 5053 continue; 5054 5055 /* We don't lock the tree block, it's OK to be racy here */ 5056 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, 5057 wc->level - 1, 1, &refs, 5058 &flags); 5059 /* We don't care about errors in readahead. 
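* A failed lookup just means we skip readahead for this node; the eventual real read of the block will surface any error.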
*/ 5060 if (ret < 0) 5061 continue; 5062 BUG_ON(refs == 0); 5063 5064 if (wc->stage == DROP_REFERENCE) { 5065 if (refs == 1) 5066 goto reada; 5067 5068 if (wc->level == 1 && 5069 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5070 continue; 5071 if (!wc->update_ref || 5072 generation <= root->root_key.offset) 5073 continue; 5074 btrfs_node_key_to_cpu(eb, &key, slot); 5075 ret = btrfs_comp_cpu_keys(&key, 5076 &wc->update_progress); 5077 if (ret < 0) 5078 continue; 5079 } else { 5080 if (wc->level == 1 && 5081 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5082 continue; 5083 } 5084 reada: 5085 btrfs_readahead_node_child(eb, slot); 5086 nread++; 5087 } 5088 wc->reada_slot = slot; 5089 } 5090 5091 /* 5092 * helper to process tree block while walking down the tree. 5093 * 5094 * when wc->stage == UPDATE_BACKREF, this function updates 5095 * back refs for pointers in the block. 5096 * 5097 * NOTE: return value 1 means we should stop walking down. 5098 */ 5099 static noinline int walk_down_proc(struct btrfs_trans_handle *trans, 5100 struct btrfs_root *root, 5101 struct btrfs_path *path, 5102 struct walk_control *wc, int lookup_info) 5103 { 5104 struct btrfs_fs_info *fs_info = root->fs_info; 5105 int level = wc->level; 5106 struct extent_buffer *eb = path->nodes[level]; 5107 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; 5108 int ret; 5109 5110 if (wc->stage == UPDATE_BACKREF && 5111 btrfs_header_owner(eb) != root->root_key.objectid) 5112 return 1; 5113 5114 /* 5115 * when reference count of tree block is 1, it won't increase 5116 * again. once full backref flag is set, we never clear it. 5117 */ 5118 if (lookup_info && 5119 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || 5120 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { 5121 BUG_ON(!path->locks[level]); 5122 ret = btrfs_lookup_extent_info(trans, fs_info, 5123 eb->start, level, 1, 5124 &wc->refs[level], 5125 &wc->flags[level]); 5126 BUG_ON(ret == -ENOMEM); 5127 if (ret) 5128 return ret; 5129 BUG_ON(wc->refs[level] == 0); 5130 } 5131 5132 if (wc->stage == DROP_REFERENCE) { 5133 if (wc->refs[level] > 1) 5134 return 1; 5135 5136 if (path->locks[level] && !wc->keep_locks) { 5137 btrfs_tree_unlock_rw(eb, path->locks[level]); 5138 path->locks[level] = 0; 5139 } 5140 return 0; 5141 } 5142 5143 /* wc->stage == UPDATE_BACKREF */ 5144 if (!(wc->flags[level] & flag)) { 5145 BUG_ON(!path->locks[level]); 5146 ret = btrfs_inc_ref(trans, root, eb, 1); 5147 BUG_ON(ret); /* -ENOMEM */ 5148 ret = btrfs_dec_ref(trans, root, eb, 0); 5149 BUG_ON(ret); /* -ENOMEM */ 5150 ret = btrfs_set_disk_extent_flags(trans, eb, flag); 5151 BUG_ON(ret); /* -ENOMEM */ 5152 wc->flags[level] |= flag; 5153 } 5154 5155 /* 5156 * the block is shared by multiple trees, so it's not good to 5157 * keep the tree lock 5158 */ 5159 if (path->locks[level] && level > 0) { 5160 btrfs_tree_unlock_rw(eb, path->locks[level]); 5161 path->locks[level] = 0; 5162 } 5163 return 0; 5164 } 5165 5166 /* 5167 * This is used to verify a ref exists for this root to deal with a bug where we 5168 * would have a drop_progress key that hadn't been updated properly. 
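* Returns 1 if the ref exists, 0 if it does not, and a negative errno if the lookup itself fails.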
5169 */ 5170 static int check_ref_exists(struct btrfs_trans_handle *trans, 5171 struct btrfs_root *root, u64 bytenr, u64 parent, 5172 int level) 5173 { 5174 struct btrfs_path *path; 5175 struct btrfs_extent_inline_ref *iref; 5176 int ret; 5177 5178 path = btrfs_alloc_path(); 5179 if (!path) 5180 return -ENOMEM; 5181 5182 ret = lookup_extent_backref(trans, path, &iref, bytenr, 5183 root->fs_info->nodesize, parent, 5184 root->root_key.objectid, level, 0); 5185 btrfs_free_path(path); 5186 if (ret == -ENOENT) 5187 return 0; 5188 if (ret < 0) 5189 return ret; 5190 return 1; 5191 } 5192 5193 /* 5194 * helper to process tree block pointer. 5195 * 5196 * when wc->stage == DROP_REFERENCE, this function checks the 5197 * reference count of the block pointed to. if the block 5198 * is shared and we need to update back refs for the subtree 5199 * rooted at the block, this function changes wc->stage to 5200 * UPDATE_BACKREF. if the block is shared and there is no 5201 * need to update backrefs, this function drops the reference 5202 * to the block. 5203 * 5204 * NOTE: return value 1 means we should stop walking down. 5205 */ 5206 static noinline int do_walk_down(struct btrfs_trans_handle *trans, 5207 struct btrfs_root *root, 5208 struct btrfs_path *path, 5209 struct walk_control *wc, int *lookup_info) 5210 { 5211 struct btrfs_fs_info *fs_info = root->fs_info; 5212 u64 bytenr; 5213 u64 generation; 5214 u64 parent; 5215 struct btrfs_tree_parent_check check = { 0 }; 5216 struct btrfs_key key; 5217 struct btrfs_ref ref = { 0 }; 5218 struct extent_buffer *next; 5219 int level = wc->level; 5220 int reada = 0; 5221 int ret = 0; 5222 bool need_account = false; 5223 5224 generation = btrfs_node_ptr_generation(path->nodes[level], 5225 path->slots[level]); 5226 /* 5227 * if the lower level block was created before the snapshot 5228 * was created, we know there is no need to update back refs 5229 * for the subtree 5230 */ 5231 if (wc->stage == UPDATE_BACKREF && 5232 generation <= root->root_key.offset) { 5233 *lookup_info = 1; 5234 return 1; 5235 } 5236 5237 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); 5238 5239 check.level = level - 1; 5240 check.transid = generation; 5241 check.owner_root = root->root_key.objectid; 5242 check.has_first_key = true; 5243 btrfs_node_key_to_cpu(path->nodes[level], &check.first_key, 5244 path->slots[level]); 5245 5246 next = find_extent_buffer(fs_info, bytenr); 5247 if (!next) { 5248 next = btrfs_find_create_tree_block(fs_info, bytenr, 5249 root->root_key.objectid, level - 1); 5250 if (IS_ERR(next)) 5251 return PTR_ERR(next); 5252 reada = 1; 5253 } 5254 btrfs_tree_lock(next); 5255 5256 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1, 5257 &wc->refs[level - 1], 5258 &wc->flags[level - 1]); 5259 if (ret < 0) 5260 goto out_unlock; 5261 5262 if (unlikely(wc->refs[level - 1] == 0)) { 5263 btrfs_err(fs_info, "Missing references."); 5264 ret = -EIO; 5265 goto out_unlock; 5266 } 5267 *lookup_info = 0; 5268 5269 if (wc->stage == DROP_REFERENCE) { 5270 if (wc->refs[level - 1] > 1) { 5271 need_account = true; 5272 if (level == 1 && 5273 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5274 goto skip; 5275 5276 if (!wc->update_ref || 5277 generation <= root->root_key.offset) 5278 goto skip; 5279 5280 btrfs_node_key_to_cpu(path->nodes[level], &key, 5281 path->slots[level]); 5282 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); 5283 if (ret < 0) 5284 goto skip; 5285 5286 wc->stage = UPDATE_BACKREF; 5287 wc->shared_level = level - 1; 5288 } 5289 } else { 5290
if (level == 1 && 5291 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5292 goto skip; 5293 } 5294 5295 if (!btrfs_buffer_uptodate(next, generation, 0)) { 5296 btrfs_tree_unlock(next); 5297 free_extent_buffer(next); 5298 next = NULL; 5299 *lookup_info = 1; 5300 } 5301 5302 if (!next) { 5303 if (reada && level == 1) 5304 reada_walk_down(trans, root, wc, path); 5305 next = read_tree_block(fs_info, bytenr, &check); 5306 if (IS_ERR(next)) { 5307 return PTR_ERR(next); 5308 } else if (!extent_buffer_uptodate(next)) { 5309 free_extent_buffer(next); 5310 return -EIO; 5311 } 5312 btrfs_tree_lock(next); 5313 } 5314 5315 level--; 5316 ASSERT(level == btrfs_header_level(next)); 5317 if (level != btrfs_header_level(next)) { 5318 btrfs_err(root->fs_info, "mismatched level"); 5319 ret = -EIO; 5320 goto out_unlock; 5321 } 5322 path->nodes[level] = next; 5323 path->slots[level] = 0; 5324 path->locks[level] = BTRFS_WRITE_LOCK; 5325 wc->level = level; 5326 if (wc->level == 1) 5327 wc->reada_slot = 0; 5328 return 0; 5329 skip: 5330 wc->refs[level - 1] = 0; 5331 wc->flags[level - 1] = 0; 5332 if (wc->stage == DROP_REFERENCE) { 5333 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 5334 parent = path->nodes[level]->start; 5335 } else { 5336 ASSERT(root->root_key.objectid == 5337 btrfs_header_owner(path->nodes[level])); 5338 if (root->root_key.objectid != 5339 btrfs_header_owner(path->nodes[level])) { 5340 btrfs_err(root->fs_info, 5341 "mismatched block owner"); 5342 ret = -EIO; 5343 goto out_unlock; 5344 } 5345 parent = 0; 5346 } 5347 5348 /* 5349 * If we had a drop_progress we need to verify the refs are set 5350 * as expected. If we find our ref then we know that from here 5351 * on out everything should be correct, and we can clear the 5352 * ->restarted flag. 5353 */ 5354 if (wc->restarted) { 5355 ret = check_ref_exists(trans, root, bytenr, parent, 5356 level - 1); 5357 if (ret < 0) 5358 goto out_unlock; 5359 if (ret == 0) 5360 goto no_delete; 5361 ret = 0; 5362 wc->restarted = 0; 5363 } 5364 5365 /* 5366 * Reloc tree doesn't contribute to qgroup numbers, and we have 5367 * already accounted them at merge time (replace_path), 5368 * thus we could skip expensive subtree trace here. 5369 */ 5370 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 5371 need_account) { 5372 ret = btrfs_qgroup_trace_subtree(trans, next, 5373 generation, level - 1); 5374 if (ret) { 5375 btrfs_err_rl(fs_info, 5376 "Error %d accounting shared subtree. Quota is out of sync, rescan required.", 5377 ret); 5378 } 5379 } 5380 5381 /* 5382 * We need to update the next key in our walk control so we can 5383 * update the drop_progress key accordingly. We don't care if 5384 * find_next_key doesn't find a key because that means we're at 5385 * the end and are going to clean up now. 5386 */ 5387 wc->drop_level = level; 5388 find_next_key(path, level, &wc->drop_progress); 5389 5390 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr, 5391 fs_info->nodesize, parent); 5392 btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid, 5393 0, false); 5394 ret = btrfs_free_extent(trans, &ref); 5395 if (ret) 5396 goto out_unlock; 5397 } 5398 no_delete: 5399 *lookup_info = 1; 5400 ret = 1; 5401 5402 out_unlock: 5403 btrfs_tree_unlock(next); 5404 free_extent_buffer(next); 5405 5406 return ret; 5407 } 5408 5409 /* 5410 * helper to process tree block while walking up the tree. 5411 * 5412 * when wc->stage == DROP_REFERENCE, this function drops 5413 * reference count on the block. 
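* (when that was the last reference, the block itself is freed via btrfs_free_tree_block())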
5414 * 5415 * when wc->stage == UPDATE_BACKREF, this function changes 5416 * wc->stage back to DROP_REFERENCE if we changed wc->stage 5417 * to UPDATE_BACKREF previously while processing the block. 5418 * 5419 * NOTE: return value 1 means we should stop walking up. 5420 */ 5421 static noinline int walk_up_proc(struct btrfs_trans_handle *trans, 5422 struct btrfs_root *root, 5423 struct btrfs_path *path, 5424 struct walk_control *wc) 5425 { 5426 struct btrfs_fs_info *fs_info = root->fs_info; 5427 int ret; 5428 int level = wc->level; 5429 struct extent_buffer *eb = path->nodes[level]; 5430 u64 parent = 0; 5431 5432 if (wc->stage == UPDATE_BACKREF) { 5433 BUG_ON(wc->shared_level < level); 5434 if (level < wc->shared_level) 5435 goto out; 5436 5437 ret = find_next_key(path, level + 1, &wc->update_progress); 5438 if (ret > 0) 5439 wc->update_ref = 0; 5440 5441 wc->stage = DROP_REFERENCE; 5442 wc->shared_level = -1; 5443 path->slots[level] = 0; 5444 5445 /* 5446 * check reference count again if the block isn't locked. 5447 * we should start walking down the tree again if reference 5448 * count is one. 5449 */ 5450 if (!path->locks[level]) { 5451 BUG_ON(level == 0); 5452 btrfs_tree_lock(eb); 5453 path->locks[level] = BTRFS_WRITE_LOCK; 5454 5455 ret = btrfs_lookup_extent_info(trans, fs_info, 5456 eb->start, level, 1, 5457 &wc->refs[level], 5458 &wc->flags[level]); 5459 if (ret < 0) { 5460 btrfs_tree_unlock_rw(eb, path->locks[level]); 5461 path->locks[level] = 0; 5462 return ret; 5463 } 5464 BUG_ON(wc->refs[level] == 0); 5465 if (wc->refs[level] == 1) { 5466 btrfs_tree_unlock_rw(eb, path->locks[level]); 5467 path->locks[level] = 0; 5468 return 1; 5469 } 5470 } 5471 } 5472 5473 /* wc->stage == DROP_REFERENCE */ 5474 BUG_ON(wc->refs[level] > 1 && !path->locks[level]); 5475 5476 if (wc->refs[level] == 1) { 5477 if (level == 0) { 5478 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5479 ret = btrfs_dec_ref(trans, root, eb, 1); 5480 else 5481 ret = btrfs_dec_ref(trans, root, eb, 0); 5482 BUG_ON(ret); /* -ENOMEM */ 5483 if (is_fstree(root->root_key.objectid)) { 5484 ret = btrfs_qgroup_trace_leaf_items(trans, eb); 5485 if (ret) { 5486 btrfs_err_rl(fs_info, 5487 "error %d accounting leaf items, quota is out of sync, rescan required", 5488 ret); 5489 } 5490 } 5491 } 5492 /* Make block locked assertion in btrfs_clear_buffer_dirty happy. 
*/ 5493 if (!path->locks[level]) { 5494 btrfs_tree_lock(eb); 5495 path->locks[level] = BTRFS_WRITE_LOCK; 5496 } 5497 btrfs_clear_buffer_dirty(trans, eb); 5498 } 5499 5500 if (eb == root->node) { 5501 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5502 parent = eb->start; 5503 else if (root->root_key.objectid != btrfs_header_owner(eb)) 5504 goto owner_mismatch; 5505 } else { 5506 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5507 parent = path->nodes[level + 1]->start; 5508 else if (root->root_key.objectid != 5509 btrfs_header_owner(path->nodes[level + 1])) 5510 goto owner_mismatch; 5511 } 5512 5513 btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent, 5514 wc->refs[level] == 1); 5515 out: 5516 wc->refs[level] = 0; 5517 wc->flags[level] = 0; 5518 return 0; 5519 5520 owner_mismatch: 5521 btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu", 5522 btrfs_header_owner(eb), root->root_key.objectid); 5523 return -EUCLEAN; 5524 } 5525 5526 static noinline int walk_down_tree(struct btrfs_trans_handle *trans, 5527 struct btrfs_root *root, 5528 struct btrfs_path *path, 5529 struct walk_control *wc) 5530 { 5531 int level = wc->level; 5532 int lookup_info = 1; 5533 int ret = 0; 5534 5535 while (level >= 0) { 5536 ret = walk_down_proc(trans, root, path, wc, lookup_info); 5537 if (ret) 5538 break; 5539 5540 if (level == 0) 5541 break; 5542 5543 if (path->slots[level] >= 5544 btrfs_header_nritems(path->nodes[level])) 5545 break; 5546 5547 ret = do_walk_down(trans, root, path, wc, &lookup_info); 5548 if (ret > 0) { 5549 path->slots[level]++; 5550 continue; 5551 } else if (ret < 0) 5552 break; 5553 level = wc->level; 5554 } 5555 return (ret == 1) ? 0 : ret; 5556 } 5557 5558 static noinline int walk_up_tree(struct btrfs_trans_handle *trans, 5559 struct btrfs_root *root, 5560 struct btrfs_path *path, 5561 struct walk_control *wc, int max_level) 5562 { 5563 int level = wc->level; 5564 int ret; 5565 5566 path->slots[level] = btrfs_header_nritems(path->nodes[level]); 5567 while (level < max_level && path->nodes[level]) { 5568 wc->level = level; 5569 if (path->slots[level] + 1 < 5570 btrfs_header_nritems(path->nodes[level])) { 5571 path->slots[level]++; 5572 return 0; 5573 } else { 5574 ret = walk_up_proc(trans, root, path, wc); 5575 if (ret > 0) 5576 return 0; 5577 if (ret < 0) 5578 return ret; 5579 5580 if (path->locks[level]) { 5581 btrfs_tree_unlock_rw(path->nodes[level], 5582 path->locks[level]); 5583 path->locks[level] = 0; 5584 } 5585 free_extent_buffer(path->nodes[level]); 5586 path->nodes[level] = NULL; 5587 level++; 5588 } 5589 } 5590 return 1; 5591 } 5592 5593 /* 5594 * drop a subvolume tree. 5595 * 5596 * this function traverses the tree freeing any blocks that are only 5597 * referenced by the tree. 5598 * 5599 * when a shared tree block is found, this function decreases its 5600 * reference count by one. if update_ref is true, this function 5601 * also makes sure backrefs for the shared block and all lower level 5602 * blocks are properly updated.
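* The walk is restartable: progress is periodically recorded in the root item's drop_progress key, so a drop interrupted by a crash or unmount is resumed from that key later.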
5603 * 5604 * If called with for_reloc == 0, may exit early with -EAGAIN 5605 */ 5606 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) 5607 { 5608 const bool is_reloc_root = (root->root_key.objectid == 5609 BTRFS_TREE_RELOC_OBJECTID); 5610 struct btrfs_fs_info *fs_info = root->fs_info; 5611 struct btrfs_path *path; 5612 struct btrfs_trans_handle *trans; 5613 struct btrfs_root *tree_root = fs_info->tree_root; 5614 struct btrfs_root_item *root_item = &root->root_item; 5615 struct walk_control *wc; 5616 struct btrfs_key key; 5617 int err = 0; 5618 int ret; 5619 int level; 5620 bool root_dropped = false; 5621 bool unfinished_drop = false; 5622 5623 btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid); 5624 5625 path = btrfs_alloc_path(); 5626 if (!path) { 5627 err = -ENOMEM; 5628 goto out; 5629 } 5630 5631 wc = kzalloc(sizeof(*wc), GFP_NOFS); 5632 if (!wc) { 5633 btrfs_free_path(path); 5634 err = -ENOMEM; 5635 goto out; 5636 } 5637 5638 /* 5639 * Use join to avoid potential EINTR from transaction start. See 5640 * wait_reserve_ticket and the whole reservation callchain. 5641 */ 5642 if (for_reloc) 5643 trans = btrfs_join_transaction(tree_root); 5644 else 5645 trans = btrfs_start_transaction(tree_root, 0); 5646 if (IS_ERR(trans)) { 5647 err = PTR_ERR(trans); 5648 goto out_free; 5649 } 5650 5651 err = btrfs_run_delayed_items(trans); 5652 if (err) 5653 goto out_end_trans; 5654 5655 /* 5656 * This will help us catch people modifying the fs tree while we're 5657 * dropping it. It is unsafe to mess with the fs tree while it's being 5658 * dropped as we unlock the root node and parent nodes as we walk down 5659 * the tree, assuming nothing will change. If something does change 5660 * then we'll have stale information and drop references to blocks we've 5661 * already dropped. 
5662 */ 5663 set_bit(BTRFS_ROOT_DELETING, &root->state); 5664 unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state); 5665 5666 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 5667 level = btrfs_header_level(root->node); 5668 path->nodes[level] = btrfs_lock_root_node(root); 5669 path->slots[level] = 0; 5670 path->locks[level] = BTRFS_WRITE_LOCK; 5671 memset(&wc->update_progress, 0, 5672 sizeof(wc->update_progress)); 5673 } else { 5674 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 5675 memcpy(&wc->update_progress, &key, 5676 sizeof(wc->update_progress)); 5677 5678 level = btrfs_root_drop_level(root_item); 5679 BUG_ON(level == 0); 5680 path->lowest_level = level; 5681 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5682 path->lowest_level = 0; 5683 if (ret < 0) { 5684 err = ret; 5685 goto out_end_trans; 5686 } 5687 WARN_ON(ret > 0); 5688 5689 /* 5690 * unlock our path, this is safe because only this 5691 * function is allowed to delete this snapshot 5692 */ 5693 btrfs_unlock_up_safe(path, 0); 5694 5695 level = btrfs_header_level(root->node); 5696 while (1) { 5697 btrfs_tree_lock(path->nodes[level]); 5698 path->locks[level] = BTRFS_WRITE_LOCK; 5699 5700 ret = btrfs_lookup_extent_info(trans, fs_info, 5701 path->nodes[level]->start, 5702 level, 1, &wc->refs[level], 5703 &wc->flags[level]); 5704 if (ret < 0) { 5705 err = ret; 5706 goto out_end_trans; 5707 } 5708 BUG_ON(wc->refs[level] == 0); 5709 5710 if (level == btrfs_root_drop_level(root_item)) 5711 break; 5712 5713 btrfs_tree_unlock(path->nodes[level]); 5714 path->locks[level] = 0; 5715 WARN_ON(wc->refs[level] != 1); 5716 level--; 5717 } 5718 } 5719 5720 wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state); 5721 wc->level = level; 5722 wc->shared_level = -1; 5723 wc->stage = DROP_REFERENCE; 5724 wc->update_ref = update_ref; 5725 wc->keep_locks = 0; 5726 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); 5727 5728 while (1) { 5729 5730 ret = walk_down_tree(trans, root, path, wc); 5731 if (ret < 0) { 5732 btrfs_abort_transaction(trans, ret); 5733 err = ret; 5734 break; 5735 } 5736 5737 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); 5738 if (ret < 0) { 5739 btrfs_abort_transaction(trans, ret); 5740 err = ret; 5741 break; 5742 } 5743 5744 if (ret > 0) { 5745 BUG_ON(wc->stage != DROP_REFERENCE); 5746 break; 5747 } 5748 5749 if (wc->stage == DROP_REFERENCE) { 5750 wc->drop_level = wc->level; 5751 btrfs_node_key_to_cpu(path->nodes[wc->drop_level], 5752 &wc->drop_progress, 5753 path->slots[wc->drop_level]); 5754 } 5755 btrfs_cpu_key_to_disk(&root_item->drop_progress, 5756 &wc->drop_progress); 5757 btrfs_set_root_drop_level(root_item, wc->drop_level); 5758 5759 BUG_ON(wc->level == 0); 5760 if (btrfs_should_end_transaction(trans) || 5761 (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) { 5762 ret = btrfs_update_root(trans, tree_root, 5763 &root->root_key, 5764 root_item); 5765 if (ret) { 5766 btrfs_abort_transaction(trans, ret); 5767 err = ret; 5768 goto out_end_trans; 5769 } 5770 5771 if (!is_reloc_root) 5772 btrfs_set_last_root_drop_gen(fs_info, trans->transid); 5773 5774 btrfs_end_transaction_throttle(trans); 5775 if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) { 5776 btrfs_debug(fs_info, 5777 "drop snapshot early exit"); 5778 err = -EAGAIN; 5779 goto out_free; 5780 } 5781 5782 /* 5783 * Use join to avoid potential EINTR from transaction 5784 * start. See wait_reserve_ticket and the whole 5785 * reservation callchain. 
5786 */ 5787 if (for_reloc) 5788 trans = btrfs_join_transaction(tree_root); 5789 else 5790 trans = btrfs_start_transaction(tree_root, 0); 5791 if (IS_ERR(trans)) { 5792 err = PTR_ERR(trans); 5793 goto out_free; 5794 } 5795 } 5796 } 5797 btrfs_release_path(path); 5798 if (err) 5799 goto out_end_trans; 5800 5801 ret = btrfs_del_root(trans, &root->root_key); 5802 if (ret) { 5803 btrfs_abort_transaction(trans, ret); 5804 err = ret; 5805 goto out_end_trans; 5806 } 5807 5808 if (!is_reloc_root) { 5809 ret = btrfs_find_root(tree_root, &root->root_key, path, 5810 NULL, NULL); 5811 if (ret < 0) { 5812 btrfs_abort_transaction(trans, ret); 5813 err = ret; 5814 goto out_end_trans; 5815 } else if (ret > 0) { 5816 /* if we fail to delete the orphan item this time 5817 * around, it'll get picked up the next time. 5818 * 5819 * The most common failure here is just -ENOENT. 5820 */ 5821 btrfs_del_orphan_item(trans, tree_root, 5822 root->root_key.objectid); 5823 } 5824 } 5825 5826 /* 5827 * This subvolume is going to be completely dropped, and won't be 5828 * recorded as dirty roots, thus pertrans meta rsv will not be freed at 5829 * commit transaction time. So free it here manually. 5830 */ 5831 btrfs_qgroup_convert_reserved_meta(root, INT_MAX); 5832 btrfs_qgroup_free_meta_all_pertrans(root); 5833 5834 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) 5835 btrfs_add_dropped_root(trans, root); 5836 else 5837 btrfs_put_root(root); 5838 root_dropped = true; 5839 out_end_trans: 5840 if (!is_reloc_root) 5841 btrfs_set_last_root_drop_gen(fs_info, trans->transid); 5842 5843 btrfs_end_transaction_throttle(trans); 5844 out_free: 5845 kfree(wc); 5846 btrfs_free_path(path); 5847 out: 5848 /* 5849 * We were an unfinished drop root, check to see if there are any 5850 * pending, and if not clear and wake up any waiters. 5851 */ 5852 if (!err && unfinished_drop) 5853 btrfs_maybe_wake_unfinished_drop(fs_info); 5854 5855 /* 5856 * So if we need to stop dropping the snapshot for whatever reason we 5857 * need to make sure to add it back to the dead root list so that we 5858 * keep trying to do the work later. This also cleans up roots if we 5859 * don't have it in the radix (like when we recover after a power fail 5860 * or unmount) so we don't leak memory. 5861 */ 5862 if (!for_reloc && !root_dropped) 5863 btrfs_add_dead_root(root); 5864 return err; 5865 } 5866 5867 /* 5868 * drop subtree rooted at tree block 'node'. 
5869 * 5870 * NOTE: this function will unlock and release tree block 'node' 5871 * only used by relocation code 5872 */ 5873 int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 5874 struct btrfs_root *root, 5875 struct extent_buffer *node, 5876 struct extent_buffer *parent) 5877 { 5878 struct btrfs_fs_info *fs_info = root->fs_info; 5879 struct btrfs_path *path; 5880 struct walk_control *wc; 5881 int level; 5882 int parent_level; 5883 int ret = 0; 5884 int wret; 5885 5886 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 5887 5888 path = btrfs_alloc_path(); 5889 if (!path) 5890 return -ENOMEM; 5891 5892 wc = kzalloc(sizeof(*wc), GFP_NOFS); 5893 if (!wc) { 5894 btrfs_free_path(path); 5895 return -ENOMEM; 5896 } 5897 5898 btrfs_assert_tree_write_locked(parent); 5899 parent_level = btrfs_header_level(parent); 5900 atomic_inc(&parent->refs); 5901 path->nodes[parent_level] = parent; 5902 path->slots[parent_level] = btrfs_header_nritems(parent); 5903 5904 btrfs_assert_tree_write_locked(node); 5905 level = btrfs_header_level(node); 5906 path->nodes[level] = node; 5907 path->slots[level] = 0; 5908 path->locks[level] = BTRFS_WRITE_LOCK; 5909 5910 wc->refs[parent_level] = 1; 5911 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; 5912 wc->level = level; 5913 wc->shared_level = -1; 5914 wc->stage = DROP_REFERENCE; 5915 wc->update_ref = 0; 5916 wc->keep_locks = 1; 5917 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); 5918 5919 while (1) { 5920 wret = walk_down_tree(trans, root, path, wc); 5921 if (wret < 0) { 5922 ret = wret; 5923 break; 5924 } 5925 5926 wret = walk_up_tree(trans, root, path, wc, parent_level); 5927 if (wret < 0) 5928 ret = wret; 5929 if (wret != 0) 5930 break; 5931 } 5932 5933 kfree(wc); 5934 btrfs_free_path(path); 5935 return ret; 5936 } 5937 5938 int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, 5939 u64 start, u64 end) 5940 { 5941 return unpin_extent_range(fs_info, start, end, false); 5942 } 5943 5944 /* 5945 * It used to be that old block groups would be left around forever. 5946 * Iterating over them would be enough to trim unused space. Since we 5947 * now automatically remove them, we also need to iterate over unallocated 5948 * space. 5949 * 5950 * We don't want a transaction for this since the discard may take a 5951 * substantial amount of time. We don't require that a transaction be 5952 * running, but we do need to take a running transaction into account 5953 * to ensure that we're not discarding chunks that were released or 5954 * allocated in the current transaction. 5955 * 5956 * Holding the chunks lock will prevent other threads from allocating 5957 * or releasing chunks, but it won't prevent a running transaction 5958 * from committing and releasing the memory that the pending chunks 5959 * list head uses. For that, we need to take a reference to the 5960 * transaction and hold the commit root sem. We only need to hold 5961 * it while performing the free space search since we have already 5962 * held back allocations. 5963 */ 5964 static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed) 5965 { 5966 u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0; 5967 int ret; 5968 5969 *trimmed = 0; 5970 5971 /* Discard not supported = nothing to do. */ 5972 if (!bdev_max_discard_sectors(device->bdev)) 5973 return 0; 5974 5975 /* Not writable = nothing to do. */ 5976 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 5977 return 0; 5978 5979 /* No free space = nothing to do. 
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end)
{
	return unpin_extent_range(fs_info, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space.  Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time.  We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released or
 * allocated in the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses.  For that, we need to take a reference to the
 * transaction and hold the commit root sem.  We only need to hold
 * it while performing the free space search since we have already
 * held back allocations.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
	u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
	int ret;

	*trimmed = 0;

	/* Discard not supported = nothing to do. */
	if (!bdev_max_discard_sectors(device->bdev))
		return 0;

	/* Not writable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			break;

		find_first_clear_extent_bit(&device->alloc_state, start,
					    &start, &end,
					    CHUNK_TRIMMED | CHUNK_ALLOCATED);

		/* Check if there are any CHUNK_* bits left */
		if (start > device->total_bytes) {
			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
			btrfs_warn_in_rcu(fs_info,
"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
					  start, end - start + 1,
					  btrfs_dev_name(device),
					  device->total_bytes);
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		/* Ensure we skip the reserved space on each device. */
		start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);

		/*
		 * If find_first_clear_extent_bit() finds a range that spans
		 * the end of the device it will set end to -1, in this case
		 * it's up to the caller to trim the value to the size of the
		 * device.
		 */
		end = min(end, device->total_bytes - 1);

		len = end - start + 1;

		/* We didn't find any extents */
		if (!len) {
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len,
					  &bytes);
		if (!ret)
			set_extent_bit(&device->alloc_state, start,
				       start + bytes - 1, CHUNK_TRIMMED, NULL);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
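/*
 * Note on the bookkeeping in btrfs_trim_free_extents(): device ranges are
 * tracked in device->alloc_state, so asking find_first_clear_extent_bit()
 * for a range clear of CHUNK_TRIMMED | CHUNK_ALLOCATED yields the next
 * stretch that is neither allocated to a chunk nor already discarded, and
 * marking it CHUNK_TRIMMED after a successful discard keeps repeated trim
 * runs from re-discarding the same space while those bits remain set.
 */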
6084 */ 6085 if (range->len != U64_MAX && 6086 check_add_overflow(range->start, range->len, &range_end)) 6087 return -EINVAL; 6088 6089 cache = btrfs_lookup_first_block_group(fs_info, range->start); 6090 for (; cache; cache = btrfs_next_block_group(cache)) { 6091 if (cache->start >= range_end) { 6092 btrfs_put_block_group(cache); 6093 break; 6094 } 6095 6096 start = max(range->start, cache->start); 6097 end = min(range_end, cache->start + cache->length); 6098 6099 if (end - start >= range->minlen) { 6100 if (!btrfs_block_group_done(cache)) { 6101 ret = btrfs_cache_block_group(cache, true); 6102 if (ret) { 6103 bg_failed++; 6104 bg_ret = ret; 6105 continue; 6106 } 6107 } 6108 ret = btrfs_trim_block_group(cache, 6109 &group_trimmed, 6110 start, 6111 end, 6112 range->minlen); 6113 6114 trimmed += group_trimmed; 6115 if (ret) { 6116 bg_failed++; 6117 bg_ret = ret; 6118 continue; 6119 } 6120 } 6121 } 6122 6123 if (bg_failed) 6124 btrfs_warn(fs_info, 6125 "failed to trim %llu block group(s), last error %d", 6126 bg_failed, bg_ret); 6127 6128 mutex_lock(&fs_devices->device_list_mutex); 6129 list_for_each_entry(device, &fs_devices->devices, dev_list) { 6130 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) 6131 continue; 6132 6133 ret = btrfs_trim_free_extents(device, &group_trimmed); 6134 if (ret) { 6135 dev_failed++; 6136 dev_ret = ret; 6137 break; 6138 } 6139 6140 trimmed += group_trimmed; 6141 } 6142 mutex_unlock(&fs_devices->device_list_mutex); 6143 6144 if (dev_failed) 6145 btrfs_warn(fs_info, 6146 "failed to trim %llu device(s), last error %d", 6147 dev_failed, dev_ret); 6148 range->len = trimmed; 6149 if (bg_ret) 6150 return bg_ret; 6151 return dev_ret; 6152 } 6153