// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "qgroup.h"
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "backref.h"
#include "misc.h"
#include "subpage.h"
#include "zoned.h"
#include "inode-item.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "relocation.h"
#include "super.h"

/*
 * Relocation overview
 *
 * [What does relocation do]
 *
 * The objective of relocation is to relocate all extents of the target block
 * group to other block groups.
 * This is utilized by resize (shrink only), profile converting, compacting
 * space, or balance routine to spread chunks over devices.
 *
 * 		Before		|		After
 * ------------------------------------------------------------------
 *  BG A: 10 data extents	| BG A: deleted
 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
 *  BG C:  1 extents		| BG C:  3 data extents (1 old + 2 relocated)
 *
 * [How does relocation work]
 *
 * 1.   Mark the target block group read-only
 *      New extents won't be allocated from the target block group.
 *
 * 2.1  Record each extent in the target block group
 *      To build a proper map of extents to be relocated.
 *
 * 2.2  Build data reloc tree and reloc trees
 *      Data reloc tree will contain an inode, recording all newly relocated
 *      data extents.
 *      There will be only one data reloc tree for one data block group.
 *
 *      Reloc tree will be a special snapshot of its source tree, containing
 *      relocated tree blocks.
 *      Each tree referring to a tree block in target block group will get its
 *      reloc tree built.
 *
 * 2.3  Swap source tree with its corresponding reloc tree
 *      Each involved tree only refers to new extents after swap.
 *
 * 3.   Cleanup reloc trees and data reloc tree.
 *      As old extents in the target block group are still referenced by reloc
 *      trees, we need to clean them up before really freeing the target block
 *      group.
 *
 * The main complexity is in steps 2.2 and 2.3.
 *
 * The entry point of relocation is relocate_block_group() function.
 */

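/*
 * A rough sketch of how the stages above map onto code later in this file
 * (illustrative only, not the exact call graph; error handling elided):
 *
 *	rc->stage = MOVE_DATA_EXTENTS;
 *	while (there are extents left in the block group) {
 *		if (the extent is a tree block)
 *			add_tree_block(rc, ...);     relocated via reloc trees
 *		else
 *			relocate_data_extent(...);   copied via rc->data_inode
 *	}
 *	... second pass with rc->stage = UPDATE_DATA_PTRS ...
 *	merge_reloc_roots(rc);			     swap + cleanup (2.3/3)
 */
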
#define RELOCATION_RESERVED_NODES	256
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * present a tree block to process
 */
struct tree_block {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	u64 owner;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct btrfs_backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

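/*
 * Note: the anonymous leading struct in mapping_node and tree_block mirrors
 * struct rb_simple_node (misc.h), so the rb_simple_search()/rb_simple_insert()
 * helpers keyed on bytenr work on them directly. A minimal usage sketch
 * (simplified; locking and error handling elided):
 *
 *	struct mapping_node *mn = kmalloc(sizeof(*mn), GFP_NOFS);
 *
 *	mn->bytenr = root->commit_root->start;
 *	mn->data = root;
 *	rb_simple_insert(&rc->reloc_root_tree.rb_root, mn->bytenr,
 *			 &mn->rb_node);
 *
 *	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
 *	if (rb_node)
 *		mn = rb_entry(rb_node, struct mapping_node, rb_node);
 */
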
static void mark_block_processed(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_range(node->bytenr, rc->block_group->start,
		     rc->block_group->length)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		set_extent_bits(&rc->processed_blocks, node->bytenr,
				node->bytenr + blocksize - 1, EXTENT_DIRTY);
	}
	node->processed = 1;
}


static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

/*
 * walk up backref nodes until we reach a node that represents a tree root
 */
static struct btrfs_backref_node *walk_up_backref(
		struct btrfs_backref_node *node,
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find start of next reference path
 */
static struct btrfs_backref_node *walk_down_backref(
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}

static void update_backref_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;

	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree.
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct btrfs_backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}

static bool reloc_root_is_dead(struct btrfs_root *root)
{
	/*
	 * Pair with set_bit/clear_bit in clean_dirty_subvols and
	 * btrfs_update_reloc_root. We need to see the updated bit before
	 * trying to access reloc_root
	 */
	smp_rmb();
	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return true;
	return false;
}

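/*
 * walk_up_backref() and walk_down_backref() above together iterate every
 * reference path from a block up to the roots, using edges[] as an explicit
 * stack (index is the stack depth). A rough sketch of the usage pattern, as
 * seen in select_reloc_root() later in this file (simplified):
 *
 *	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
 *	int index = 0;
 *
 *	next = walk_up_backref(node, edges, &index);	 climb to a root
 *	while (next) {
 *		... examine the root this path ends in ...
 *		next = walk_down_backref(edges, &index); backtrack, then
 *		if (next)				 climb the next path
 *			next = walk_up_backref(next, edges, &index);
 *	}
 */
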
/*
 * Check if this subvolume tree has a valid reloc tree.
 *
 * A reloc tree after the swap is considered dead, thus not considered valid.
 * This is enough for most callers, as they don't distinguish a dead reloc
 * root from no reloc root. But btrfs_should_ignore_reloc_root() below is a
 * special case.
 */
static bool have_reloc_root(struct btrfs_root *root)
{
	if (reloc_root_is_dead(root))
		return false;
	if (!root->reloc_root)
		return false;
	return true;
}

int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/* This root has been merged with its reloc tree, we can ignore it */
	if (reloc_root_is_dead(root))
		return 1;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_header_generation(reloc_root->commit_root) ==
	    root->fs_info->running_transaction->transid)
		return 0;
	/*
	 * if there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}

/*
 * find reloc tree by address of tree root
 */
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	ASSERT(rc);
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return btrfs_grab_root(root);
}

/*
 * For useless nodes, do two major clean ups:
 *
 * - Cleanup the children edges and nodes
 *   If child node is also orphan (no parent) during cleanup, then the child
 *   node will also be cleaned up.
 *
 * - Freeing up leaves (level 0), keeps nodes detached
 *   For nodes, the node is still cached as "detached"
 *
 * Return false if @node is not in the @useless_nodes list.
 * Return true if @node is in the @useless_nodes list.
 */
static bool handle_useless_nodes(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct list_head *useless_node = &cache->useless_node;
	bool ret = false;

	while (!list_empty(useless_node)) {
		struct btrfs_backref_node *cur;

		cur = list_first_entry(useless_node, struct btrfs_backref_node,
				       list);
		list_del_init(&cur->list);

		/* Only tree root nodes can be added to @useless_nodes */
		ASSERT(list_empty(&cur->upper));

		if (cur == node)
			ret = true;

		/* The node is the lowest node */
		if (cur->lowest) {
			list_del_init(&cur->lower);
			cur->lowest = 0;
		}

		/* Cleanup the lower edges */
		while (!list_empty(&cur->lower)) {
			struct btrfs_backref_edge *edge;
			struct btrfs_backref_node *lower;

			edge = list_entry(cur->lower.next,
					  struct btrfs_backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			btrfs_backref_free_edge(cache, edge);

			/* Child node is also orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
		}
		/* Mark this block processed for relocation */
		mark_block_processed(rc, cur);

		/*
		 * Backref nodes for tree leaves are deleted from the cache.
		 * Backref nodes for upper level tree blocks are left in the
		 * cache to avoid unnecessary backref lookup.
		 */
		if (cur->level > 0) {
			list_add(&cur->list, &cache->detached);
			cur->detached = 1;
		} else {
			rb_erase(&cur->rb_node, &cache->rb_root);
			btrfs_backref_free_node(cache, cur);
		}
	}
	return ret;
}

/*
 * Build backref tree for a given tree block. Root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond to
 * roots of b-trees that reference the tree block.
 *
 * The basic idea of this function is check backrefs of a given block to find
 * upper level blocks that reference the block, and then check backrefs of
 * these upper level blocks recursively. The recursion stops when tree root is
 * reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 * all upper level blocks that directly/indirectly reference the block are also
 * cached.
 */
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
			struct reloc_control *rc, struct btrfs_key *node_key,
			int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	/* For searching parent of TREE_BLOCK_REF */
	struct btrfs_path *path;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_edge *edge;
	int ret;
	int err = 0;

	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->lowest = 1;
	cur = node;

	/* Breadth-first search to build backref cache */
	do {
		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
						  cur);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/*
		 * The pending list isn't empty, take the first block to
		 * process
		 */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (handle_useless_nodes(rc, node))
		node = NULL;
out:
	btrfs_backref_iter_free(iter);
	btrfs_free_path(path);
	if (err) {
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
	return node;
}

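/*
 * Illustrative example of what build_backref_tree() caches for a tree block
 * B at level 1 that is shared by two subvolumes (hypothetical layout):
 *
 *	fs tree 257       fs tree 258	<- backref tree leaves: the roots
 *	      \             /		   referencing B, found by iterating
 *	       upper node (level 2)	   backrefs upwards
 *	              |
 *	       node for B (level 1)	<- the node returned to the caller
 *
 * Edges carry both directions: node->upper lists edges toward referencing
 * blocks, node->lower lists edges toward referenced blocks.
 */
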
/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_node *new_node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = rb_simple_search(&cache->rb_root,
					   reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct btrfs_backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
					    node->level);
	if (!new_node)
		return -ENOMEM;

	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = btrfs_grab_root(dest);
	ASSERT(new_node->root);

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = btrfs_backref_alloc_edge(cache);
			if (!new_edge)
				goto fail;

			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
						new_node, LINK_UPPER);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
				   &new_node->rb_node);
	if (rb_node)
		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct btrfs_backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		btrfs_backref_free_edge(cache, new_edge);
	}
	btrfs_backref_free_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->commit_root->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_err(fs_info,
			  "Duplicate root found for start=%llu while inserting into relocation tree",
			  node->bytenr);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}

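/*
 * Note on the mapping key (see __add_reloc_root() above): a reloc root is
 * indexed by the bytenr of its *commit* root, so deletion and re-keying
 * (__del_reloc_root()/__update_reloc_root() below) also search by
 * root->commit_root->start; the mapping is only moved to the new root node
 * bytenr once that node is about to become the commit root.
 */
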
667 { 668 struct btrfs_fs_info *fs_info = root->fs_info; 669 struct rb_node *rb_node; 670 struct mapping_node *node = NULL; 671 struct reloc_control *rc = fs_info->reloc_ctl; 672 bool put_ref = false; 673 674 if (rc && root->node) { 675 spin_lock(&rc->reloc_root_tree.lock); 676 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, 677 root->commit_root->start); 678 if (rb_node) { 679 node = rb_entry(rb_node, struct mapping_node, rb_node); 680 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); 681 RB_CLEAR_NODE(&node->rb_node); 682 } 683 spin_unlock(&rc->reloc_root_tree.lock); 684 ASSERT(!node || (struct btrfs_root *)node->data == root); 685 } 686 687 /* 688 * We only put the reloc root here if it's on the list. There's a lot 689 * of places where the pattern is to splice the rc->reloc_roots, process 690 * the reloc roots, and then add the reloc root back onto 691 * rc->reloc_roots. If we call __del_reloc_root while it's off of the 692 * list we don't want the reference being dropped, because the guy 693 * messing with the list is in charge of the reference. 694 */ 695 spin_lock(&fs_info->trans_lock); 696 if (!list_empty(&root->root_list)) { 697 put_ref = true; 698 list_del_init(&root->root_list); 699 } 700 spin_unlock(&fs_info->trans_lock); 701 if (put_ref) 702 btrfs_put_root(root); 703 kfree(node); 704 } 705 706 /* 707 * helper to update the 'address of tree root -> reloc tree' 708 * mapping 709 */ 710 static int __update_reloc_root(struct btrfs_root *root) 711 { 712 struct btrfs_fs_info *fs_info = root->fs_info; 713 struct rb_node *rb_node; 714 struct mapping_node *node = NULL; 715 struct reloc_control *rc = fs_info->reloc_ctl; 716 717 spin_lock(&rc->reloc_root_tree.lock); 718 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, 719 root->commit_root->start); 720 if (rb_node) { 721 node = rb_entry(rb_node, struct mapping_node, rb_node); 722 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); 723 } 724 spin_unlock(&rc->reloc_root_tree.lock); 725 726 if (!node) 727 return 0; 728 BUG_ON((struct btrfs_root *)node->data != root); 729 730 spin_lock(&rc->reloc_root_tree.lock); 731 node->bytenr = root->node->start; 732 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, 733 node->bytenr, &node->rb_node); 734 spin_unlock(&rc->reloc_root_tree.lock); 735 if (rb_node) 736 btrfs_backref_panic(fs_info, node->bytenr, -EEXIST); 737 return 0; 738 } 739 740 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, 741 struct btrfs_root *root, u64 objectid) 742 { 743 struct btrfs_fs_info *fs_info = root->fs_info; 744 struct btrfs_root *reloc_root; 745 struct extent_buffer *eb; 746 struct btrfs_root_item *root_item; 747 struct btrfs_key root_key; 748 int ret = 0; 749 bool must_abort = false; 750 751 root_item = kmalloc(sizeof(*root_item), GFP_NOFS); 752 if (!root_item) 753 return ERR_PTR(-ENOMEM); 754 755 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; 756 root_key.type = BTRFS_ROOT_ITEM_KEY; 757 root_key.offset = objectid; 758 759 if (root->root_key.objectid == objectid) { 760 u64 commit_root_gen; 761 762 /* called by btrfs_init_reloc_root */ 763 ret = btrfs_copy_root(trans, root, root->commit_root, &eb, 764 BTRFS_TREE_RELOC_OBJECTID); 765 if (ret) 766 goto fail; 767 768 /* 769 * Set the last_snapshot field to the generation of the commit 770 * root - like this ctree.c:btrfs_block_can_be_shared() behaves 771 * correctly (returns true) when the relocation root is created 772 * either inside the critical section of a transaction commit 773 * (through 
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret = 0;
	bool must_abort = false;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	if (!root_item)
		return ERR_PTR(-ENOMEM);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;

		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) either when the relocation root is
		 * created inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) or when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;
	}

	/*
	 * We have changed references at this point, we must abort the
	 * transaction if anything fails.
	 */
	must_abort = true;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		btrfs_set_root_drop_level(root_item, 0);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	if (ret)
		goto fail;

	kfree(root_item);

	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
	if (IS_ERR(reloc_root)) {
		ret = PTR_ERR(reloc_root);
		goto abort;
	}
	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
	reloc_root->last_trans = trans->transid;
	return reloc_root;
fail:
	kfree(root_item);
abort:
	if (must_abort)
		btrfs_abort_transaction(trans, ret);
	return ERR_PTR(ret);
}

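/*
 * Key layout note (illustrative): all reloc roots share
 * BTRFS_TREE_RELOC_OBJECTID as the objectid of their root item key, and the
 * source tree's objectid goes in the key offset, e.g. for fs tree 257:
 *
 *	(BTRFS_TREE_RELOC_OBJECTID, BTRFS_ROOT_ITEM_KEY, 257)
 *
 * This is why code below gets back to the fs root via the offset, as in
 * btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false).
 */
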
/*
 * create reloc tree for a given fs tree. reloc tree is just a
 * snapshot of the fs tree with special root objectid.
 *
 * The reloc_root comes out of here with two references, one for
 * root->reloc_root, and another for being on the rc->reloc_roots list.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (!rc)
		return 0;

	/*
	 * The subvolume has a reloc tree but the swap is finished, no need to
	 * create/update the dead reloc tree
	 */
	if (reloc_root_is_dead(root))
		return 0;

	/*
	 * This is subtle but important. We do not do
	 * record_root_in_transaction for reloc roots, instead we record their
	 * corresponding fs root, and then here we update the last trans for the
	 * reloc root. This means that we have to do this for the entire life
	 * of the reloc root, regardless of which stage of the relocation we are
	 * in.
	 */
	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	/*
	 * We are merging reloc roots, we do not need new reloc trees. Also
	 * reloc trees never need their own reloc tree.
	 */
	if (!rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	root->reloc_root = btrfs_grab_root(reloc_root);
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!have_reloc_root(root))
		return 0;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/*
	 * We are probably ok here, but __del_reloc_root() will drop its ref of
	 * the root. We have the ref for root->reloc_root, but just in case
	 * hold it while we update the reloc root.
	 */
	btrfs_grab_root(reloc_root);

	/* root->reloc_root will stay until current relocation finished */
	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		/*
		 * Mark the tree as dead before we change reloc_root so
		 * have_reloc_root will not touch it from now on.
		 */
		smp_wmb();
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		__update_reloc_root(reloc_root);
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	btrfs_put_root(reloc_root);
	return ret;
}

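/*
 * Lifecycle sketch (simplified and approximate): btrfs_init_reloc_root() is
 * reached from the root-recording path in transaction.c whenever a shareable
 * root is first modified in a transaction, and btrfs_update_reloc_root()
 * runs when fs roots are committed, roughly:
 *
 *	btrfs_start_transaction(fs_root, ...)
 *	  -> btrfs_record_root_in_transaction()
 *	       -> btrfs_init_reloc_root()    create, or refresh last_trans
 *	...
 *	btrfs_commit_transaction()
 *	  -> btrfs_update_reloc_root()	     persist the new root node,
 *					     possibly mark the tree dead
 */
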
/*
 * helper to find first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

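/*
 * Worked example of the offset math in get_new_location() above
 * (illustrative numbers): the data reloc inode's file offsets mirror the old
 * extent bytenrs shifted by index_cnt, which is set to the block group's
 * start when the inode is created. For a block group starting at 1G and an
 * old extent at bytenr 1G+4M, the file extent item is looked up at file
 * offset 4M, and its disk_bytenr gives the location of the relocated copy.
 */
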
/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_ref ref = { 0 };

		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_range(bytenr, rc->block_group->start,
			      rc->block_group->length))
			continue;

		/*
		 * if we are modifying block in fs tree, wait for read_folio
		 * to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(BTRFS_I(inode));
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				struct extent_state *cached_state = NULL;

				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end,
						      &cached_state);
				if (!ret)
					continue;

				btrfs_drop_extent_map_range(BTRFS_I(inode),
							    key.offset, end, true);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end, &cached_state);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(BTRFS_I(inode));
	return ret;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;

	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			return ret;
		}
	}

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		ASSERT(level >= lowest_level);

		ret = btrfs_bin_search(parent, &key, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							 path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = btrfs_read_node_slot(parent, slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb,
						      BTRFS_NESTING_COW);
				if (ret) {
					btrfs_tree_unlock(eb);
					free_extent_buffer(eb);
					break;
				}
			}

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		path->lowest_level = 0;
		if (ret) {
			if (ret > 0)
				ret = -ENOENT;
			break;
		}

		/*
		 * Info qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
				    true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}

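/*
 * Summary of the four delayed ref updates issued per pointer swap in
 * replace_path() above (src is the reloc tree, dest the fs tree):
 *
 *	old block: inc ref owned by src   (reloc tree now points to it)
 *	new block: inc ref owned by dest  (fs tree now points to it)
 *	new block: drop ref owned by src  (reloc tree no longer points to it)
 *	old block: drop ref owned by dest (fs tree no longer points to it)
 *
 * Net reference counts are unchanged; only the owners/parents are exchanged
 * to match the swapped block pointers.
 */
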
/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}

/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		eb = btrfs_read_node_slot(eb, path->slots[i]);
		if (IS_ERR(eb))
			return PTR_ERR(eb);
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}

/*
 * invalidate extent cache for file extents whose key is in the range of
 * [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		struct extent_state *cached_state = NULL;

		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(BTRFS_I(inode));

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for read_folio to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
		btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}

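/*
 * A rough sketch of how merge_reloc_root() below drives these walkers
 * (simplified; transaction and reservation handling elided):
 *
 *	while (1) {
 *		ret = walk_down_reloc_tree(reloc_root, path, &level);
 *					   descend to the lowest block newer
 *					   than last_snapshot
 *		if (ret > 0)
 *			break;		   whole tree processed
 *		replace_path(...);	   swap the modified subtree
 *		walk_up_reloc_tree(...);   advance to the next slot
 *	}
 */
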
/*
 * Insert current subvolume into reloc_control::dirty_subvol_roots
 */
static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;
	int ret;

	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);

	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	btrfs_set_root_drop_level(reloc_root_item, 0);
	btrfs_set_root_refs(reloc_root_item, 0);
	ret = btrfs_update_reloc_root(trans, root);
	if (ret)
		return ret;

	if (list_empty(&root->reloc_dirty_list)) {
		btrfs_grab_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}

	return 0;
}

static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	int ret2;

	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			/* Merged subvolume, cleanup its reloc root */
			struct btrfs_root *reloc_root = root->reloc_root;

			list_del_init(&root->reloc_dirty_list);
			root->reloc_root = NULL;
			/*
			 * Need barrier to ensure clear_bit() only happens after
			 * root->reloc_root = NULL. Pairs with have_reloc_root.
			 */
			smp_wmb();
			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
			if (reloc_root) {
				/*
				 * btrfs_drop_snapshot drops our ref we hold for
				 * ->reloc_root. If it fails however we must
				 * drop the ref ourselves.
				 */
				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
				if (ret2 < 0) {
					btrfs_put_root(reloc_root);
					if (!ret)
						ret = ret2;
				}
			}
			btrfs_put_root(root);
		} else {
			/* Orphan reloc tree, just clean it up */
			ret2 = btrfs_drop_snapshot(root, 0, 1);
			if (ret2 < 0) {
				btrfs_put_root(root);
				if (!ret)
					ret = ret2;
			}
		}
	}
	return ret;
}

/*
 * merge the relocated tree blocks in reloc tree with corresponding
 * fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int reserve_level;
	int level;
	int max_level;
	int replaced = 0;
	int ret = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		atomic_inc(&reloc_root->node->refs);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	/*
	 * In merge_reloc_root(), we modify the upper level pointer to swap the
	 * tree blocks between reloc tree and subvolume tree. Thus for tree
	 * block COW, we COW at most from level 1 to root level for each tree.
	 *
	 * Thus the needed metadata size is at most root_level * nodesize,
	 * and * 2 since we have two trees to COW.
	 */
	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
	min_reserved = fs_info->nodesize * reserve_level * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     min_reserved,
					     BTRFS_RESERVE_FLUSH_LIMIT);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}

		/*
		 * At this point we no longer have a reloc_control, so we can't
		 * depend on btrfs_init_reloc_root to update our last_trans.
		 *
		 * But that's ok, we started the trans handle on our
		 * corresponding fs_root, which means it's been added to the
		 * dirty list. At commit time we'll still call
		 * btrfs_update_reloc_root() and update our root item
		 * appropriately.
		 */
		reloc_root->last_trans = trans->transid;
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0)
			goto out;
		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		btrfs_set_root_drop_level(root_item, level);

		btrfs_end_transaction_throttle(trans);
		trans = NULL;

		btrfs_btree_balance_dirty(fs_info);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to be
	 * relocated and the block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
			      BTRFS_NESTING_COW);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
out:
	btrfs_free_path(path);

	if (ret == 0) {
		ret = insert_dirty_subvol(trans, rc, root);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans);

	btrfs_btree_balance_dirty(fs_info);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return ret;
}

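/*
 * Worked example for the min_reserved computation in merge_reloc_root()
 * above (illustrative numbers): with the default 16K nodesize and a root
 * level of 3, min_reserved = 16K * 3 * 2 = 96K, enough to CoW one path from
 * level 1 up to the root in both the subvolume tree and the reloc tree.
 */
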
static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (IS_ERR(root)) {
			/*
			 * Even if we have an error we need this reloc root
			 * back on our list so we can clean up properly.
			 */
			list_add(&reloc_root->root_list, &reloc_roots);
			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
			if (!err)
				err = PTR_ERR(root);
			break;
		}
		ASSERT(root->reloc_root == reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		ret = btrfs_update_reloc_root(trans, root);

		/*
		 * Even if we have an error we need this reloc root back on our
		 * list so we can clean up properly.
		 */
		list_add(&reloc_root->root_list, &reloc_roots);
		btrfs_put_root(root);

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (!err)
				err = ret;
			break;
		}
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		err = btrfs_commit_transaction(trans);
	else
		btrfs_end_transaction(trans);
	return err;
}

static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root, *tmp;

	list_for_each_entry_safe(reloc_root, tmp, list, root_list)
		__del_reloc_root(reloc_root);
}

static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
	mutex_lock(&fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			if (IS_ERR(root)) {
				/*
				 * For recovery we read the fs roots on mount,
				 * and if we didn't find the root then we marked
				 * the reloc root as a garbage root. For normal
				 * relocation obviously the root should exist in
				 * memory. However there's no reason we can't
				 * handle the error properly here just in case.
				 */
				ASSERT(0);
				ret = PTR_ERR(root);
				goto out;
			}
			if (root->reloc_root != reloc_root) {
				/*
				 * This is actually impossible without something
				 * going really wrong (like weird race condition
				 * or cosmic rays).
				 */
				ASSERT(0);
				ret = -EINVAL;
				goto out;
			}
			ret = merge_reloc_root(rc, root);
			btrfs_put_root(root);
			if (ret) {
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			if (!IS_ERR(root)) {
				if (root->reloc_root == reloc_root) {
					root->reloc_root = NULL;
					btrfs_put_root(reloc_root);
				}
				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
					  &root->state);
				btrfs_put_root(root);
			}

			list_del_init(&reloc_root->root_list);
			/* Don't forget to queue this reloc root for cleanup */
			list_add_tail(&reloc_root->reloc_dirty_list,
				      &rc->dirty_subvol_roots);
		}
	}

	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret, NULL);
		free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&fs_info->reloc_mutex);
		free_reloc_roots(&reloc_roots);
	}

	/*
	 * We used to have
	 *
	 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
	 *
	 * here, but it's wrong. If we fail to start the transaction in
	 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
	 * have actually been removed from the reloc_root_tree rb tree. This is
	 * fine because we're bailing here, and we hold a reference on the root
	 * for the list that holds it, so these roots will be cleaned up when we
	 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
	 * will be cleaned up on unmount.
	 *
	 * The remaining nodes will be cleaned up by free_reloc_control.
	 */
}

2009 */ 2010 ASSERT(0); 2011 ret = -EINVAL; 2012 goto out; 2013 } 2014 ret = merge_reloc_root(rc, root); 2015 btrfs_put_root(root); 2016 if (ret) { 2017 if (list_empty(&reloc_root->root_list)) 2018 list_add_tail(&reloc_root->root_list, 2019 &reloc_roots); 2020 goto out; 2021 } 2022 } else { 2023 if (!IS_ERR(root)) { 2024 if (root->reloc_root == reloc_root) { 2025 root->reloc_root = NULL; 2026 btrfs_put_root(reloc_root); 2027 } 2028 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, 2029 &root->state); 2030 btrfs_put_root(root); 2031 } 2032 2033 list_del_init(&reloc_root->root_list); 2034 /* Don't forget to queue this reloc root for cleanup */ 2035 list_add_tail(&reloc_root->reloc_dirty_list, 2036 &rc->dirty_subvol_roots); 2037 } 2038 } 2039 2040 if (found) { 2041 found = 0; 2042 goto again; 2043 } 2044 out: 2045 if (ret) { 2046 btrfs_handle_fs_error(fs_info, ret, NULL); 2047 free_reloc_roots(&reloc_roots); 2048 2049 /* new reloc root may be added */ 2050 mutex_lock(&fs_info->reloc_mutex); 2051 list_splice_init(&rc->reloc_roots, &reloc_roots); 2052 mutex_unlock(&fs_info->reloc_mutex); 2053 free_reloc_roots(&reloc_roots); 2054 } 2055 2056 /* 2057 * We used to have 2058 * 2059 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2060 * 2061 * here, but it's wrong. If we fail to start the transaction in 2062 * prepare_to_merge() we will have only 0 ref reloc roots, none of which 2063 * have actually been removed from the reloc_root_tree rb tree. This is 2064 * fine because we're bailing here, and we hold a reference on the root 2065 * for the list that holds it, so these roots will be cleaned up when we 2066 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root 2067 * will be cleaned up on unmount. 2068 * 2069 * The remaining nodes will be cleaned up by free_reloc_control. 2070 */ 2071 } 2072 2073 static void free_block_list(struct rb_root *blocks) 2074 { 2075 struct tree_block *block; 2076 struct rb_node *rb_node; 2077 while ((rb_node = rb_first(blocks))) { 2078 block = rb_entry(rb_node, struct tree_block, rb_node); 2079 rb_erase(rb_node, blocks); 2080 kfree(block); 2081 } 2082 } 2083 2084 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, 2085 struct btrfs_root *reloc_root) 2086 { 2087 struct btrfs_fs_info *fs_info = reloc_root->fs_info; 2088 struct btrfs_root *root; 2089 int ret; 2090 2091 if (reloc_root->last_trans == trans->transid) 2092 return 0; 2093 2094 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); 2095 2096 /* 2097 * This should succeed, since we can't have a reloc root without having 2098 * already looked up the actual root and created the reloc root for this 2099 * root. 2100 * 2101 * However if there's some sort of corruption where we have a ref to a 2102 * reloc root without a corresponding root this could return ENOENT. 
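 *
 * The checks below handle both cases: a missing root fails with the
 * lookup error, and a root whose ->reloc_root does not match is treated
 * as corruption and fails with -EUCLEAN.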
2103 */ 2104 if (IS_ERR(root)) { 2105 ASSERT(0); 2106 return PTR_ERR(root); 2107 } 2108 if (root->reloc_root != reloc_root) { 2109 ASSERT(0); 2110 btrfs_err(fs_info, 2111 "root %llu has two reloc roots associated with it", 2112 reloc_root->root_key.offset); 2113 btrfs_put_root(root); 2114 return -EUCLEAN; 2115 } 2116 ret = btrfs_record_root_in_trans(trans, root); 2117 btrfs_put_root(root); 2118 2119 return ret; 2120 } 2121 2122 static noinline_for_stack 2123 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2124 struct reloc_control *rc, 2125 struct btrfs_backref_node *node, 2126 struct btrfs_backref_edge *edges[]) 2127 { 2128 struct btrfs_backref_node *next; 2129 struct btrfs_root *root; 2130 int index = 0; 2131 int ret; 2132 2133 next = node; 2134 while (1) { 2135 cond_resched(); 2136 next = walk_up_backref(next, edges, &index); 2137 root = next->root; 2138 2139 /* 2140 * If there is no root, then our references for this block are 2141 * incomplete, as we should be able to walk all the way up to a 2142 * block that is owned by a root. 2143 * 2144 * This path is only for SHAREABLE roots, so if we come upon a 2145 * non-SHAREABLE root then we have backrefs that resolve 2146 * improperly. 2147 * 2148 * Both of these cases indicate file system corruption, or a bug 2149 * in the backref walking code. 2150 */ 2151 if (!root) { 2152 ASSERT(0); 2153 btrfs_err(trans->fs_info, 2154 "bytenr %llu doesn't have a backref path ending in a root", 2155 node->bytenr); 2156 return ERR_PTR(-EUCLEAN); 2157 } 2158 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 2159 ASSERT(0); 2160 btrfs_err(trans->fs_info, 2161 "bytenr %llu has multiple refs with one ending in a non-shareable root", 2162 node->bytenr); 2163 return ERR_PTR(-EUCLEAN); 2164 } 2165 2166 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2167 ret = record_reloc_root_in_trans(trans, root); 2168 if (ret) 2169 return ERR_PTR(ret); 2170 break; 2171 } 2172 2173 ret = btrfs_record_root_in_trans(trans, root); 2174 if (ret) 2175 return ERR_PTR(ret); 2176 root = root->reloc_root; 2177 2178 /* 2179 * We could have raced with another thread which failed, so 2180 * root->reloc_root may not be set, return ENOENT in this case. 2181 */ 2182 if (!root) 2183 return ERR_PTR(-ENOENT); 2184 2185 if (next->new_bytenr != root->node->start) { 2186 /* 2187 * We just created the reloc root, so we shouldn't have 2188 * ->new_bytenr set and this shouldn't be in the changed 2189 * list. If it is then we have multiple roots pointing 2190 * at the same bytenr which indicates corruption, or 2191 * we've made a mistake in the backref walking code. 2192 */ 2193 ASSERT(next->new_bytenr == 0); 2194 ASSERT(list_empty(&next->list)); 2195 if (next->new_bytenr || !list_empty(&next->list)) { 2196 btrfs_err(trans->fs_info, 2197 "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu", 2198 node->bytenr, next->bytenr); 2199 return ERR_PTR(-EUCLEAN); 2200 } 2201 2202 next->new_bytenr = root->node->start; 2203 btrfs_put_root(next->root); 2204 next->root = btrfs_grab_root(root); 2205 ASSERT(next->root); 2206 list_add_tail(&next->list, 2207 &rc->backref_cache.changed); 2208 mark_block_processed(rc, next); 2209 break; 2210 } 2211 2212 WARN_ON(1); 2213 root = NULL; 2214 next = walk_down_backref(edges, &index); 2215 if (!next || next->level <= node->level) 2216 break; 2217 } 2218 if (!root) { 2219 /* 2220 * This can happen if there's fs corruption or if there's a bug 2221 * in the backref lookup code. 
2222 */ 2223 ASSERT(0); 2224 return ERR_PTR(-ENOENT); 2225 } 2226 2227 next = node; 2228 /* setup backref node path for btrfs_reloc_cow_block */ 2229 while (1) { 2230 rc->backref_cache.path[next->level] = next; 2231 if (--index < 0) 2232 break; 2233 next = edges[index]->node[UPPER]; 2234 } 2235 return root; 2236 } 2237 2238 /* 2239 * Select a tree root for relocation. 2240 * 2241 * Return NULL if the block is shareable but is not the root block of its 2242 * tree; do_relocation() must be used in that case. 2243 * 2244 * Return a tree root pointer if the block is not shareable, or if it is 2245 * the root of a shareable fs tree. Return -ENOENT if the block is the root of a reloc tree. 2246 */ 2247 static noinline_for_stack 2248 struct btrfs_root *select_one_root(struct btrfs_backref_node *node) 2249 { 2250 struct btrfs_backref_node *next; 2251 struct btrfs_root *root; 2252 struct btrfs_root *fs_root = NULL; 2253 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2254 int index = 0; 2255 2256 next = node; 2257 while (1) { 2258 cond_resched(); 2259 next = walk_up_backref(next, edges, &index); 2260 root = next->root; 2261 2262 /* 2263 * This can occur if we have incomplete extent refs leading all 2264 * the way up a particular path; in that case return -EUCLEAN. 2265 */ 2266 if (!root) 2267 return ERR_PTR(-EUCLEAN); 2268 2269 /* No other choice for non-shareable tree */ 2270 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 2271 return root; 2272 2273 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) 2274 fs_root = root; 2275 2276 if (next != node) 2277 return NULL; 2278 2279 next = walk_down_backref(edges, &index); 2280 if (!next || next->level <= node->level) 2281 break; 2282 } 2283 2284 if (!fs_root) 2285 return ERR_PTR(-ENOENT); 2286 return fs_root; 2287 } 2288 2289 static noinline_for_stack 2290 u64 calcu_metadata_size(struct reloc_control *rc, 2291 struct btrfs_backref_node *node, int reserve) 2292 { 2293 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2294 struct btrfs_backref_node *next = node; 2295 struct btrfs_backref_edge *edge; 2296 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2297 u64 num_bytes = 0; 2298 int index = 0; 2299 2300 BUG_ON(reserve && node->processed); 2301 2302 while (next) { 2303 cond_resched(); 2304 while (1) { 2305 if (next->processed && (reserve || next != node)) 2306 break; 2307 2308 num_bytes += fs_info->nodesize; 2309 2310 if (list_empty(&next->upper)) 2311 break; 2312 2313 edge = list_entry(next->upper.next, 2314 struct btrfs_backref_edge, list[LOWER]); 2315 edges[index++] = edge; 2316 next = edge->node[UPPER]; 2317 } 2318 next = walk_down_backref(edges, &index); 2319 } 2320 return num_bytes; 2321 } 2322 2323 static int reserve_metadata_space(struct btrfs_trans_handle *trans, 2324 struct reloc_control *rc, 2325 struct btrfs_backref_node *node) 2326 { 2327 struct btrfs_root *root = rc->extent_root; 2328 struct btrfs_fs_info *fs_info = root->fs_info; 2329 u64 num_bytes; 2330 int ret; 2331 u64 tmp; 2332 2333 num_bytes = calcu_metadata_size(rc, node, 1) * 2; 2334 2335 trans->block_rsv = rc->block_rsv; 2336 rc->reserved_bytes += num_bytes; 2337 2338 /* 2339 * We are under a transaction here so we can only do limited flushing. 2340 * If we get an ENOSPC just kick back -EAGAIN so we know to drop the 2341 * transaction and try to refill when we can flush all the things.
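 *
 * If the refill below does fail, block_rsv->size is grown before
 * returning -EAGAIN. As a worked example (assuming a 16K nodesize):
 * the base is 16K * RELOCATION_RESERVED_NODES = 4M, tmp doubles from
 * that base until it exceeds rc->reserved_bytes, and the new size
 * becomes tmp plus the 4M base.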
*/ 2343 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes, 2344 BTRFS_RESERVE_FLUSH_LIMIT); 2345 if (ret) { 2346 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES; 2347 while (tmp <= rc->reserved_bytes) 2348 tmp <<= 1; 2349 /* 2350 * only one thread can access block_rsv at this point, 2351 * so we don't need to hold a lock to protect block_rsv. 2352 * we expand the reservation size here to allow enough 2353 * space for relocation, and we will return early in the 2354 * ENOSPC case. 2355 */ 2356 rc->block_rsv->size = tmp + fs_info->nodesize * 2357 RELOCATION_RESERVED_NODES; 2358 return -EAGAIN; 2359 } 2360 2361 return 0; 2362 } 2363 2364 /* 2365 * relocate a tree block, and then update pointers in upper level 2366 * blocks that reference it to point to the new location. 2367 * 2368 * if called by link_to_upper, the block has already been relocated; 2369 * in that case this function just updates pointers. 2370 */ 2371 static int do_relocation(struct btrfs_trans_handle *trans, 2372 struct reloc_control *rc, 2373 struct btrfs_backref_node *node, 2374 struct btrfs_key *key, 2375 struct btrfs_path *path, int lowest) 2376 { 2377 struct btrfs_backref_node *upper; 2378 struct btrfs_backref_edge *edge; 2379 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2380 struct btrfs_root *root; 2381 struct extent_buffer *eb; 2382 u32 blocksize; 2383 u64 bytenr; 2384 int slot; 2385 int ret = 0; 2386 2387 /* 2388 * If we are lowest then this is the first time we're processing this 2389 * block, and thus shouldn't have an eb associated with it yet. 2390 */ 2391 ASSERT(!lowest || !node->eb); 2392 2393 path->lowest_level = node->level + 1; 2394 rc->backref_cache.path[node->level] = node; 2395 list_for_each_entry(edge, &node->upper, list[LOWER]) { 2396 struct btrfs_ref ref = { 0 }; 2397 2398 cond_resched(); 2399 2400 upper = edge->node[UPPER]; 2401 root = select_reloc_root(trans, rc, upper, edges); 2402 if (IS_ERR(root)) { 2403 ret = PTR_ERR(root); 2404 goto next; 2405 } 2406 2407 if (upper->eb && !upper->locked) { 2408 if (!lowest) { 2409 ret = btrfs_bin_search(upper->eb, key, &slot); 2410 if (ret < 0) 2411 goto next; 2412 BUG_ON(ret); 2413 bytenr = btrfs_node_blockptr(upper->eb, slot); 2414 if (node->eb->start == bytenr) 2415 goto next; 2416 } 2417 btrfs_backref_drop_node_buffer(upper); 2418 } 2419 2420 if (!upper->eb) { 2421 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2422 if (ret) { 2423 if (ret > 0) 2424 ret = -ENOENT; 2425 2426 btrfs_release_path(path); 2427 break; 2428 } 2429 2430 if (!upper->eb) { 2431 upper->eb = path->nodes[upper->level]; 2432 path->nodes[upper->level] = NULL; 2433 } else { 2434 BUG_ON(upper->eb != path->nodes[upper->level]); 2435 } 2436 2437 upper->locked = 1; 2438 path->locks[upper->level] = 0; 2439 2440 slot = path->slots[upper->level]; 2441 btrfs_release_path(path); 2442 } else { 2443 ret = btrfs_bin_search(upper->eb, key, &slot); 2444 if (ret < 0) 2445 goto next; 2446 BUG_ON(ret); 2447 } 2448 2449 bytenr = btrfs_node_blockptr(upper->eb, slot); 2450 if (lowest) { 2451 if (bytenr != node->bytenr) { 2452 btrfs_err(root->fs_info, 2453 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", 2454 bytenr, node->bytenr, slot, 2455 upper->eb->start); 2456 ret = -EIO; 2457 goto next; 2458 } 2459 } else { 2460 if (node->eb->start == bytenr) 2461 goto next; 2462 } 2463 2464 blocksize = root->fs_info->nodesize; 2465 eb = btrfs_read_node_slot(upper->eb, slot); 2466 if (IS_ERR(eb)) { 2467 ret = PTR_ERR(eb); 2468 goto next; 2469 } 2470
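		/*
		 * Two cases follow: if node->eb is not set yet, btrfs_cow_block()
		 * relocates the block and the backref cache records the new
		 * extent buffer; otherwise the block was already relocated, so
		 * the parent slot is repointed at it and the old subtree
		 * reference is moved via btrfs_inc_extent_ref() +
		 * btrfs_drop_subtree().
		 */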
btrfs_tree_lock(eb); 2471 2472 if (!node->eb) { 2473 ret = btrfs_cow_block(trans, root, eb, upper->eb, 2474 slot, &eb, BTRFS_NESTING_COW); 2475 btrfs_tree_unlock(eb); 2476 free_extent_buffer(eb); 2477 if (ret < 0) 2478 goto next; 2479 /* 2480 * We've just COWed this block; it should have updated 2481 * the correct backref node entry. 2482 */ 2483 ASSERT(node->eb == eb); 2484 } else { 2485 btrfs_set_node_blockptr(upper->eb, slot, 2486 node->eb->start); 2487 btrfs_set_node_ptr_generation(upper->eb, slot, 2488 trans->transid); 2489 btrfs_mark_buffer_dirty(upper->eb); 2490 2491 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, 2492 node->eb->start, blocksize, 2493 upper->eb->start); 2494 btrfs_init_tree_ref(&ref, node->level, 2495 btrfs_header_owner(upper->eb), 2496 root->root_key.objectid, false); 2497 ret = btrfs_inc_extent_ref(trans, &ref); 2498 if (!ret) 2499 ret = btrfs_drop_subtree(trans, root, eb, 2500 upper->eb); 2501 if (ret) 2502 btrfs_abort_transaction(trans, ret); 2503 } 2504 next: 2505 if (!upper->pending) 2506 btrfs_backref_drop_node_buffer(upper); 2507 else 2508 btrfs_backref_unlock_node_buffer(upper); 2509 if (ret) 2510 break; 2511 } 2512 2513 if (!ret && node->pending) { 2514 btrfs_backref_drop_node_buffer(node); 2515 list_move_tail(&node->list, &rc->backref_cache.changed); 2516 node->pending = 0; 2517 } 2518 2519 path->lowest_level = 0; 2520 2521 /* 2522 * We should have allocated all of our space in the block rsv and thus 2523 * shouldn't hit ENOSPC. 2524 */ 2525 ASSERT(ret != -ENOSPC); 2526 return ret; 2527 } 2528 2529 static int link_to_upper(struct btrfs_trans_handle *trans, 2530 struct reloc_control *rc, 2531 struct btrfs_backref_node *node, 2532 struct btrfs_path *path) 2533 { 2534 struct btrfs_key key; 2535 2536 btrfs_node_key_to_cpu(node->eb, &key, 0); 2537 return do_relocation(trans, rc, node, &key, path, 0); 2538 } 2539 2540 static int finish_pending_nodes(struct btrfs_trans_handle *trans, 2541 struct reloc_control *rc, 2542 struct btrfs_path *path, int err) 2543 { 2544 LIST_HEAD(list); 2545 struct btrfs_backref_cache *cache = &rc->backref_cache; 2546 struct btrfs_backref_node *node; 2547 int level; 2548 int ret; 2549 2550 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2551 while (!list_empty(&cache->pending[level])) { 2552 node = list_entry(cache->pending[level].next, 2553 struct btrfs_backref_node, list); 2554 list_move_tail(&node->list, &list); 2555 BUG_ON(!node->pending); 2556 2557 if (!err) { 2558 ret = link_to_upper(trans, rc, node, path); 2559 if (ret < 0) 2560 err = ret; 2561 } 2562 } 2563 list_splice_init(&list, &cache->pending[level]); 2564 } 2565 return err; 2566 } 2567 2568 /* 2569 * mark a block and all blocks that directly or indirectly reference it 2570 * as processed.
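 * The walk mirrors calcu_metadata_size(): follow the first upper edge at
 * each level and use walk_down_backref() to come back for the remaining
 * paths.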
2571 */ 2572 static void update_processed_blocks(struct reloc_control *rc, 2573 struct btrfs_backref_node *node) 2574 { 2575 struct btrfs_backref_node *next = node; 2576 struct btrfs_backref_edge *edge; 2577 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2578 int index = 0; 2579 2580 while (next) { 2581 cond_resched(); 2582 while (1) { 2583 if (next->processed) 2584 break; 2585 2586 mark_block_processed(rc, next); 2587 2588 if (list_empty(&next->upper)) 2589 break; 2590 2591 edge = list_entry(next->upper.next, 2592 struct btrfs_backref_edge, list[LOWER]); 2593 edges[index++] = edge; 2594 next = edge->node[UPPER]; 2595 } 2596 next = walk_down_backref(edges, &index); 2597 } 2598 } 2599 2600 static int tree_block_processed(u64 bytenr, struct reloc_control *rc) 2601 { 2602 u32 blocksize = rc->extent_root->fs_info->nodesize; 2603 2604 if (test_range_bit(&rc->processed_blocks, bytenr, 2605 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2606 return 1; 2607 return 0; 2608 } 2609 2610 static int get_tree_block_key(struct btrfs_fs_info *fs_info, 2611 struct tree_block *block) 2612 { 2613 struct btrfs_tree_parent_check check = { 2614 .level = block->level, 2615 .owner_root = block->owner, 2616 .transid = block->key.offset 2617 }; 2618 struct extent_buffer *eb; 2619 2620 eb = read_tree_block(fs_info, block->bytenr, &check); 2621 if (IS_ERR(eb)) 2622 return PTR_ERR(eb); 2623 if (!extent_buffer_uptodate(eb)) { 2624 free_extent_buffer(eb); 2625 return -EIO; 2626 } 2627 if (block->level == 0) 2628 btrfs_item_key_to_cpu(eb, &block->key, 0); 2629 else 2630 btrfs_node_key_to_cpu(eb, &block->key, 0); 2631 free_extent_buffer(eb); 2632 block->key_ready = 1; 2633 return 0; 2634 } 2635 2636 /* 2637 * helper function to relocate a tree block 2638 */ 2639 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2640 struct reloc_control *rc, 2641 struct btrfs_backref_node *node, 2642 struct btrfs_key *key, 2643 struct btrfs_path *path) 2644 { 2645 struct btrfs_root *root; 2646 int ret = 0; 2647 2648 if (!node) 2649 return 0; 2650 2651 /* 2652 * If we fail here we want to drop our backref_node because we are going 2653 * to start over and regenerate the tree for it. 2654 */ 2655 ret = reserve_metadata_space(trans, rc, node); 2656 if (ret) 2657 goto out; 2658 2659 BUG_ON(node->processed); 2660 root = select_one_root(node); 2661 if (IS_ERR(root)) { 2662 ret = PTR_ERR(root); 2663 2664 /* See explanation in select_one_root for the -EUCLEAN case. */ 2665 ASSERT(ret == -ENOENT); 2666 if (ret == -ENOENT) { 2667 ret = 0; 2668 update_processed_blocks(rc, node); 2669 } 2670 goto out; 2671 } 2672 2673 if (root) { 2674 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 2675 /* 2676 * This block was the root block of a root, and this is 2677 * the first time we're processing the block and thus it 2678 * should not have had the ->new_bytenr modified and 2679 * should have not been included on the changed list. 2680 * 2681 * However in the case of corruption we could have 2682 * multiple refs pointing to the same block improperly, 2683 * and thus we would trip over these checks. ASSERT() 2684 * for the developer case, because it could indicate a 2685 * bug in the backref code, however error out for a 2686 * normal user in the case of corruption. 
2687 */ 2688 ASSERT(node->new_bytenr == 0); 2689 ASSERT(list_empty(&node->list)); 2690 if (node->new_bytenr || !list_empty(&node->list)) { 2691 btrfs_err(root->fs_info, 2692 "bytenr %llu has improper references to it", 2693 node->bytenr); 2694 ret = -EUCLEAN; 2695 goto out; 2696 } 2697 ret = btrfs_record_root_in_trans(trans, root); 2698 if (ret) 2699 goto out; 2700 /* 2701 * Another thread could have failed, need to check if we 2702 * have reloc_root actually set. 2703 */ 2704 if (!root->reloc_root) { 2705 ret = -ENOENT; 2706 goto out; 2707 } 2708 root = root->reloc_root; 2709 node->new_bytenr = root->node->start; 2710 btrfs_put_root(node->root); 2711 node->root = btrfs_grab_root(root); 2712 ASSERT(node->root); 2713 list_add_tail(&node->list, &rc->backref_cache.changed); 2714 } else { 2715 path->lowest_level = node->level; 2716 if (root == root->fs_info->chunk_root) 2717 btrfs_reserve_chunk_metadata(trans, false); 2718 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2719 btrfs_release_path(path); 2720 if (root == root->fs_info->chunk_root) 2721 btrfs_trans_release_chunk_metadata(trans); 2722 if (ret > 0) 2723 ret = 0; 2724 } 2725 if (!ret) 2726 update_processed_blocks(rc, node); 2727 } else { 2728 ret = do_relocation(trans, rc, node, key, path, 1); 2729 } 2730 out: 2731 if (ret || node->level == 0 || node->cowonly) 2732 btrfs_backref_cleanup_node(&rc->backref_cache, node); 2733 return ret; 2734 } 2735 2736 /* 2737 * relocate a list of blocks 2738 */ 2739 static noinline_for_stack 2740 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 2741 struct reloc_control *rc, struct rb_root *blocks) 2742 { 2743 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2744 struct btrfs_backref_node *node; 2745 struct btrfs_path *path; 2746 struct tree_block *block; 2747 struct tree_block *next; 2748 int ret; 2749 int err = 0; 2750 2751 path = btrfs_alloc_path(); 2752 if (!path) { 2753 err = -ENOMEM; 2754 goto out_free_blocks; 2755 } 2756 2757 /* Kick in readahead for tree blocks with missing keys */ 2758 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2759 if (!block->key_ready) 2760 btrfs_readahead_tree_block(fs_info, block->bytenr, 2761 block->owner, 0, 2762 block->level); 2763 } 2764 2765 /* Get first keys */ 2766 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2767 if (!block->key_ready) { 2768 err = get_tree_block_key(fs_info, block); 2769 if (err) 2770 goto out_free_path; 2771 } 2772 } 2773 2774 /* Do tree relocation */ 2775 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2776 node = build_backref_tree(rc, &block->key, 2777 block->level, block->bytenr); 2778 if (IS_ERR(node)) { 2779 err = PTR_ERR(node); 2780 goto out; 2781 } 2782 2783 ret = relocate_tree_block(trans, rc, node, &block->key, 2784 path); 2785 if (ret < 0) { 2786 err = ret; 2787 break; 2788 } 2789 } 2790 out: 2791 err = finish_pending_nodes(trans, rc, path, err); 2792 2793 out_free_path: 2794 btrfs_free_path(path); 2795 out_free_blocks: 2796 free_block_list(blocks); 2797 return err; 2798 } 2799 2800 static noinline_for_stack int prealloc_file_extent_cluster( 2801 struct btrfs_inode *inode, 2802 struct file_extent_cluster *cluster) 2803 { 2804 u64 alloc_hint = 0; 2805 u64 start; 2806 u64 end; 2807 u64 offset = inode->index_cnt; 2808 u64 num_bytes; 2809 int nr; 2810 int ret = 0; 2811 u64 i_size = i_size_read(&inode->vfs_inode); 2812 u64 prealloc_start = cluster->start - offset; 2813 u64 prealloc_end = cluster->end - offset; 2814 u64 cur_offset = 
prealloc_start; 2815 2816 /* 2817 * For subpage case, previous i_size may not be aligned to PAGE_SIZE. 2818 * This means the range [i_size, PAGE_END + 1) is filled with zeros by 2819 * btrfs_do_readpage() call of previously relocated file cluster. 2820 * 2821 * If the current cluster starts in the above range, btrfs_do_readpage() 2822 * will skip the read, and relocate_one_page() will later writeback 2823 * the padding zeros as new data, causing data corruption. 2824 * 2825 * Here we have to manually invalidate the range (i_size, PAGE_END + 1). 2826 */ 2827 if (!IS_ALIGNED(i_size, PAGE_SIZE)) { 2828 struct address_space *mapping = inode->vfs_inode.i_mapping; 2829 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2830 const u32 sectorsize = fs_info->sectorsize; 2831 struct page *page; 2832 2833 ASSERT(sectorsize < PAGE_SIZE); 2834 ASSERT(IS_ALIGNED(i_size, sectorsize)); 2835 2836 /* 2837 * Subpage can't handle page with DIRTY but without UPTODATE 2838 * bit as it can lead to the following deadlock: 2839 * 2840 * btrfs_read_folio() 2841 * | Page already *locked* 2842 * |- btrfs_lock_and_flush_ordered_range() 2843 * |- btrfs_start_ordered_extent() 2844 * |- extent_write_cache_pages() 2845 * |- lock_page() 2846 * We try to lock the page we already hold. 2847 * 2848 * Here we just writeback the whole data reloc inode, so that 2849 * we will be ensured to have no dirty range in the page, and 2850 * are safe to clear the uptodate bits. 2851 * 2852 * This shouldn't cause too much overhead, as we need to write 2853 * the data back anyway. 2854 */ 2855 ret = filemap_write_and_wait(mapping); 2856 if (ret < 0) 2857 return ret; 2858 2859 clear_extent_bits(&inode->io_tree, i_size, 2860 round_up(i_size, PAGE_SIZE) - 1, 2861 EXTENT_UPTODATE); 2862 page = find_lock_page(mapping, i_size >> PAGE_SHIFT); 2863 /* 2864 * If page is freed we don't need to do anything then, as we 2865 * will re-read the whole page anyway. 
2866 */ 2867 if (page) { 2868 btrfs_subpage_clear_uptodate(fs_info, page, i_size, 2869 round_up(i_size, PAGE_SIZE) - i_size); 2870 unlock_page(page); 2871 put_page(page); 2872 } 2873 } 2874 2875 BUG_ON(cluster->start != cluster->boundary[0]); 2876 ret = btrfs_alloc_data_chunk_ondemand(inode, 2877 prealloc_end + 1 - prealloc_start); 2878 if (ret) 2879 return ret; 2880 2881 btrfs_inode_lock(inode, 0); 2882 for (nr = 0; nr < cluster->nr; nr++) { 2883 struct extent_state *cached_state = NULL; 2884 2885 start = cluster->boundary[nr] - offset; 2886 if (nr + 1 < cluster->nr) 2887 end = cluster->boundary[nr + 1] - 1 - offset; 2888 else 2889 end = cluster->end - offset; 2890 2891 lock_extent(&inode->io_tree, start, end, &cached_state); 2892 num_bytes = end + 1 - start; 2893 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start, 2894 num_bytes, num_bytes, 2895 end + 1, &alloc_hint); 2896 cur_offset = end + 1; 2897 unlock_extent(&inode->io_tree, start, end, &cached_state); 2898 if (ret) 2899 break; 2900 } 2901 btrfs_inode_unlock(inode, 0); 2902 2903 if (cur_offset < prealloc_end) 2904 btrfs_free_reserved_data_space_noquota(inode->root->fs_info, 2905 prealloc_end + 1 - cur_offset); 2906 return ret; 2907 } 2908 2909 static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode, 2910 u64 start, u64 end, u64 block_start) 2911 { 2912 struct extent_map *em; 2913 struct extent_state *cached_state = NULL; 2914 int ret = 0; 2915 2916 em = alloc_extent_map(); 2917 if (!em) 2918 return -ENOMEM; 2919 2920 em->start = start; 2921 em->len = end + 1 - start; 2922 em->block_len = em->len; 2923 em->block_start = block_start; 2924 set_bit(EXTENT_FLAG_PINNED, &em->flags); 2925 2926 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); 2927 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false); 2928 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); 2929 free_extent_map(em); 2930 2931 return ret; 2932 } 2933 2934 /* 2935 * Allow error injection to test balance/relocation cancellation 2936 */ 2937 noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info) 2938 { 2939 return atomic_read(&fs_info->balance_cancel_req) || 2940 atomic_read(&fs_info->reloc_cancel_req) || 2941 fatal_signal_pending(current); 2942 } 2943 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE); 2944 2945 static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster, 2946 int cluster_nr) 2947 { 2948 /* Last extent, use cluster end directly */ 2949 if (cluster_nr >= cluster->nr - 1) 2950 return cluster->end; 2951 2952 /* Use next boundary start*/ 2953 return cluster->boundary[cluster_nr + 1] - 1; 2954 } 2955 2956 static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, 2957 struct file_extent_cluster *cluster, 2958 int *cluster_nr, unsigned long page_index) 2959 { 2960 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2961 u64 offset = BTRFS_I(inode)->index_cnt; 2962 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT; 2963 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 2964 struct page *page; 2965 u64 page_start; 2966 u64 page_end; 2967 u64 cur; 2968 int ret; 2969 2970 ASSERT(page_index <= last_index); 2971 page = find_lock_page(inode->i_mapping, page_index); 2972 if (!page) { 2973 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 2974 page_index, last_index + 1 - page_index); 2975 page = find_or_create_page(inode->i_mapping, page_index, mask); 2976 if (!page) 2977 return -ENOMEM; 2978 } 2979 ret = 
set_page_extent_mapped(page); 2980 if (ret < 0) 2981 goto release_page; 2982 2983 if (PageReadahead(page)) 2984 page_cache_async_readahead(inode->i_mapping, ra, NULL, 2985 page_folio(page), page_index, 2986 last_index + 1 - page_index); 2987 2988 if (!PageUptodate(page)) { 2989 btrfs_read_folio(NULL, page_folio(page)); 2990 lock_page(page); 2991 if (!PageUptodate(page)) { 2992 ret = -EIO; 2993 goto release_page; 2994 } 2995 } 2996 2997 page_start = page_offset(page); 2998 page_end = page_start + PAGE_SIZE - 1; 2999 3000 /* 3001 * Start from the cluster, as for subpage case, the cluster can start 3002 * inside the page. 3003 */ 3004 cur = max(page_start, cluster->boundary[*cluster_nr] - offset); 3005 while (cur <= page_end) { 3006 struct extent_state *cached_state = NULL; 3007 u64 extent_start = cluster->boundary[*cluster_nr] - offset; 3008 u64 extent_end = get_cluster_boundary_end(cluster, 3009 *cluster_nr) - offset; 3010 u64 clamped_start = max(page_start, extent_start); 3011 u64 clamped_end = min(page_end, extent_end); 3012 u32 clamped_len = clamped_end + 1 - clamped_start; 3013 3014 /* Reserve metadata for this range */ 3015 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), 3016 clamped_len, clamped_len, 3017 false); 3018 if (ret) 3019 goto release_page; 3020 3021 /* Mark the range delalloc and dirty for later writeback */ 3022 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, 3023 &cached_state); 3024 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start, 3025 clamped_end, 0, &cached_state); 3026 if (ret) { 3027 clear_extent_bit(&BTRFS_I(inode)->io_tree, 3028 clamped_start, clamped_end, 3029 EXTENT_LOCKED | EXTENT_BOUNDARY, 3030 &cached_state); 3031 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3032 clamped_len, true); 3033 btrfs_delalloc_release_extents(BTRFS_I(inode), 3034 clamped_len); 3035 goto release_page; 3036 } 3037 btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len); 3038 3039 /* 3040 * Set the boundary if it's inside the page. 3041 * Data relocation requires the destination extents to have the 3042 * same size as the source. 3043 * EXTENT_BOUNDARY bit prevents current extent from being merged 3044 * with previous extent. 3045 */ 3046 if (in_range(cluster->boundary[*cluster_nr] - offset, 3047 page_start, PAGE_SIZE)) { 3048 u64 boundary_start = cluster->boundary[*cluster_nr] - 3049 offset; 3050 u64 boundary_end = boundary_start + 3051 fs_info->sectorsize - 1; 3052 3053 set_extent_bits(&BTRFS_I(inode)->io_tree, 3054 boundary_start, boundary_end, 3055 EXTENT_BOUNDARY); 3056 } 3057 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, 3058 &cached_state); 3059 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len); 3060 cur += clamped_len; 3061 3062 /* Crossed extent end, go to next extent */ 3063 if (cur >= extent_end) { 3064 (*cluster_nr)++; 3065 /* Just finished the last extent of the cluster, exit. 
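 * With a subpage sectorsize several extents can end inside one page, so
 * when more remain the loop simply continues with the next boundary.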
*/ 3066 if (*cluster_nr >= cluster->nr) 3067 break; 3068 } 3069 } 3070 unlock_page(page); 3071 put_page(page); 3072 3073 balance_dirty_pages_ratelimited(inode->i_mapping); 3074 btrfs_throttle(fs_info); 3075 if (btrfs_should_cancel_balance(fs_info)) 3076 ret = -ECANCELED; 3077 return ret; 3078 3079 release_page: 3080 unlock_page(page); 3081 put_page(page); 3082 return ret; 3083 } 3084 3085 static int relocate_file_extent_cluster(struct inode *inode, 3086 struct file_extent_cluster *cluster) 3087 { 3088 u64 offset = BTRFS_I(inode)->index_cnt; 3089 unsigned long index; 3090 unsigned long last_index; 3091 struct file_ra_state *ra; 3092 int cluster_nr = 0; 3093 int ret = 0; 3094 3095 if (!cluster->nr) 3096 return 0; 3097 3098 ra = kzalloc(sizeof(*ra), GFP_NOFS); 3099 if (!ra) 3100 return -ENOMEM; 3101 3102 ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster); 3103 if (ret) 3104 goto out; 3105 3106 file_ra_state_init(ra, inode->i_mapping); 3107 3108 ret = setup_relocation_extent_mapping(inode, cluster->start - offset, 3109 cluster->end - offset, cluster->start); 3110 if (ret) 3111 goto out; 3112 3113 last_index = (cluster->end - offset) >> PAGE_SHIFT; 3114 for (index = (cluster->start - offset) >> PAGE_SHIFT; 3115 index <= last_index && !ret; index++) 3116 ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index); 3117 if (ret == 0) 3118 WARN_ON(cluster_nr != cluster->nr); 3119 out: 3120 kfree(ra); 3121 return ret; 3122 } 3123 3124 static noinline_for_stack 3125 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key, 3126 struct file_extent_cluster *cluster) 3127 { 3128 int ret; 3129 3130 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 3131 ret = relocate_file_extent_cluster(inode, cluster); 3132 if (ret) 3133 return ret; 3134 cluster->nr = 0; 3135 } 3136 3137 if (!cluster->nr) 3138 cluster->start = extent_key->objectid; 3139 else 3140 BUG_ON(cluster->nr >= MAX_EXTENTS); 3141 cluster->end = extent_key->objectid + extent_key->offset - 1; 3142 cluster->boundary[cluster->nr] = extent_key->objectid; 3143 cluster->nr++; 3144 3145 if (cluster->nr >= MAX_EXTENTS) { 3146 ret = relocate_file_extent_cluster(inode, cluster); 3147 if (ret) 3148 return ret; 3149 cluster->nr = 0; 3150 } 3151 return 0; 3152 } 3153 3154 /* 3155 * helper to add a tree block to the list. 
3156 * the major work is getting the generation and level of the block 3157 */ 3158 static int add_tree_block(struct reloc_control *rc, 3159 struct btrfs_key *extent_key, 3160 struct btrfs_path *path, 3161 struct rb_root *blocks) 3162 { 3163 struct extent_buffer *eb; 3164 struct btrfs_extent_item *ei; 3165 struct btrfs_tree_block_info *bi; 3166 struct tree_block *block; 3167 struct rb_node *rb_node; 3168 u32 item_size; 3169 int level = -1; 3170 u64 generation; 3171 u64 owner = 0; 3172 3173 eb = path->nodes[0]; 3174 item_size = btrfs_item_size(eb, path->slots[0]); 3175 3176 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || 3177 item_size >= sizeof(*ei) + sizeof(*bi)) { 3178 unsigned long ptr = 0, end; 3179 3180 ei = btrfs_item_ptr(eb, path->slots[0], 3181 struct btrfs_extent_item); 3182 end = (unsigned long)ei + item_size; 3183 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { 3184 bi = (struct btrfs_tree_block_info *)(ei + 1); 3185 level = btrfs_tree_block_level(eb, bi); 3186 ptr = (unsigned long)(bi + 1); 3187 } else { 3188 level = (int)extent_key->offset; 3189 ptr = (unsigned long)(ei + 1); 3190 } 3191 generation = btrfs_extent_generation(eb, ei); 3192 3193 /* 3194 * We're reading random blocks without knowing their owner ahead 3195 * of time. This is ok most of the time, as all reloc roots and 3196 * fs roots have the same lock type. However normal trees do 3197 * not, and the only way to know ahead of time is to read the 3198 * inline ref offset. We know it's an fs root if 3199 * 3200 * 1. There's more than one ref. 3201 * 2. There's a SHARED_DATA_REF_KEY set. 3202 * 3. FULL_BACKREF is set on the flags. 3203 * 3204 * Otherwise it's safe to assume that the ref offset == the 3205 * owner of this block, so we can use that when calling 3206 * read_tree_block. 
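 *
 * The check below implements exactly that: a single ref, no FULL_BACKREF
 * flag and an inline TREE_BLOCK_REF let us take the inline ref offset as
 * the owner; in every other case owner stays 0.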
3207 */ 3208 if (btrfs_extent_refs(eb, ei) == 1 && 3209 !(btrfs_extent_flags(eb, ei) & 3210 BTRFS_BLOCK_FLAG_FULL_BACKREF) && 3211 ptr < end) { 3212 struct btrfs_extent_inline_ref *iref; 3213 int type; 3214 3215 iref = (struct btrfs_extent_inline_ref *)ptr; 3216 type = btrfs_get_extent_inline_ref_type(eb, iref, 3217 BTRFS_REF_TYPE_BLOCK); 3218 if (type == BTRFS_REF_TYPE_INVALID) 3219 return -EINVAL; 3220 if (type == BTRFS_TREE_BLOCK_REF_KEY) 3221 owner = btrfs_extent_inline_ref_offset(eb, iref); 3222 } 3223 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) { 3224 btrfs_print_v0_err(eb->fs_info); 3225 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL); 3226 return -EINVAL; 3227 } else { 3228 BUG(); 3229 } 3230 3231 btrfs_release_path(path); 3232 3233 BUG_ON(level == -1); 3234 3235 block = kmalloc(sizeof(*block), GFP_NOFS); 3236 if (!block) 3237 return -ENOMEM; 3238 3239 block->bytenr = extent_key->objectid; 3240 block->key.objectid = rc->extent_root->fs_info->nodesize; 3241 block->key.offset = generation; 3242 block->level = level; 3243 block->key_ready = 0; 3244 block->owner = owner; 3245 3246 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node); 3247 if (rb_node) 3248 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr, 3249 -EEXIST); 3250 3251 return 0; 3252 } 3253 3254 /* 3255 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY 3256 */ 3257 static int __add_tree_block(struct reloc_control *rc, 3258 u64 bytenr, u32 blocksize, 3259 struct rb_root *blocks) 3260 { 3261 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3262 struct btrfs_path *path; 3263 struct btrfs_key key; 3264 int ret; 3265 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 3266 3267 if (tree_block_processed(bytenr, rc)) 3268 return 0; 3269 3270 if (rb_simple_search(blocks, bytenr)) 3271 return 0; 3272 3273 path = btrfs_alloc_path(); 3274 if (!path) 3275 return -ENOMEM; 3276 again: 3277 key.objectid = bytenr; 3278 if (skinny) { 3279 key.type = BTRFS_METADATA_ITEM_KEY; 3280 key.offset = (u64)-1; 3281 } else { 3282 key.type = BTRFS_EXTENT_ITEM_KEY; 3283 key.offset = blocksize; 3284 } 3285 3286 path->search_commit_root = 1; 3287 path->skip_locking = 1; 3288 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); 3289 if (ret < 0) 3290 goto out; 3291 3292 if (ret > 0 && skinny) { 3293 if (path->slots[0]) { 3294 path->slots[0]--; 3295 btrfs_item_key_to_cpu(path->nodes[0], &key, 3296 path->slots[0]); 3297 if (key.objectid == bytenr && 3298 (key.type == BTRFS_METADATA_ITEM_KEY || 3299 (key.type == BTRFS_EXTENT_ITEM_KEY && 3300 key.offset == blocksize))) 3301 ret = 0; 3302 } 3303 3304 if (ret) { 3305 skinny = false; 3306 btrfs_release_path(path); 3307 goto again; 3308 } 3309 } 3310 if (ret) { 3311 ASSERT(ret == 1); 3312 btrfs_print_leaf(path->nodes[0]); 3313 btrfs_err(fs_info, 3314 "tree block extent item (%llu) is not found in extent tree", 3315 bytenr); 3316 WARN_ON(1); 3317 ret = -EINVAL; 3318 goto out; 3319 } 3320 3321 ret = add_tree_block(rc, &key, path, blocks); 3322 out: 3323 btrfs_free_path(path); 3324 return ret; 3325 } 3326 3327 static int delete_block_group_cache(struct btrfs_fs_info *fs_info, 3328 struct btrfs_block_group *block_group, 3329 struct inode *inode, 3330 u64 ino) 3331 { 3332 struct btrfs_root *root = fs_info->tree_root; 3333 struct btrfs_trans_handle *trans; 3334 int ret = 0; 3335 3336 if (inode) 3337 goto truncate; 3338 3339 inode = btrfs_iget(fs_info->sb, ino, root); 3340 if (IS_ERR(inode)) 3341 return -ENOENT; 3342 
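	/*
	 * Truncate the free space cache inside a joined transaction, so its
	 * data extents no longer block data relocation (see the comment at
	 * delete_v1_space_cache()).
	 */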
3343 truncate: 3344 ret = btrfs_check_trunc_cache_free_space(fs_info, 3345 &fs_info->global_block_rsv); 3346 if (ret) 3347 goto out; 3348 3349 trans = btrfs_join_transaction(root); 3350 if (IS_ERR(trans)) { 3351 ret = PTR_ERR(trans); 3352 goto out; 3353 } 3354 3355 ret = btrfs_truncate_free_space_cache(trans, block_group, inode); 3356 3357 btrfs_end_transaction(trans); 3358 btrfs_btree_balance_dirty(fs_info); 3359 out: 3360 iput(inode); 3361 return ret; 3362 } 3363 3364 /* 3365 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the 3366 * cache inode, to avoid free space cache data extent blocking data relocation. 3367 */ 3368 static int delete_v1_space_cache(struct extent_buffer *leaf, 3369 struct btrfs_block_group *block_group, 3370 u64 data_bytenr) 3371 { 3372 u64 space_cache_ino; 3373 struct btrfs_file_extent_item *ei; 3374 struct btrfs_key key; 3375 bool found = false; 3376 int i; 3377 int ret; 3378 3379 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID) 3380 return 0; 3381 3382 for (i = 0; i < btrfs_header_nritems(leaf); i++) { 3383 u8 type; 3384 3385 btrfs_item_key_to_cpu(leaf, &key, i); 3386 if (key.type != BTRFS_EXTENT_DATA_KEY) 3387 continue; 3388 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 3389 type = btrfs_file_extent_type(leaf, ei); 3390 3391 if ((type == BTRFS_FILE_EXTENT_REG || 3392 type == BTRFS_FILE_EXTENT_PREALLOC) && 3393 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) { 3394 found = true; 3395 space_cache_ino = key.objectid; 3396 break; 3397 } 3398 } 3399 if (!found) 3400 return -ENOENT; 3401 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL, 3402 space_cache_ino); 3403 return ret; 3404 } 3405 3406 /* 3407 * helper to find all tree blocks that reference a given data extent 3408 */ 3409 static noinline_for_stack 3410 int add_data_references(struct reloc_control *rc, 3411 struct btrfs_key *extent_key, 3412 struct btrfs_path *path, 3413 struct rb_root *blocks) 3414 { 3415 struct btrfs_backref_walk_ctx ctx = { 0 }; 3416 struct ulist_iterator leaf_uiter; 3417 struct ulist_node *ref_node = NULL; 3418 const u32 blocksize = rc->extent_root->fs_info->nodesize; 3419 int ret = 0; 3420 3421 btrfs_release_path(path); 3422 3423 ctx.bytenr = extent_key->objectid; 3424 ctx.ignore_extent_item_pos = true; 3425 ctx.fs_info = rc->extent_root->fs_info; 3426 3427 ret = btrfs_find_all_leafs(&ctx); 3428 if (ret < 0) 3429 return ret; 3430 3431 ULIST_ITER_INIT(&leaf_uiter); 3432 while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) { 3433 struct btrfs_tree_parent_check check = { 0 }; 3434 struct extent_buffer *eb; 3435 3436 eb = read_tree_block(ctx.fs_info, ref_node->val, &check); 3437 if (IS_ERR(eb)) { 3438 ret = PTR_ERR(eb); 3439 break; 3440 } 3441 ret = delete_v1_space_cache(eb, rc->block_group, 3442 extent_key->objectid); 3443 free_extent_buffer(eb); 3444 if (ret < 0) 3445 break; 3446 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks); 3447 if (ret < 0) 3448 break; 3449 } 3450 if (ret < 0) 3451 free_block_list(blocks); 3452 ulist_free(ctx.refs); 3453 return ret; 3454 } 3455 3456 /* 3457 * helper to find next unprocessed extent 3458 */ 3459 static noinline_for_stack 3460 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, 3461 struct btrfs_key *extent_key) 3462 { 3463 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3464 struct btrfs_key key; 3465 struct extent_buffer *leaf; 3466 u64 start, end, last; 3467 int ret; 3468 3469 last = rc->block_group->start + rc->block_group->length; 3470 
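	/*
	 * Walk the extent tree from rc->search_start for the next
	 * EXTENT_ITEM/METADATA_ITEM inside the block group, skipping ranges
	 * already marked EXTENT_DIRTY in rc->processed_blocks.
	 */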
while (1) { 3471 cond_resched(); 3472 if (rc->search_start >= last) { 3473 ret = 1; 3474 break; 3475 } 3476 3477 key.objectid = rc->search_start; 3478 key.type = BTRFS_EXTENT_ITEM_KEY; 3479 key.offset = 0; 3480 3481 path->search_commit_root = 1; 3482 path->skip_locking = 1; 3483 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 3484 0, 0); 3485 if (ret < 0) 3486 break; 3487 next: 3488 leaf = path->nodes[0]; 3489 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3490 ret = btrfs_next_leaf(rc->extent_root, path); 3491 if (ret != 0) 3492 break; 3493 leaf = path->nodes[0]; 3494 } 3495 3496 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3497 if (key.objectid >= last) { 3498 ret = 1; 3499 break; 3500 } 3501 3502 if (key.type != BTRFS_EXTENT_ITEM_KEY && 3503 key.type != BTRFS_METADATA_ITEM_KEY) { 3504 path->slots[0]++; 3505 goto next; 3506 } 3507 3508 if (key.type == BTRFS_EXTENT_ITEM_KEY && 3509 key.objectid + key.offset <= rc->search_start) { 3510 path->slots[0]++; 3511 goto next; 3512 } 3513 3514 if (key.type == BTRFS_METADATA_ITEM_KEY && 3515 key.objectid + fs_info->nodesize <= 3516 rc->search_start) { 3517 path->slots[0]++; 3518 goto next; 3519 } 3520 3521 ret = find_first_extent_bit(&rc->processed_blocks, 3522 key.objectid, &start, &end, 3523 EXTENT_DIRTY, NULL); 3524 3525 if (ret == 0 && start <= key.objectid) { 3526 btrfs_release_path(path); 3527 rc->search_start = end + 1; 3528 } else { 3529 if (key.type == BTRFS_EXTENT_ITEM_KEY) 3530 rc->search_start = key.objectid + key.offset; 3531 else 3532 rc->search_start = key.objectid + 3533 fs_info->nodesize; 3534 memcpy(extent_key, &key, sizeof(key)); 3535 return 0; 3536 } 3537 } 3538 btrfs_release_path(path); 3539 return ret; 3540 } 3541 3542 static void set_reloc_control(struct reloc_control *rc) 3543 { 3544 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3545 3546 mutex_lock(&fs_info->reloc_mutex); 3547 fs_info->reloc_ctl = rc; 3548 mutex_unlock(&fs_info->reloc_mutex); 3549 } 3550 3551 static void unset_reloc_control(struct reloc_control *rc) 3552 { 3553 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3554 3555 mutex_lock(&fs_info->reloc_mutex); 3556 fs_info->reloc_ctl = NULL; 3557 mutex_unlock(&fs_info->reloc_mutex); 3558 } 3559 3560 static noinline_for_stack 3561 int prepare_to_relocate(struct reloc_control *rc) 3562 { 3563 struct btrfs_trans_handle *trans; 3564 int ret; 3565 3566 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info, 3567 BTRFS_BLOCK_RSV_TEMP); 3568 if (!rc->block_rsv) 3569 return -ENOMEM; 3570 3571 memset(&rc->cluster, 0, sizeof(rc->cluster)); 3572 rc->search_start = rc->block_group->start; 3573 rc->extents_found = 0; 3574 rc->nodes_relocated = 0; 3575 rc->merging_rsv_size = 0; 3576 rc->reserved_bytes = 0; 3577 rc->block_rsv->size = rc->extent_root->fs_info->nodesize * 3578 RELOCATION_RESERVED_NODES; 3579 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info, 3580 rc->block_rsv, rc->block_rsv->size, 3581 BTRFS_RESERVE_FLUSH_ALL); 3582 if (ret) 3583 return ret; 3584 3585 rc->create_reloc_tree = 1; 3586 set_reloc_control(rc); 3587 3588 trans = btrfs_join_transaction(rc->extent_root); 3589 if (IS_ERR(trans)) { 3590 unset_reloc_control(rc); 3591 /* 3592 * extent tree is not a ref_cow tree and has no reloc_root to 3593 * cleanup. And callers are responsible to free the above 3594 * block rsv. 
3595 */ 3596 return PTR_ERR(trans); 3597 } 3598 3599 ret = btrfs_commit_transaction(trans); 3600 if (ret) 3601 unset_reloc_control(rc); 3602 3603 return ret; 3604 } 3605 3606 static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 3607 { 3608 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3609 struct rb_root blocks = RB_ROOT; 3610 struct btrfs_key key; 3611 struct btrfs_trans_handle *trans = NULL; 3612 struct btrfs_path *path; 3613 struct btrfs_extent_item *ei; 3614 u64 flags; 3615 int ret; 3616 int err = 0; 3617 int progress = 0; 3618 3619 path = btrfs_alloc_path(); 3620 if (!path) 3621 return -ENOMEM; 3622 path->reada = READA_FORWARD; 3623 3624 ret = prepare_to_relocate(rc); 3625 if (ret) { 3626 err = ret; 3627 goto out_free; 3628 } 3629 3630 while (1) { 3631 rc->reserved_bytes = 0; 3632 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, 3633 rc->block_rsv->size, 3634 BTRFS_RESERVE_FLUSH_ALL); 3635 if (ret) { 3636 err = ret; 3637 break; 3638 } 3639 progress++; 3640 trans = btrfs_start_transaction(rc->extent_root, 0); 3641 if (IS_ERR(trans)) { 3642 err = PTR_ERR(trans); 3643 trans = NULL; 3644 break; 3645 } 3646 restart: 3647 if (update_backref_cache(trans, &rc->backref_cache)) { 3648 btrfs_end_transaction(trans); 3649 trans = NULL; 3650 continue; 3651 } 3652 3653 ret = find_next_extent(rc, path, &key); 3654 if (ret < 0) 3655 err = ret; 3656 if (ret != 0) 3657 break; 3658 3659 rc->extents_found++; 3660 3661 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 3662 struct btrfs_extent_item); 3663 flags = btrfs_extent_flags(path->nodes[0], ei); 3664 3665 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 3666 ret = add_tree_block(rc, &key, path, &blocks); 3667 } else if (rc->stage == UPDATE_DATA_PTRS && 3668 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3669 ret = add_data_references(rc, &key, path, &blocks); 3670 } else { 3671 btrfs_release_path(path); 3672 ret = 0; 3673 } 3674 if (ret < 0) { 3675 err = ret; 3676 break; 3677 } 3678 3679 if (!RB_EMPTY_ROOT(&blocks)) { 3680 ret = relocate_tree_blocks(trans, rc, &blocks); 3681 if (ret < 0) { 3682 if (ret != -EAGAIN) { 3683 err = ret; 3684 break; 3685 } 3686 rc->extents_found--; 3687 rc->search_start = key.objectid; 3688 } 3689 } 3690 3691 btrfs_end_transaction_throttle(trans); 3692 btrfs_btree_balance_dirty(fs_info); 3693 trans = NULL; 3694 3695 if (rc->stage == MOVE_DATA_EXTENTS && 3696 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3697 rc->found_file_extent = 1; 3698 ret = relocate_data_extent(rc->data_inode, 3699 &key, &rc->cluster); 3700 if (ret < 0) { 3701 err = ret; 3702 break; 3703 } 3704 } 3705 if (btrfs_should_cancel_balance(fs_info)) { 3706 err = -ECANCELED; 3707 break; 3708 } 3709 } 3710 if (trans && progress && err == -ENOSPC) { 3711 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags); 3712 if (ret == 1) { 3713 err = 0; 3714 progress = 0; 3715 goto restart; 3716 } 3717 } 3718 3719 btrfs_release_path(path); 3720 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); 3721 3722 if (trans) { 3723 btrfs_end_transaction_throttle(trans); 3724 btrfs_btree_balance_dirty(fs_info); 3725 } 3726 3727 if (!err) { 3728 ret = relocate_file_extent_cluster(rc->data_inode, 3729 &rc->cluster); 3730 if (ret < 0) 3731 err = ret; 3732 } 3733 3734 rc->create_reloc_tree = 0; 3735 set_reloc_control(rc); 3736 3737 btrfs_backref_release_cache(&rc->backref_cache); 3738 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); 3739 3740 /* 3741 * Even in the case when the relocation is cancelled, we should all go 3742 * 
through prepare_to_merge() and merge_reloc_roots(). 3743 * 3744 * For error (including cancelled balance), prepare_to_merge() will 3745 * mark all reloc trees orphan, then queue them for cleanup in 3746 * merge_reloc_roots() 3747 */ 3748 err = prepare_to_merge(rc, err); 3749 3750 merge_reloc_roots(rc); 3751 3752 rc->merge_reloc_tree = 0; 3753 unset_reloc_control(rc); 3754 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); 3755 3756 /* get rid of pinned extents */ 3757 trans = btrfs_join_transaction(rc->extent_root); 3758 if (IS_ERR(trans)) { 3759 err = PTR_ERR(trans); 3760 goto out_free; 3761 } 3762 ret = btrfs_commit_transaction(trans); 3763 if (ret && !err) 3764 err = ret; 3765 out_free: 3766 ret = clean_dirty_subvols(rc); 3767 if (ret < 0 && !err) 3768 err = ret; 3769 btrfs_free_block_rsv(fs_info, rc->block_rsv); 3770 btrfs_free_path(path); 3771 return err; 3772 } 3773 3774 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 3775 struct btrfs_root *root, u64 objectid) 3776 { 3777 struct btrfs_path *path; 3778 struct btrfs_inode_item *item; 3779 struct extent_buffer *leaf; 3780 int ret; 3781 3782 path = btrfs_alloc_path(); 3783 if (!path) 3784 return -ENOMEM; 3785 3786 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 3787 if (ret) 3788 goto out; 3789 3790 leaf = path->nodes[0]; 3791 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 3792 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3793 btrfs_set_inode_generation(leaf, item, 1); 3794 btrfs_set_inode_size(leaf, item, 0); 3795 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 3796 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 3797 BTRFS_INODE_PREALLOC); 3798 btrfs_mark_buffer_dirty(leaf); 3799 out: 3800 btrfs_free_path(path); 3801 return ret; 3802 } 3803 3804 static void delete_orphan_inode(struct btrfs_trans_handle *trans, 3805 struct btrfs_root *root, u64 objectid) 3806 { 3807 struct btrfs_path *path; 3808 struct btrfs_key key; 3809 int ret = 0; 3810 3811 path = btrfs_alloc_path(); 3812 if (!path) { 3813 ret = -ENOMEM; 3814 goto out; 3815 } 3816 3817 key.objectid = objectid; 3818 key.type = BTRFS_INODE_ITEM_KEY; 3819 key.offset = 0; 3820 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3821 if (ret) { 3822 if (ret > 0) 3823 ret = -ENOENT; 3824 goto out; 3825 } 3826 ret = btrfs_del_item(trans, root, path); 3827 out: 3828 if (ret) 3829 btrfs_abort_transaction(trans, ret); 3830 btrfs_free_path(path); 3831 } 3832 3833 /* 3834 * helper to create inode for data relocation. 
3835 * the inode is in data relocation tree and its link count is 0 3836 */ 3837 static noinline_for_stack 3838 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, 3839 struct btrfs_block_group *group) 3840 { 3841 struct inode *inode = NULL; 3842 struct btrfs_trans_handle *trans; 3843 struct btrfs_root *root; 3844 u64 objectid; 3845 int err = 0; 3846 3847 root = btrfs_grab_root(fs_info->data_reloc_root); 3848 trans = btrfs_start_transaction(root, 6); 3849 if (IS_ERR(trans)) { 3850 btrfs_put_root(root); 3851 return ERR_CAST(trans); 3852 } 3853 3854 err = btrfs_get_free_objectid(root, &objectid); 3855 if (err) 3856 goto out; 3857 3858 err = __insert_orphan_inode(trans, root, objectid); 3859 if (err) 3860 goto out; 3861 3862 inode = btrfs_iget(fs_info->sb, objectid, root); 3863 if (IS_ERR(inode)) { 3864 delete_orphan_inode(trans, root, objectid); 3865 err = PTR_ERR(inode); 3866 inode = NULL; 3867 goto out; 3868 } 3869 BTRFS_I(inode)->index_cnt = group->start; 3870 3871 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 3872 out: 3873 btrfs_put_root(root); 3874 btrfs_end_transaction(trans); 3875 btrfs_btree_balance_dirty(fs_info); 3876 if (err) { 3877 iput(inode); 3878 inode = ERR_PTR(err); 3879 } 3880 return inode; 3881 } 3882 3883 /* 3884 * Mark start of chunk relocation that is cancellable. Check if the cancellation 3885 * has been requested meanwhile and don't start in that case. 3886 * 3887 * Return: 3888 * 0 success 3889 * -EINPROGRESS operation is already in progress, that's probably a bug 3890 * -ECANCELED cancellation request was set before the operation started 3891 */ 3892 static int reloc_chunk_start(struct btrfs_fs_info *fs_info) 3893 { 3894 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) { 3895 /* This should not happen */ 3896 btrfs_err(fs_info, "reloc already running, cannot start"); 3897 return -EINPROGRESS; 3898 } 3899 3900 if (atomic_read(&fs_info->reloc_cancel_req) > 0) { 3901 btrfs_info(fs_info, "chunk relocation canceled on start"); 3902 /* 3903 * On cancel, clear all requests but let the caller mark 3904 * the end after cleanup operations. 3905 */ 3906 atomic_set(&fs_info->reloc_cancel_req, 0); 3907 return -ECANCELED; 3908 } 3909 return 0; 3910 } 3911 3912 /* 3913 * Mark end of chunk relocation that is cancellable and wake any waiters. 
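 * Pairs with reloc_chunk_start(), which sets BTRFS_FS_RELOC_RUNNING.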
3914 */ 3915 static void reloc_chunk_end(struct btrfs_fs_info *fs_info) 3916 { 3917 /* Requested after start, clear bit first so any waiters can continue */ 3918 if (atomic_read(&fs_info->reloc_cancel_req) > 0) 3919 btrfs_info(fs_info, "chunk relocation canceled during operation"); 3920 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags); 3921 atomic_set(&fs_info->reloc_cancel_req, 0); 3922 } 3923 3924 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) 3925 { 3926 struct reloc_control *rc; 3927 3928 rc = kzalloc(sizeof(*rc), GFP_NOFS); 3929 if (!rc) 3930 return NULL; 3931 3932 INIT_LIST_HEAD(&rc->reloc_roots); 3933 INIT_LIST_HEAD(&rc->dirty_subvol_roots); 3934 btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1); 3935 mapping_tree_init(&rc->reloc_root_tree); 3936 extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS); 3937 return rc; 3938 } 3939 3940 static void free_reloc_control(struct reloc_control *rc) 3941 { 3942 struct mapping_node *node, *tmp; 3943 3944 free_reloc_roots(&rc->reloc_roots); 3945 rbtree_postorder_for_each_entry_safe(node, tmp, 3946 &rc->reloc_root_tree.rb_root, rb_node) 3947 kfree(node); 3948 3949 kfree(rc); 3950 } 3951 3952 /* 3953 * Print the block group being relocated 3954 */ 3955 static void describe_relocation(struct btrfs_fs_info *fs_info, 3956 struct btrfs_block_group *block_group) 3957 { 3958 char buf[128] = {'\0'}; 3959 3960 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf)); 3961 3962 btrfs_info(fs_info, 3963 "relocating block group %llu flags %s", 3964 block_group->start, buf); 3965 } 3966 3967 static const char *stage_to_string(int stage) 3968 { 3969 if (stage == MOVE_DATA_EXTENTS) 3970 return "move data extents"; 3971 if (stage == UPDATE_DATA_PTRS) 3972 return "update data pointers"; 3973 return "unknown"; 3974 } 3975 3976 /* 3977 * function to relocate all extents in a block group. 3978 */ 3979 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) 3980 { 3981 struct btrfs_block_group *bg; 3982 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start); 3983 struct reloc_control *rc; 3984 struct inode *inode; 3985 struct btrfs_path *path; 3986 int ret; 3987 int rw = 0; 3988 int err = 0; 3989 3990 /* 3991 * This only gets set if we had a half-deleted snapshot on mount. We 3992 * cannot allow relocation to start while we're still trying to clean up 3993 * these pending deletions. 3994 */ 3995 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE); 3996 if (ret) 3997 return ret; 3998 3999 /* We may have been woken up by close_ctree, so bail if we're closing. */ 4000 if (btrfs_fs_closing(fs_info)) 4001 return -EINTR; 4002 4003 bg = btrfs_lookup_block_group(fs_info, group_start); 4004 if (!bg) 4005 return -ENOENT; 4006 4007 /* 4008 * Relocation of a data block group creates ordered extents. Without 4009 * sb_start_write(), we can freeze the filesystem while unfinished 4010 * ordered extents are left. Such ordered extents can cause a deadlock 4011 * e.g. when syncfs() is waiting for their completion but they can't 4012 * finish because they block when joining a transaction, due to the 4013 * fact that the freeze locks are being held in write mode. 
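 * Hence the assertion below that sb_start_write() has been entered
 * before relocating a data block group.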

/*
 * Relocate all extents in a block group.
 */
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
	struct btrfs_block_group *bg;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
	struct reloc_control *rc;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	int rw = 0;
	int err = 0;

	/*
	 * This only gets set if we had a half-deleted snapshot on mount. We
	 * cannot allow relocation to start while we're still trying to clean up
	 * these pending deletions.
	 */
	ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
	if (ret)
		return ret;

	/* We may have been woken up by close_ctree, so bail if we're closing. */
	if (btrfs_fs_closing(fs_info))
		return -EINTR;

	bg = btrfs_lookup_block_group(fs_info, group_start);
	if (!bg)
		return -ENOENT;

	/*
	 * Relocation of a data block group creates ordered extents. Without
	 * sb_start_write(), we can freeze the filesystem while unfinished
	 * ordered extents are left. Such ordered extents can cause a deadlock
	 * e.g. when syncfs() is waiting for their completion but they can't
	 * finish because they block when joining a transaction, due to the
	 * fact that the freeze locks are being held in write mode.
	 */
	if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
		ASSERT(sb_write_started(fs_info->sb));

	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
		btrfs_put_block_group(bg);
		return -ETXTBSY;
	}

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		btrfs_put_block_group(bg);
		return -ENOMEM;
	}

	ret = reloc_chunk_start(fs_info);
	if (ret < 0) {
		err = ret;
		goto out_put_bg;
	}

	rc->extent_root = extent_root;
	rc->block_group = bg;

	ret = btrfs_inc_block_group_ro(rc->block_group, true);
	if (ret) {
		err = ret;
		goto out;
	}
	rw = 1;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(rc->block_group, path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
	else
		ret = PTR_ERR(inode);

	if (ret && ret != -ENOENT) {
		err = ret;
		goto out;
	}

	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

	describe_relocation(fs_info, rc->block_group);

	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
	btrfs_wait_ordered_roots(fs_info, U64_MAX,
				 rc->block_group->start,
				 rc->block_group->length);

	ret = btrfs_zone_finish(rc->block_group);
	WARN_ON(ret && ret != -EAGAIN);

	while (1) {
		int finishes_stage;

		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0)
			err = ret;

		finishes_stage = rc->stage;
		/*
		 * We may have gotten ENOSPC after we already dirtied some
		 * extents. If writeout happens while we're relocating a
		 * different block group we could end up hitting the
		 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
		 * btrfs_reloc_cow_block. Make sure we write everything out
		 * properly so we don't trip over this problem, and then break
		 * out of the loop if we hit an error.
		 */
		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
						       (u64)-1);
			if (ret)
				err = ret;
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}

		if (err < 0)
			goto out;

		if (rc->extents_found == 0)
			break;

		btrfs_info(fs_info, "found %llu extents, stage: %s",
			   rc->extents_found, stage_to_string(finishes_stage));
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(rc->block_group->used > 0);
out:
	if (err && rw)
		btrfs_dec_block_group_ro(rc->block_group);
	iput(rc->data_inode);
out_put_bg:
	btrfs_put_block_group(bg);
	reloc_chunk_end(fs_info);
	free_reloc_control(rc);
	return err;
}

static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret, err;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
	       sizeof(root->root_item.drop_progress));
	btrfs_set_root_drop_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);

	err = btrfs_end_transaction(trans);
	if (err)
		return err;
	return ret;
}
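
/*
 * Illustrative note (not compiled): reloc root items all live in the tree
 * root under a fixed objectid, with the source subvolume id in the key
 * offset, e.g.:
 *
 *	(BTRFS_TREE_RELOC_OBJECTID, BTRFS_ROOT_ITEM_KEY, 5)	// fs tree
 *	(BTRFS_TREE_RELOC_OBJECTID, BTRFS_ROOT_ITEM_KEY, 257)	// subvol 257
 *
 * btrfs_recover_relocation() below therefore starts its search at offset
 * (u64)-1 and walks the items backwards, decrementing key.offset, until it
 * leaves the BTRFS_TREE_RELOC_OBJECTID range.
 */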

/*
 * Recover relocation interrupted by a system crash.
 *
 * This function resumes merging reloc trees with corresponding fs trees,
 * which is important for keeping the sharing of tree blocks.
 */
int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
{
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = btrfs_get_fs_root(fs_info,
					reloc_root->root_key.offset, false);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			} else {
				btrfs_put_root(fs_root);
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	ret = reloc_chunk_start(fs_info);
	if (ret < 0) {
		err = ret;
		goto out_end;
	}

	rc->extent_root = btrfs_extent_root(fs_info, 0);

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_unset;
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					    false);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_end_transaction(trans);
			goto out_unset;
		}

		err = __add_reloc_root(reloc_root);
		ASSERT(err != -EEXIST);
		if (err) {
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_put_root(fs_root);
			btrfs_end_transaction(trans);
			goto out_unset;
		}
		fs_root->reloc_root = btrfs_grab_root(reloc_root);
		btrfs_put_root(fs_root);
	}

	err = btrfs_commit_transaction(trans);
	if (err)
		goto out_unset;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_clean;
	}
	err = btrfs_commit_transaction(trans);
out_clean:
	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
out_unset:
	unset_reloc_control(rc);
out_end:
	reloc_chunk_end(fs_info);
	free_reloc_control(rc);
out:
	free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

	if (err == 0) {
		/* cleanup orphan inode in the data relocation tree */
		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
		ASSERT(fs_root);
		err = btrfs_orphan_cleanup(fs_root);
		btrfs_put_root(fs_root);
	}
	return err;
}

/*
 * Helper to add ordered checksums for data relocation.
 *
 * Cloning the existing checksums properly handles nodatasum extents, and it
 * also saves the CPU time that recalculating the checksums would cost.
 */
int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *csum_root;
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);

	disk_bytenr = file_pos + inode->index_cnt;
	csum_root = btrfs_csum_root(fs_info, disk_bytenr);
	ret = btrfs_lookup_csums_range(csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list, 0, false);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}
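
/*
 * Worked example for the remapping above (numbers are made up): the data
 * reloc inode has index_cnt == group->start, so with index_cnt == 1G, file
 * offset 0 maps to old disk bytenr 1G. For a csum item found at
 * sums->bytenr == 1G + 64K inside an ordered extent written at
 * ordered->disk_bytenr == 5G:
 *
 *	disk_bytenr = file_pos + index_cnt	= 0 + 1G	= 1G
 *	new_bytenr  = 5G + (1G + 64K) - 1G	= 5G + 64K
 *
 * i.e. the csum keeps its 64K offset into the extent while moving from the
 * old location to the newly allocated one.
 */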

int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct reloc_control *rc;
	struct btrfs_backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		btrfs_backref_drop_node_buffer(node);
		atomic_inc(&cow->refs);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}

/*
 * Called before creating a snapshot. It calculates the metadata reservation
 * required for relocating tree blocks in the snapshot.
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root = pending->root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	if (!rc || !have_reloc_root(root))
		return;

	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * Relocation is in the stage of merging trees. The space used by
	 * merging a reloc tree is twice the size of the relocated tree nodes
	 * in the worst case: half for cowing the reloc tree, half for cowing
	 * the fs tree. The space used by cowing the reloc tree will be freed
	 * after the tree is dropped. If we create a snapshot, cowing the fs
	 * tree may use more space than it frees, so we need to reserve extra
	 * space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}

/*
 * Called after the snapshot is created. It migrates the block reservation
 * and creates a reloc root for the newly created snapshot.
 *
 * This is similar to btrfs_init_reloc_root(), we come out of here with two
 * references held on the reloc_root, one for root->reloc_root and one for
 * rc->reloc_roots.
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	int ret;

	if (!rc || !have_reloc_root(root))
		return 0;

	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, true);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	new_root->reloc_root = btrfs_grab_root(reloc_root);

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}
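
/*
 * Worked example for the snapshot reservation above (illustrative numbers):
 * if rc->nodes_relocated is 16M when a snapshot is created during the merge
 * stage, btrfs_reloc_pre_snapshot() adds 16M to the snapshot's reservation,
 * and btrfs_reloc_post_snapshot() then migrates those 16M from the pending
 * snapshot's block_rsv into rc->block_rsv:
 *
 *	*bytes_to_reserve += 16M;				// pre
 *	btrfs_block_rsv_migrate(&pending->block_rsv,
 *				rc->block_rsv, 16M, true);	// post
 *
 * This matches the comment in btrfs_reloc_pre_snapshot(): the fs-tree half
 * of the 2x merge estimate may not be freed once a snapshot shares those
 * blocks, so the extra space is reserved up front.
 */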