// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "qgroup.h"
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "backref.h"
#include "misc.h"
#include "subpage.h"
#include "zoned.h"
#include "inode-item.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "relocation.h"

/*
 * Relocation overview
 *
 * [What does relocation do]
 *
 * The objective of relocation is to relocate all extents of the target block
 * group to other block groups.
 * This is utilized by resize (shrink only), profile conversion, compacting
 * space, and the balance routine to spread chunks over devices.
 *
 *		Before		|		After
 * ------------------------------------------------------------------
 *  BG A: 10 data extents	| BG A: deleted
 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
 *  BG C:  1 data extent	| BG C:  3 data extents (1 old + 2 relocated)
 *
 * [How does relocation work]
 *
 * 1.   Mark the target block group read-only
 *      New extents won't be allocated from the target block group.
 *
 * 2.1  Record each extent in the target block group
 *      To build a proper map of extents to be relocated.
 *
 * 2.2  Build data reloc tree and reloc trees
 *      Data reloc tree will contain an inode, recording all newly relocated
 *      data extents.
 *      There will be only one data reloc tree for one data block group.
 *
 *      Reloc tree will be a special snapshot of its source tree, containing
 *      relocated tree blocks.
 *      Each tree referring to a tree block in the target block group will get
 *      its reloc tree built.
 *
 * 2.3  Swap source tree with its corresponding reloc tree
 *      Each involved tree only refers to new extents after swap.
 *
 * 3.   Cleanup reloc trees and data reloc tree.
 *      As old extents in the target block group are still referenced by reloc
 *      trees, we need to clean them up before really freeing the target block
 *      group.
 *
 * The main complexity is in steps 2.2 and 2.3.
 *
 * The entry point of relocation is relocate_block_group().
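 *
 * [Where do reloc trees live]
 *
 * Reloc tree root items are stored in the tree root with objectid
 * BTRFS_TREE_RELOC_OBJECTID and the source root's objectid as the key
 * offset (see create_reloc_root() below).  Once all extents have been
 * relocated, the reloc trees are merged back into their source trees by
 * merge_reloc_roots() and then dropped.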
 */

#define RELOCATION_RESERVED_NODES	256
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represent a tree block to process
 */
struct tree_block {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	u64 owner;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct btrfs_backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void mark_block_processed(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_range(node->bytenr, rc->block_group->start,
		     rc->block_group->length)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		set_extent_bits(&rc->processed_blocks, node->bytenr,
				node->bytenr + blocksize - 1, EXTENT_DIRTY);
	}
	node->processed = 1;
}


static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

/*
 * walk up backref nodes until we reach the node that represents the tree root
 */
static struct btrfs_backref_node *walk_up_backref(
		struct btrfs_backref_node *node,
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find start of next reference path
 */
static struct btrfs_backref_node *walk_down_backref(
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}

static void update_backref_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;

	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree.
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct btrfs_backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}

static bool reloc_root_is_dead(struct btrfs_root *root)
{
	/*
	 * Pair with set_bit/clear_bit in clean_dirty_subvols and
	 * btrfs_update_reloc_root. We need to see the updated bit before
	 * trying to access reloc_root.
	 */
	smp_rmb();
	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return true;
	return false;
}

/*
 * Check if this subvolume tree has a valid reloc tree.
 *
 * Reloc tree after swap is considered dead, thus not considered as valid.
 * This is enough for most callers, as they don't distinguish a dead reloc
 * root from no reloc root.  But btrfs_should_ignore_reloc_root() below is a
 * special case.
 */
static bool have_reloc_root(struct btrfs_root *root)
{
	if (reloc_root_is_dead(root))
		return false;
	if (!root->reloc_root)
		return false;
	return true;
}

int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/* This root has been merged with its reloc tree, we can ignore it */
	if (reloc_root_is_dead(root))
		return 1;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_header_generation(reloc_root->commit_root) ==
	    root->fs_info->running_transaction->transid)
		return 0;
	/*
	 * If there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}

/*
 * find reloc tree by address of tree root
 */
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	ASSERT(rc);
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return btrfs_grab_root(root);
}

/*
 * For useless nodes, do two major clean ups:
 *
 * - Cleanup the children edges and nodes
 *   If a child node is also orphan (no parent) during cleanup, then the child
 *   node will also be cleaned up.
 *
 * - Freeing up leaves (level 0), keeping nodes detached
 *   For nodes, the node is still cached as "detached"
 *
 * Return false if @node is not in the @useless_nodes list.
 * Return true if @node is in the @useless_nodes list.
 */
static bool handle_useless_nodes(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct list_head *useless_node = &cache->useless_node;
	bool ret = false;

	while (!list_empty(useless_node)) {
		struct btrfs_backref_node *cur;

		cur = list_first_entry(useless_node, struct btrfs_backref_node,
				       list);
		list_del_init(&cur->list);

		/* Only tree root nodes can be added to @useless_nodes */
		ASSERT(list_empty(&cur->upper));

		if (cur == node)
			ret = true;

		/* The node is the lowest node */
		if (cur->lowest) {
			list_del_init(&cur->lower);
			cur->lowest = 0;
		}

		/* Cleanup the lower edges */
		while (!list_empty(&cur->lower)) {
			struct btrfs_backref_edge *edge;
			struct btrfs_backref_node *lower;

			edge = list_entry(cur->lower.next,
					  struct btrfs_backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			btrfs_backref_free_edge(cache, edge);

			/* Child node is also orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
		}
		/* Mark this block processed for relocation */
		mark_block_processed(rc, cur);

		/*
		 * Backref nodes for tree leaves are deleted from the cache.
		 * Backref nodes for upper level tree blocks are left in the
		 * cache to avoid unnecessary backref lookup.
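		 * Detached nodes are dropped at the next transaction commit
		 * by update_backref_cache().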
		 */
		if (cur->level > 0) {
			list_add(&cur->list, &cache->detached);
			cur->detached = 1;
		} else {
			rb_erase(&cur->rb_node, &cache->rb_root);
			btrfs_backref_free_node(cache, cur);
		}
	}
	return ret;
}

/*
 * Build backref tree for a given tree block. Root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond to
 * roots of b-trees that reference the tree block.
 *
 * The basic idea of this function is to check backrefs of a given block to
 * find upper level blocks that reference the block, and then check backrefs
 * of these upper level blocks recursively. The recursion stops when the tree
 * root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 * all upper level blocks that directly/indirectly reference the block are also
 * cached.
 */
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
			struct reloc_control *rc, struct btrfs_key *node_key,
			int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	/* For searching parent of TREE_BLOCK_REF */
	struct btrfs_path *path;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_edge *edge;
	int ret;
	int err = 0;

	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->lowest = 1;
	cur = node;

	/* Breadth-first search to build backref cache */
	do {
		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
						  cur);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/*
		 * The pending list isn't empty, take the first block to
		 * process
		 */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (handle_useless_nodes(rc, node))
		node = NULL;
out:
	btrfs_backref_iter_free(iter);
	btrfs_free_path(path);
	if (err) {
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
	return node;
}

/*
 * helper to add backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_node *new_node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = rb_simple_search(&cache->rb_root,
					   reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct btrfs_backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
					    node->level);
	if (!new_node)
		return -ENOMEM;

	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = btrfs_grab_root(dest);
	ASSERT(new_node->root);

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = btrfs_backref_alloc_edge(cache);
			if (!new_edge)
				goto fail;

			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
						new_node, LINK_UPPER);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
				   &new_node->rb_node);
	if (rb_node)
		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct btrfs_backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		btrfs_backref_free_edge(cache, new_edge);
	}
	btrfs_backref_free_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->commit_root->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_err(fs_info,
			  "Duplicate root found for start=%llu while inserting into relocation tree",
			  node->bytenr);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}

/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;
	bool put_ref = false;

	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
					   root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
			RB_CLEAR_NODE(&node->rb_node);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		ASSERT(!node || (struct btrfs_root *)node->data == root);
	}

	/*
	 * We only put the reloc root here if it's on the list. There's a lot
	 * of places where the pattern is to splice the rc->reloc_roots, process
	 * the reloc roots, and then add the reloc root back onto
	 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
	 * list we don't want the reference being dropped, because the guy
	 * messing with the list is in charge of the reference.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&root->root_list)) {
		put_ref = true;
		list_del_init(&root->root_list);
	}
	spin_unlock(&fs_info->trans_lock);
	if (put_ref)
		btrfs_put_root(root);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
				   root->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = root->node->start;
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
	return 0;
}

static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret = 0;
	bool must_abort = false;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	if (!root_item)
		return ERR_PTR(-ENOMEM);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;

		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) when the relocation root is created
		 * either inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) or when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;
	}

	/*
	 * We have changed references at this point, we must abort the
	 * transaction if anything fails.
	 */
	must_abort = true;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		btrfs_set_root_drop_level(root_item, 0);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	if (ret)
		goto fail;

	kfree(root_item);

	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
	if (IS_ERR(reloc_root)) {
		ret = PTR_ERR(reloc_root);
		goto abort;
	}
	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
	reloc_root->last_trans = trans->transid;
	return reloc_root;
fail:
	kfree(root_item);
abort:
	if (must_abort)
		btrfs_abort_transaction(trans, ret);
	return ERR_PTR(ret);
}

/*
 * create reloc tree for a given fs tree. reloc tree is just a
 * snapshot of the fs tree with special root objectid.
 *
 * The reloc_root comes out of here with two references, one for
 * root->reloc_root, and another for being on the rc->reloc_roots list.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (!rc)
		return 0;

	/*
	 * The subvolume has a reloc tree but the swap is finished, no need to
	 * create/update the dead reloc tree
	 */
	if (reloc_root_is_dead(root))
		return 0;

	/*
	 * This is subtle but important.  We do not do
	 * record_root_in_transaction for reloc roots, instead we record their
	 * corresponding fs root, and then here we update the last trans for the
	 * reloc root.  This means that we have to do this for the entire life
	 * of the reloc root, regardless of which stage of the relocation we are
	 * in.
	 */
	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	/*
	 * We are merging reloc roots, we do not need new reloc trees.  Also
	 * reloc trees never need their own reloc tree.
	 */
	if (!rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	root->reloc_root = btrfs_grab_root(reloc_root);
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!have_reloc_root(root))
		return 0;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/*
	 * We are probably ok here, but __del_reloc_root() will drop its ref of
	 * the root.  We have the ref for root->reloc_root, but just in case
	 * hold it while we update the reloc root.
	 */
	btrfs_grab_root(reloc_root);

	/* root->reloc_root will stay until current relocation finished */
	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		/*
		 * Mark the tree as dead before we change reloc_root so
		 * have_reloc_root will not touch it from now on.
		 */
		smp_wmb();
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		__update_reloc_root(reloc_root);
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	btrfs_put_root(reloc_root);
	return ret;
}

/*
 * helper to find first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_ref ref = { 0 };

		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_range(bytenr, rc->block_group->start,
			      rc->block_group->length))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * read_folio to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				struct extent_state *cached_state = NULL;

				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end,
						      &cached_state);
				if (!ret)
					continue;

				btrfs_drop_extent_map_range(BTRFS_I(inode),
							    key.offset, end, true);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end, &cached_state);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;

	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in the fs tree with the new blocks
 * in the reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			return ret;
		}
	}

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		ASSERT(level >= lowest_level);

		ret = btrfs_bin_search(parent, &key, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = btrfs_read_node_slot(parent, slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb,
						      BTRFS_NESTING_COW);
				if (ret) {
					btrfs_tree_unlock(eb);
					free_extent_buffer(eb);
					break;
				}
			}

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		path->lowest_level = 0;
		if (ret) {
			if (ret > 0)
				ret = -ENOENT;
			break;
		}

		/*
		 * Info qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
				    true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}

/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}

/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		eb = btrfs_read_node_slot(eb, path->slots[i]);
		if (IS_ERR(eb))
			return PTR_ERR(eb);
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}

/*
 * invalidate extent cache for file extents whose key is in the range of
 * [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		struct extent_state *cached_state = NULL;

		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(BTRFS_I(inode));

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for read_folio to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
		btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}

/*
 * Insert current subvolume into reloc_control::dirty_subvol_roots
 */
static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;
	int ret;

	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);

	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	btrfs_set_root_drop_level(reloc_root_item, 0);
	btrfs_set_root_refs(reloc_root_item, 0);
	ret = btrfs_update_reloc_root(trans, root);
	if (ret)
		return ret;

	if (list_empty(&root->reloc_dirty_list)) {
		btrfs_grab_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}

	return 0;
}

static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	int ret2;

	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			/* Merged subvolume, cleanup its reloc root */
			struct btrfs_root *reloc_root = root->reloc_root;

			list_del_init(&root->reloc_dirty_list);
			root->reloc_root = NULL;
			/*
			 * Need barrier to ensure clear_bit() only happens after
			 * root->reloc_root = NULL. Pairs with have_reloc_root.
			 */
			smp_wmb();
			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
			if (reloc_root) {
				/*
				 * btrfs_drop_snapshot drops our ref we hold for
				 * ->reloc_root.  If it fails however we must
				 * drop the ref ourselves.
				 */
				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
				if (ret2 < 0) {
					btrfs_put_root(reloc_root);
					if (!ret)
						ret = ret2;
				}
			}
			btrfs_put_root(root);
		} else {
			/* Orphan reloc tree, just clean it up */
			ret2 = btrfs_drop_snapshot(root, 0, 1);
			if (ret2 < 0) {
				btrfs_put_root(root);
				if (!ret)
					ret = ret2;
			}
		}
	}
	return ret;
}

/*
 * merge the relocated tree blocks in reloc tree with corresponding
 * fs tree.
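 * This is done by swapping block pointers between the two trees,
 * see replace_path().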
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int reserve_level;
	int level;
	int max_level;
	int replaced = 0;
	int ret = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		atomic_inc(&reloc_root->node->refs);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	/*
	 * In merge_reloc_root(), we modify the upper level pointer to swap the
	 * tree blocks between reloc tree and subvolume tree.  Thus for tree
	 * block COW, we COW at most from level 1 to root level for each tree.
	 *
	 * Thus the needed metadata size is at most root_level * nodesize,
	 * and doubled since we have two trees to COW.
	 */
	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
	min_reserved = fs_info->nodesize * reserve_level * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     min_reserved,
					     BTRFS_RESERVE_FLUSH_LIMIT);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}

		/*
		 * At this point we no longer have a reloc_control, so we can't
		 * depend on btrfs_init_reloc_root to update our last_trans.
		 *
		 * But that's ok, we started the trans handle on our
		 * corresponding fs_root, which means it's been added to the
		 * dirty list.  At commit time we'll still call
		 * btrfs_update_reloc_root() and update our root item
		 * appropriately.
		 */
		reloc_root->last_trans = trans->transid;
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0)
			goto out;
		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		btrfs_set_root_drop_level(root_item, level);

		btrfs_end_transaction_throttle(trans);
		trans = NULL;

		btrfs_btree_balance_dirty(fs_info);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to be
	 * relocated and that block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
			      BTRFS_NESTING_COW);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
out:
	btrfs_free_path(path);

	if (ret == 0) {
		ret = insert_dirty_subvol(trans, rc, root);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans);

	btrfs_btree_balance_dirty(fs_info);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return ret;
}

static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (IS_ERR(root)) {
			/*
			 * Even if we have an error we need this reloc root
			 * back on our list so we can clean up properly.
			 */
			list_add(&reloc_root->root_list, &reloc_roots);
			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
			if (!err)
				err = PTR_ERR(root);
			break;
		}
		ASSERT(root->reloc_root == reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		ret = btrfs_update_reloc_root(trans, root);

		/*
		 * Even if we have an error we need this reloc root back on our
		 * list so we can clean up properly.
		 */
		list_add(&reloc_root->root_list, &reloc_roots);
		btrfs_put_root(root);

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (!err)
				err = ret;
			break;
		}
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		err = btrfs_commit_transaction(trans);
	else
		btrfs_end_transaction(trans);
	return err;
}

static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root, *tmp;

	list_for_each_entry_safe(reloc_root, tmp, list, root_list)
		__del_reloc_root(reloc_root);
}

static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
	mutex_lock(&fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			if (IS_ERR(root)) {
				/*
				 * For recovery we read the fs roots on mount,
				 * and if we didn't find the root then we marked
				 * the reloc root as a garbage root.  For normal
				 * relocation obviously the root should exist in
				 * memory.  However there's no reason we can't
				 * handle the error properly here just in case.
				 */
				ASSERT(0);
				ret = PTR_ERR(root);
				goto out;
			}
			if (root->reloc_root != reloc_root) {
				/*
				 * This is actually impossible without something
				 * going really wrong (like weird race condition
				 * or cosmic rays).
2008 */ 2009 ASSERT(0); 2010 ret = -EINVAL; 2011 goto out; 2012 } 2013 ret = merge_reloc_root(rc, root); 2014 btrfs_put_root(root); 2015 if (ret) { 2016 if (list_empty(&reloc_root->root_list)) 2017 list_add_tail(&reloc_root->root_list, 2018 &reloc_roots); 2019 goto out; 2020 } 2021 } else { 2022 if (!IS_ERR(root)) { 2023 if (root->reloc_root == reloc_root) { 2024 root->reloc_root = NULL; 2025 btrfs_put_root(reloc_root); 2026 } 2027 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, 2028 &root->state); 2029 btrfs_put_root(root); 2030 } 2031 2032 list_del_init(&reloc_root->root_list); 2033 /* Don't forget to queue this reloc root for cleanup */ 2034 list_add_tail(&reloc_root->reloc_dirty_list, 2035 &rc->dirty_subvol_roots); 2036 } 2037 } 2038 2039 if (found) { 2040 found = 0; 2041 goto again; 2042 } 2043 out: 2044 if (ret) { 2045 btrfs_handle_fs_error(fs_info, ret, NULL); 2046 free_reloc_roots(&reloc_roots); 2047 2048 /* new reloc root may be added */ 2049 mutex_lock(&fs_info->reloc_mutex); 2050 list_splice_init(&rc->reloc_roots, &reloc_roots); 2051 mutex_unlock(&fs_info->reloc_mutex); 2052 free_reloc_roots(&reloc_roots); 2053 } 2054 2055 /* 2056 * We used to have 2057 * 2058 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2059 * 2060 * here, but it's wrong. If we fail to start the transaction in 2061 * prepare_to_merge() we will have only 0 ref reloc roots, none of which 2062 * have actually been removed from the reloc_root_tree rb tree. This is 2063 * fine because we're bailing here, and we hold a reference on the root 2064 * for the list that holds it, so these roots will be cleaned up when we 2065 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root 2066 * will be cleaned up on unmount. 2067 * 2068 * The remaining nodes will be cleaned up by free_reloc_control. 2069 */ 2070 } 2071 2072 static void free_block_list(struct rb_root *blocks) 2073 { 2074 struct tree_block *block; 2075 struct rb_node *rb_node; 2076 while ((rb_node = rb_first(blocks))) { 2077 block = rb_entry(rb_node, struct tree_block, rb_node); 2078 rb_erase(rb_node, blocks); 2079 kfree(block); 2080 } 2081 } 2082 2083 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, 2084 struct btrfs_root *reloc_root) 2085 { 2086 struct btrfs_fs_info *fs_info = reloc_root->fs_info; 2087 struct btrfs_root *root; 2088 int ret; 2089 2090 if (reloc_root->last_trans == trans->transid) 2091 return 0; 2092 2093 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); 2094 2095 /* 2096 * This should succeed, since we can't have a reloc root without having 2097 * already looked up the actual root and created the reloc root for this 2098 * root. 2099 * 2100 * However if there's some sort of corruption where we have a ref to a 2101 * reloc root without a corresponding root this could return ENOENT. 
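 *
 * Both failure modes are handled right below: a missing root trips the
 * ASSERT() and returns the error, and a root whose ->reloc_root does not
 * point back at this reloc root is reported and fails with -EUCLEAN.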
2102 */ 2103 if (IS_ERR(root)) { 2104 ASSERT(0); 2105 return PTR_ERR(root); 2106 } 2107 if (root->reloc_root != reloc_root) { 2108 ASSERT(0); 2109 btrfs_err(fs_info, 2110 "root %llu has two reloc roots associated with it", 2111 reloc_root->root_key.offset); 2112 btrfs_put_root(root); 2113 return -EUCLEAN; 2114 } 2115 ret = btrfs_record_root_in_trans(trans, root); 2116 btrfs_put_root(root); 2117 2118 return ret; 2119 } 2120 2121 static noinline_for_stack 2122 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2123 struct reloc_control *rc, 2124 struct btrfs_backref_node *node, 2125 struct btrfs_backref_edge *edges[]) 2126 { 2127 struct btrfs_backref_node *next; 2128 struct btrfs_root *root; 2129 int index = 0; 2130 int ret; 2131 2132 next = node; 2133 while (1) { 2134 cond_resched(); 2135 next = walk_up_backref(next, edges, &index); 2136 root = next->root; 2137 2138 /* 2139 * If there is no root, then our references for this block are 2140 * incomplete, as we should be able to walk all the way up to a 2141 * block that is owned by a root. 2142 * 2143 * This path is only for SHAREABLE roots, so if we come upon a 2144 * non-SHAREABLE root then we have backrefs that resolve 2145 * improperly. 2146 * 2147 * Both of these cases indicate file system corruption, or a bug 2148 * in the backref walking code. 2149 */ 2150 if (!root) { 2151 ASSERT(0); 2152 btrfs_err(trans->fs_info, 2153 "bytenr %llu doesn't have a backref path ending in a root", 2154 node->bytenr); 2155 return ERR_PTR(-EUCLEAN); 2156 } 2157 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 2158 ASSERT(0); 2159 btrfs_err(trans->fs_info, 2160 "bytenr %llu has multiple refs with one ending in a non-shareable root", 2161 node->bytenr); 2162 return ERR_PTR(-EUCLEAN); 2163 } 2164 2165 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2166 ret = record_reloc_root_in_trans(trans, root); 2167 if (ret) 2168 return ERR_PTR(ret); 2169 break; 2170 } 2171 2172 ret = btrfs_record_root_in_trans(trans, root); 2173 if (ret) 2174 return ERR_PTR(ret); 2175 root = root->reloc_root; 2176 2177 /* 2178 * We could have raced with another thread which failed, so 2179 * root->reloc_root may not be set, return ENOENT in this case. 2180 */ 2181 if (!root) 2182 return ERR_PTR(-ENOENT); 2183 2184 if (next->new_bytenr != root->node->start) { 2185 /* 2186 * We just created the reloc root, so we shouldn't have 2187 * ->new_bytenr set and this shouldn't be in the changed 2188 * list. If it is then we have multiple roots pointing 2189 * at the same bytenr which indicates corruption, or 2190 * we've made a mistake in the backref walking code. 2191 */ 2192 ASSERT(next->new_bytenr == 0); 2193 ASSERT(list_empty(&next->list)); 2194 if (next->new_bytenr || !list_empty(&next->list)) { 2195 btrfs_err(trans->fs_info, 2196 "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu", 2197 node->bytenr, next->bytenr); 2198 return ERR_PTR(-EUCLEAN); 2199 } 2200 2201 next->new_bytenr = root->node->start; 2202 btrfs_put_root(next->root); 2203 next->root = btrfs_grab_root(root); 2204 ASSERT(next->root); 2205 list_add_tail(&next->list, 2206 &rc->backref_cache.changed); 2207 mark_block_processed(rc, next); 2208 break; 2209 } 2210 2211 WARN_ON(1); 2212 root = NULL; 2213 next = walk_down_backref(edges, &index); 2214 if (!next || next->level <= node->level) 2215 break; 2216 } 2217 if (!root) { 2218 /* 2219 * This can happen if there's fs corruption or if there's a bug 2220 * in the backref lookup code. 
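 * (We only get here after the WARN_ON(1) above left 'root' NULL and
 * walk_down_backref() found no further backref path to try.)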
2221 */ 2222 ASSERT(0); 2223 return ERR_PTR(-ENOENT); 2224 } 2225 2226 next = node; 2227 /* setup backref node path for btrfs_reloc_cow_block */ 2228 while (1) { 2229 rc->backref_cache.path[next->level] = next; 2230 if (--index < 0) 2231 break; 2232 next = edges[index]->node[UPPER]; 2233 } 2234 return root; 2235 } 2236 2237 /* 2238 * Select a tree root for relocation. 2239 * 2240 * Return NULL if the block is not shareable. We should use do_relocation() in 2241 * this case. 2242 * 2243 * Return a tree root pointer if the block is shareable. 2244 * Return -ENOENT if the block is root of reloc tree. 2245 */ 2246 static noinline_for_stack 2247 struct btrfs_root *select_one_root(struct btrfs_backref_node *node) 2248 { 2249 struct btrfs_backref_node *next; 2250 struct btrfs_root *root; 2251 struct btrfs_root *fs_root = NULL; 2252 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2253 int index = 0; 2254 2255 next = node; 2256 while (1) { 2257 cond_resched(); 2258 next = walk_up_backref(next, edges, &index); 2259 root = next->root; 2260 2261 /* 2262 * This can occur if we have incomplete extent refs leading all 2263 * the way up a particular path, in this case return -EUCLEAN. 2264 */ 2265 if (!root) 2266 return ERR_PTR(-EUCLEAN); 2267 2268 /* No other choice for non-shareable tree */ 2269 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 2270 return root; 2271 2272 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) 2273 fs_root = root; 2274 2275 if (next != node) 2276 return NULL; 2277 2278 next = walk_down_backref(edges, &index); 2279 if (!next || next->level <= node->level) 2280 break; 2281 } 2282 2283 if (!fs_root) 2284 return ERR_PTR(-ENOENT); 2285 return fs_root; 2286 } 2287 2288 static noinline_for_stack 2289 u64 calcu_metadata_size(struct reloc_control *rc, 2290 struct btrfs_backref_node *node, int reserve) 2291 { 2292 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2293 struct btrfs_backref_node *next = node; 2294 struct btrfs_backref_edge *edge; 2295 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2296 u64 num_bytes = 0; 2297 int index = 0; 2298 2299 BUG_ON(reserve && node->processed); 2300 2301 while (next) { 2302 cond_resched(); 2303 while (1) { 2304 if (next->processed && (reserve || next != node)) 2305 break; 2306 2307 num_bytes += fs_info->nodesize; 2308 2309 if (list_empty(&next->upper)) 2310 break; 2311 2312 edge = list_entry(next->upper.next, 2313 struct btrfs_backref_edge, list[LOWER]); 2314 edges[index++] = edge; 2315 next = edge->node[UPPER]; 2316 } 2317 next = walk_down_backref(edges, &index); 2318 } 2319 return num_bytes; 2320 } 2321 2322 static int reserve_metadata_space(struct btrfs_trans_handle *trans, 2323 struct reloc_control *rc, 2324 struct btrfs_backref_node *node) 2325 { 2326 struct btrfs_root *root = rc->extent_root; 2327 struct btrfs_fs_info *fs_info = root->fs_info; 2328 u64 num_bytes; 2329 int ret; 2330 u64 tmp; 2331 2332 num_bytes = calcu_metadata_size(rc, node, 1) * 2; 2333 2334 trans->block_rsv = rc->block_rsv; 2335 rc->reserved_bytes += num_bytes; 2336 2337 /* 2338 * We are under a transaction here so we can only do limited flushing. 2339 * If we get an enospc just kick back -EAGAIN so we know to drop the 2340 * transaction and try to refill when we can flush all the things. 
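 *
 * The -EAGAIN is handled in relocate_block_group(): it ends the
 * transaction, rewinds the search to the failed extent and refills the
 * (now enlarged) rsv with BTRFS_RESERVE_FLUSH_ALL before retrying.
 * As a rough sizing example, assuming the common 16K nodesize: the base
 * reservation below is RELOCATION_RESERVED_NODES (256) nodes, i.e. 4M,
 * and on each failure the rsv size is bumped to the smallest doubling of
 * that base that exceeds rc->reserved_bytes, plus one more 4M of headroom.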
2341 */ 2342 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes, 2343 BTRFS_RESERVE_FLUSH_LIMIT); 2344 if (ret) { 2345 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES; 2346 while (tmp <= rc->reserved_bytes) 2347 tmp <<= 1; 2348 /* 2349 * only one thread can access block_rsv at this point, 2350 * so we don't need hold lock to protect block_rsv. 2351 * we expand more reservation size here to allow enough 2352 * space for relocation and we will return earlier in 2353 * enospc case. 2354 */ 2355 rc->block_rsv->size = tmp + fs_info->nodesize * 2356 RELOCATION_RESERVED_NODES; 2357 return -EAGAIN; 2358 } 2359 2360 return 0; 2361 } 2362 2363 /* 2364 * relocate a block tree, and then update pointers in upper level 2365 * blocks that reference the block to point to the new location. 2366 * 2367 * if called by link_to_upper, the block has already been relocated. 2368 * in that case this function just updates pointers. 2369 */ 2370 static int do_relocation(struct btrfs_trans_handle *trans, 2371 struct reloc_control *rc, 2372 struct btrfs_backref_node *node, 2373 struct btrfs_key *key, 2374 struct btrfs_path *path, int lowest) 2375 { 2376 struct btrfs_backref_node *upper; 2377 struct btrfs_backref_edge *edge; 2378 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2379 struct btrfs_root *root; 2380 struct extent_buffer *eb; 2381 u32 blocksize; 2382 u64 bytenr; 2383 int slot; 2384 int ret = 0; 2385 2386 /* 2387 * If we are lowest then this is the first time we're processing this 2388 * block, and thus shouldn't have an eb associated with it yet. 2389 */ 2390 ASSERT(!lowest || !node->eb); 2391 2392 path->lowest_level = node->level + 1; 2393 rc->backref_cache.path[node->level] = node; 2394 list_for_each_entry(edge, &node->upper, list[LOWER]) { 2395 struct btrfs_ref ref = { 0 }; 2396 2397 cond_resched(); 2398 2399 upper = edge->node[UPPER]; 2400 root = select_reloc_root(trans, rc, upper, edges); 2401 if (IS_ERR(root)) { 2402 ret = PTR_ERR(root); 2403 goto next; 2404 } 2405 2406 if (upper->eb && !upper->locked) { 2407 if (!lowest) { 2408 ret = btrfs_bin_search(upper->eb, key, &slot); 2409 if (ret < 0) 2410 goto next; 2411 BUG_ON(ret); 2412 bytenr = btrfs_node_blockptr(upper->eb, slot); 2413 if (node->eb->start == bytenr) 2414 goto next; 2415 } 2416 btrfs_backref_drop_node_buffer(upper); 2417 } 2418 2419 if (!upper->eb) { 2420 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2421 if (ret) { 2422 if (ret > 0) 2423 ret = -ENOENT; 2424 2425 btrfs_release_path(path); 2426 break; 2427 } 2428 2429 if (!upper->eb) { 2430 upper->eb = path->nodes[upper->level]; 2431 path->nodes[upper->level] = NULL; 2432 } else { 2433 BUG_ON(upper->eb != path->nodes[upper->level]); 2434 } 2435 2436 upper->locked = 1; 2437 path->locks[upper->level] = 0; 2438 2439 slot = path->slots[upper->level]; 2440 btrfs_release_path(path); 2441 } else { 2442 ret = btrfs_bin_search(upper->eb, key, &slot); 2443 if (ret < 0) 2444 goto next; 2445 BUG_ON(ret); 2446 } 2447 2448 bytenr = btrfs_node_blockptr(upper->eb, slot); 2449 if (lowest) { 2450 if (bytenr != node->bytenr) { 2451 btrfs_err(root->fs_info, 2452 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", 2453 bytenr, node->bytenr, slot, 2454 upper->eb->start); 2455 ret = -EIO; 2456 goto next; 2457 } 2458 } else { 2459 if (node->eb->start == bytenr) 2460 goto next; 2461 } 2462 2463 blocksize = root->fs_info->nodesize; 2464 eb = btrfs_read_node_slot(upper->eb, slot); 2465 if (IS_ERR(eb)) { 2466 ret = PTR_ERR(eb); 2467 goto next; 2468 } 2469 
btrfs_tree_lock(eb); 2470 2471 if (!node->eb) { 2472 ret = btrfs_cow_block(trans, root, eb, upper->eb, 2473 slot, &eb, BTRFS_NESTING_COW); 2474 btrfs_tree_unlock(eb); 2475 free_extent_buffer(eb); 2476 if (ret < 0) 2477 goto next; 2478 /* 2479 * We've just COWed this block, it should have updated 2480 * the correct backref node entry. 2481 */ 2482 ASSERT(node->eb == eb); 2483 } else { 2484 btrfs_set_node_blockptr(upper->eb, slot, 2485 node->eb->start); 2486 btrfs_set_node_ptr_generation(upper->eb, slot, 2487 trans->transid); 2488 btrfs_mark_buffer_dirty(upper->eb); 2489 2490 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, 2491 node->eb->start, blocksize, 2492 upper->eb->start); 2493 btrfs_init_tree_ref(&ref, node->level, 2494 btrfs_header_owner(upper->eb), 2495 root->root_key.objectid, false); 2496 ret = btrfs_inc_extent_ref(trans, &ref); 2497 if (!ret) 2498 ret = btrfs_drop_subtree(trans, root, eb, 2499 upper->eb); 2500 if (ret) 2501 btrfs_abort_transaction(trans, ret); 2502 } 2503 next: 2504 if (!upper->pending) 2505 btrfs_backref_drop_node_buffer(upper); 2506 else 2507 btrfs_backref_unlock_node_buffer(upper); 2508 if (ret) 2509 break; 2510 } 2511 2512 if (!ret && node->pending) { 2513 btrfs_backref_drop_node_buffer(node); 2514 list_move_tail(&node->list, &rc->backref_cache.changed); 2515 node->pending = 0; 2516 } 2517 2518 path->lowest_level = 0; 2519 2520 /* 2521 * We should have allocated all of our space in the block rsv and thus 2522 * shouldn't ENOSPC. 2523 */ 2524 ASSERT(ret != -ENOSPC); 2525 return ret; 2526 } 2527 2528 static int link_to_upper(struct btrfs_trans_handle *trans, 2529 struct reloc_control *rc, 2530 struct btrfs_backref_node *node, 2531 struct btrfs_path *path) 2532 { 2533 struct btrfs_key key; 2534 2535 btrfs_node_key_to_cpu(node->eb, &key, 0); 2536 return do_relocation(trans, rc, node, &key, path, 0); 2537 } 2538 2539 static int finish_pending_nodes(struct btrfs_trans_handle *trans, 2540 struct reloc_control *rc, 2541 struct btrfs_path *path, int err) 2542 { 2543 LIST_HEAD(list); 2544 struct btrfs_backref_cache *cache = &rc->backref_cache; 2545 struct btrfs_backref_node *node; 2546 int level; 2547 int ret; 2548 2549 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2550 while (!list_empty(&cache->pending[level])) { 2551 node = list_entry(cache->pending[level].next, 2552 struct btrfs_backref_node, list); 2553 list_move_tail(&node->list, &list); 2554 BUG_ON(!node->pending); 2555 2556 if (!err) { 2557 ret = link_to_upper(trans, rc, node, path); 2558 if (ret < 0) 2559 err = ret; 2560 } 2561 } 2562 list_splice_init(&list, &cache->pending[level]); 2563 } 2564 return err; 2565 } 2566 2567 /* 2568 * mark a block and all blocks directly/indirectly reference the block 2569 * as processed. 
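 * (Same walk as calcu_metadata_size(): climb the first upper edge from
 * each node, stop at a node that is already processed or has no uppers,
 * then backtrack via walk_down_backref() for the remaining paths.)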
2570 */ 2571 static void update_processed_blocks(struct reloc_control *rc, 2572 struct btrfs_backref_node *node) 2573 { 2574 struct btrfs_backref_node *next = node; 2575 struct btrfs_backref_edge *edge; 2576 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2577 int index = 0; 2578 2579 while (next) { 2580 cond_resched(); 2581 while (1) { 2582 if (next->processed) 2583 break; 2584 2585 mark_block_processed(rc, next); 2586 2587 if (list_empty(&next->upper)) 2588 break; 2589 2590 edge = list_entry(next->upper.next, 2591 struct btrfs_backref_edge, list[LOWER]); 2592 edges[index++] = edge; 2593 next = edge->node[UPPER]; 2594 } 2595 next = walk_down_backref(edges, &index); 2596 } 2597 } 2598 2599 static int tree_block_processed(u64 bytenr, struct reloc_control *rc) 2600 { 2601 u32 blocksize = rc->extent_root->fs_info->nodesize; 2602 2603 if (test_range_bit(&rc->processed_blocks, bytenr, 2604 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2605 return 1; 2606 return 0; 2607 } 2608 2609 static int get_tree_block_key(struct btrfs_fs_info *fs_info, 2610 struct tree_block *block) 2611 { 2612 struct extent_buffer *eb; 2613 2614 eb = read_tree_block(fs_info, block->bytenr, block->owner, 2615 block->key.offset, block->level, NULL); 2616 if (IS_ERR(eb)) 2617 return PTR_ERR(eb); 2618 if (!extent_buffer_uptodate(eb)) { 2619 free_extent_buffer(eb); 2620 return -EIO; 2621 } 2622 if (block->level == 0) 2623 btrfs_item_key_to_cpu(eb, &block->key, 0); 2624 else 2625 btrfs_node_key_to_cpu(eb, &block->key, 0); 2626 free_extent_buffer(eb); 2627 block->key_ready = 1; 2628 return 0; 2629 } 2630 2631 /* 2632 * helper function to relocate a tree block 2633 */ 2634 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2635 struct reloc_control *rc, 2636 struct btrfs_backref_node *node, 2637 struct btrfs_key *key, 2638 struct btrfs_path *path) 2639 { 2640 struct btrfs_root *root; 2641 int ret = 0; 2642 2643 if (!node) 2644 return 0; 2645 2646 /* 2647 * If we fail here we want to drop our backref_node because we are going 2648 * to start over and regenerate the tree for it. 2649 */ 2650 ret = reserve_metadata_space(trans, rc, node); 2651 if (ret) 2652 goto out; 2653 2654 BUG_ON(node->processed); 2655 root = select_one_root(node); 2656 if (IS_ERR(root)) { 2657 ret = PTR_ERR(root); 2658 2659 /* See explanation in select_one_root for the -EUCLEAN case. */ 2660 ASSERT(ret == -ENOENT); 2661 if (ret == -ENOENT) { 2662 ret = 0; 2663 update_processed_blocks(rc, node); 2664 } 2665 goto out; 2666 } 2667 2668 if (root) { 2669 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 2670 /* 2671 * This block was the root block of a root, and this is 2672 * the first time we're processing the block and thus it 2673 * should not have had the ->new_bytenr modified and 2674 * should have not been included on the changed list. 2675 * 2676 * However in the case of corruption we could have 2677 * multiple refs pointing to the same block improperly, 2678 * and thus we would trip over these checks. ASSERT() 2679 * for the developer case, because it could indicate a 2680 * bug in the backref code, however error out for a 2681 * normal user in the case of corruption. 
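 *
 * (select_reloc_root() applies the same new_bytenr/list sanity checks
 * when the block is reached through a shared backref path instead of
 * being the root block of a tree.)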
2682 */ 2683 ASSERT(node->new_bytenr == 0); 2684 ASSERT(list_empty(&node->list)); 2685 if (node->new_bytenr || !list_empty(&node->list)) { 2686 btrfs_err(root->fs_info, 2687 "bytenr %llu has improper references to it", 2688 node->bytenr); 2689 ret = -EUCLEAN; 2690 goto out; 2691 } 2692 ret = btrfs_record_root_in_trans(trans, root); 2693 if (ret) 2694 goto out; 2695 /* 2696 * Another thread could have failed, need to check if we 2697 * have reloc_root actually set. 2698 */ 2699 if (!root->reloc_root) { 2700 ret = -ENOENT; 2701 goto out; 2702 } 2703 root = root->reloc_root; 2704 node->new_bytenr = root->node->start; 2705 btrfs_put_root(node->root); 2706 node->root = btrfs_grab_root(root); 2707 ASSERT(node->root); 2708 list_add_tail(&node->list, &rc->backref_cache.changed); 2709 } else { 2710 path->lowest_level = node->level; 2711 if (root == root->fs_info->chunk_root) 2712 btrfs_reserve_chunk_metadata(trans, false); 2713 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2714 btrfs_release_path(path); 2715 if (root == root->fs_info->chunk_root) 2716 btrfs_trans_release_chunk_metadata(trans); 2717 if (ret > 0) 2718 ret = 0; 2719 } 2720 if (!ret) 2721 update_processed_blocks(rc, node); 2722 } else { 2723 ret = do_relocation(trans, rc, node, key, path, 1); 2724 } 2725 out: 2726 if (ret || node->level == 0 || node->cowonly) 2727 btrfs_backref_cleanup_node(&rc->backref_cache, node); 2728 return ret; 2729 } 2730 2731 /* 2732 * relocate a list of blocks 2733 */ 2734 static noinline_for_stack 2735 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 2736 struct reloc_control *rc, struct rb_root *blocks) 2737 { 2738 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2739 struct btrfs_backref_node *node; 2740 struct btrfs_path *path; 2741 struct tree_block *block; 2742 struct tree_block *next; 2743 int ret; 2744 int err = 0; 2745 2746 path = btrfs_alloc_path(); 2747 if (!path) { 2748 err = -ENOMEM; 2749 goto out_free_blocks; 2750 } 2751 2752 /* Kick in readahead for tree blocks with missing keys */ 2753 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2754 if (!block->key_ready) 2755 btrfs_readahead_tree_block(fs_info, block->bytenr, 2756 block->owner, 0, 2757 block->level); 2758 } 2759 2760 /* Get first keys */ 2761 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2762 if (!block->key_ready) { 2763 err = get_tree_block_key(fs_info, block); 2764 if (err) 2765 goto out_free_path; 2766 } 2767 } 2768 2769 /* Do tree relocation */ 2770 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2771 node = build_backref_tree(rc, &block->key, 2772 block->level, block->bytenr); 2773 if (IS_ERR(node)) { 2774 err = PTR_ERR(node); 2775 goto out; 2776 } 2777 2778 ret = relocate_tree_block(trans, rc, node, &block->key, 2779 path); 2780 if (ret < 0) { 2781 err = ret; 2782 break; 2783 } 2784 } 2785 out: 2786 err = finish_pending_nodes(trans, rc, path, err); 2787 2788 out_free_path: 2789 btrfs_free_path(path); 2790 out_free_blocks: 2791 free_block_list(blocks); 2792 return err; 2793 } 2794 2795 static noinline_for_stack int prealloc_file_extent_cluster( 2796 struct btrfs_inode *inode, 2797 struct file_extent_cluster *cluster) 2798 { 2799 u64 alloc_hint = 0; 2800 u64 start; 2801 u64 end; 2802 u64 offset = inode->index_cnt; 2803 u64 num_bytes; 2804 int nr; 2805 int ret = 0; 2806 u64 i_size = i_size_read(&inode->vfs_inode); 2807 u64 prealloc_start = cluster->start - offset; 2808 u64 prealloc_end = cluster->end - offset; 2809 u64 cur_offset = 
prealloc_start; 2810 2811 /* 2812 * For subpage case, previous i_size may not be aligned to PAGE_SIZE. 2813 * This means the range [i_size, PAGE_END + 1) is filled with zeros by 2814 * btrfs_do_readpage() call of previously relocated file cluster. 2815 * 2816 * If the current cluster starts in the above range, btrfs_do_readpage() 2817 * will skip the read, and relocate_one_page() will later writeback 2818 * the padding zeros as new data, causing data corruption. 2819 * 2820 * Here we have to manually invalidate the range (i_size, PAGE_END + 1). 2821 */ 2822 if (!IS_ALIGNED(i_size, PAGE_SIZE)) { 2823 struct address_space *mapping = inode->vfs_inode.i_mapping; 2824 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2825 const u32 sectorsize = fs_info->sectorsize; 2826 struct page *page; 2827 2828 ASSERT(sectorsize < PAGE_SIZE); 2829 ASSERT(IS_ALIGNED(i_size, sectorsize)); 2830 2831 /* 2832 * Subpage can't handle page with DIRTY but without UPTODATE 2833 * bit as it can lead to the following deadlock: 2834 * 2835 * btrfs_read_folio() 2836 * | Page already *locked* 2837 * |- btrfs_lock_and_flush_ordered_range() 2838 * |- btrfs_start_ordered_extent() 2839 * |- extent_write_cache_pages() 2840 * |- lock_page() 2841 * We try to lock the page we already hold. 2842 * 2843 * Here we just writeback the whole data reloc inode, so that 2844 * we will be ensured to have no dirty range in the page, and 2845 * are safe to clear the uptodate bits. 2846 * 2847 * This shouldn't cause too much overhead, as we need to write 2848 * the data back anyway. 2849 */ 2850 ret = filemap_write_and_wait(mapping); 2851 if (ret < 0) 2852 return ret; 2853 2854 clear_extent_bits(&inode->io_tree, i_size, 2855 round_up(i_size, PAGE_SIZE) - 1, 2856 EXTENT_UPTODATE); 2857 page = find_lock_page(mapping, i_size >> PAGE_SHIFT); 2858 /* 2859 * If page is freed we don't need to do anything then, as we 2860 * will re-read the whole page anyway. 
2861 */ 2862 if (page) { 2863 btrfs_subpage_clear_uptodate(fs_info, page, i_size, 2864 round_up(i_size, PAGE_SIZE) - i_size); 2865 unlock_page(page); 2866 put_page(page); 2867 } 2868 } 2869 2870 BUG_ON(cluster->start != cluster->boundary[0]); 2871 ret = btrfs_alloc_data_chunk_ondemand(inode, 2872 prealloc_end + 1 - prealloc_start); 2873 if (ret) 2874 return ret; 2875 2876 btrfs_inode_lock(&inode->vfs_inode, 0); 2877 for (nr = 0; nr < cluster->nr; nr++) { 2878 struct extent_state *cached_state = NULL; 2879 2880 start = cluster->boundary[nr] - offset; 2881 if (nr + 1 < cluster->nr) 2882 end = cluster->boundary[nr + 1] - 1 - offset; 2883 else 2884 end = cluster->end - offset; 2885 2886 lock_extent(&inode->io_tree, start, end, &cached_state); 2887 num_bytes = end + 1 - start; 2888 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start, 2889 num_bytes, num_bytes, 2890 end + 1, &alloc_hint); 2891 cur_offset = end + 1; 2892 unlock_extent(&inode->io_tree, start, end, &cached_state); 2893 if (ret) 2894 break; 2895 } 2896 btrfs_inode_unlock(&inode->vfs_inode, 0); 2897 2898 if (cur_offset < prealloc_end) 2899 btrfs_free_reserved_data_space_noquota(inode->root->fs_info, 2900 prealloc_end + 1 - cur_offset); 2901 return ret; 2902 } 2903 2904 static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode, 2905 u64 start, u64 end, u64 block_start) 2906 { 2907 struct extent_map *em; 2908 struct extent_state *cached_state = NULL; 2909 int ret = 0; 2910 2911 em = alloc_extent_map(); 2912 if (!em) 2913 return -ENOMEM; 2914 2915 em->start = start; 2916 em->len = end + 1 - start; 2917 em->block_len = em->len; 2918 em->block_start = block_start; 2919 set_bit(EXTENT_FLAG_PINNED, &em->flags); 2920 2921 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); 2922 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false); 2923 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); 2924 free_extent_map(em); 2925 2926 return ret; 2927 } 2928 2929 /* 2930 * Allow error injection to test balance/relocation cancellation 2931 */ 2932 noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info) 2933 { 2934 return atomic_read(&fs_info->balance_cancel_req) || 2935 atomic_read(&fs_info->reloc_cancel_req) || 2936 fatal_signal_pending(current); 2937 } 2938 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE); 2939 2940 static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster, 2941 int cluster_nr) 2942 { 2943 /* Last extent, use cluster end directly */ 2944 if (cluster_nr >= cluster->nr - 1) 2945 return cluster->end; 2946 2947 /* Use next boundary start*/ 2948 return cluster->boundary[cluster_nr + 1] - 1; 2949 } 2950 2951 static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, 2952 struct file_extent_cluster *cluster, 2953 int *cluster_nr, unsigned long page_index) 2954 { 2955 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2956 u64 offset = BTRFS_I(inode)->index_cnt; 2957 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT; 2958 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 2959 struct page *page; 2960 u64 page_start; 2961 u64 page_end; 2962 u64 cur; 2963 int ret; 2964 2965 ASSERT(page_index <= last_index); 2966 page = find_lock_page(inode->i_mapping, page_index); 2967 if (!page) { 2968 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 2969 page_index, last_index + 1 - page_index); 2970 page = find_or_create_page(inode->i_mapping, page_index, mask); 2971 if (!page) 2972 return -ENOMEM; 
2973 } 2974 ret = set_page_extent_mapped(page); 2975 if (ret < 0) 2976 goto release_page; 2977 2978 if (PageReadahead(page)) 2979 page_cache_async_readahead(inode->i_mapping, ra, NULL, 2980 page_folio(page), page_index, 2981 last_index + 1 - page_index); 2982 2983 if (!PageUptodate(page)) { 2984 btrfs_read_folio(NULL, page_folio(page)); 2985 lock_page(page); 2986 if (!PageUptodate(page)) { 2987 ret = -EIO; 2988 goto release_page; 2989 } 2990 } 2991 2992 page_start = page_offset(page); 2993 page_end = page_start + PAGE_SIZE - 1; 2994 2995 /* 2996 * Start from the cluster, as for subpage case, the cluster can start 2997 * inside the page. 2998 */ 2999 cur = max(page_start, cluster->boundary[*cluster_nr] - offset); 3000 while (cur <= page_end) { 3001 struct extent_state *cached_state = NULL; 3002 u64 extent_start = cluster->boundary[*cluster_nr] - offset; 3003 u64 extent_end = get_cluster_boundary_end(cluster, 3004 *cluster_nr) - offset; 3005 u64 clamped_start = max(page_start, extent_start); 3006 u64 clamped_end = min(page_end, extent_end); 3007 u32 clamped_len = clamped_end + 1 - clamped_start; 3008 3009 /* Reserve metadata for this range */ 3010 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), 3011 clamped_len, clamped_len, 3012 false); 3013 if (ret) 3014 goto release_page; 3015 3016 /* Mark the range delalloc and dirty for later writeback */ 3017 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, 3018 &cached_state); 3019 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start, 3020 clamped_end, 0, &cached_state); 3021 if (ret) { 3022 clear_extent_bit(&BTRFS_I(inode)->io_tree, 3023 clamped_start, clamped_end, 3024 EXTENT_LOCKED | EXTENT_BOUNDARY, 3025 &cached_state); 3026 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3027 clamped_len, true); 3028 btrfs_delalloc_release_extents(BTRFS_I(inode), 3029 clamped_len); 3030 goto release_page; 3031 } 3032 btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len); 3033 3034 /* 3035 * Set the boundary if it's inside the page. 3036 * Data relocation requires the destination extents to have the 3037 * same size as the source. 3038 * EXTENT_BOUNDARY bit prevents current extent from being merged 3039 * with previous extent. 3040 */ 3041 if (in_range(cluster->boundary[*cluster_nr] - offset, 3042 page_start, PAGE_SIZE)) { 3043 u64 boundary_start = cluster->boundary[*cluster_nr] - 3044 offset; 3045 u64 boundary_end = boundary_start + 3046 fs_info->sectorsize - 1; 3047 3048 set_extent_bits(&BTRFS_I(inode)->io_tree, 3049 boundary_start, boundary_end, 3050 EXTENT_BOUNDARY); 3051 } 3052 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, 3053 &cached_state); 3054 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len); 3055 cur += clamped_len; 3056 3057 /* Crossed extent end, go to next extent */ 3058 if (cur >= extent_end) { 3059 (*cluster_nr)++; 3060 /* Just finished the last extent of the cluster, exit. 
*/ 3061 if (*cluster_nr >= cluster->nr) 3062 break; 3063 } 3064 } 3065 unlock_page(page); 3066 put_page(page); 3067 3068 balance_dirty_pages_ratelimited(inode->i_mapping); 3069 btrfs_throttle(fs_info); 3070 if (btrfs_should_cancel_balance(fs_info)) 3071 ret = -ECANCELED; 3072 return ret; 3073 3074 release_page: 3075 unlock_page(page); 3076 put_page(page); 3077 return ret; 3078 } 3079 3080 static int relocate_file_extent_cluster(struct inode *inode, 3081 struct file_extent_cluster *cluster) 3082 { 3083 u64 offset = BTRFS_I(inode)->index_cnt; 3084 unsigned long index; 3085 unsigned long last_index; 3086 struct file_ra_state *ra; 3087 int cluster_nr = 0; 3088 int ret = 0; 3089 3090 if (!cluster->nr) 3091 return 0; 3092 3093 ra = kzalloc(sizeof(*ra), GFP_NOFS); 3094 if (!ra) 3095 return -ENOMEM; 3096 3097 ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster); 3098 if (ret) 3099 goto out; 3100 3101 file_ra_state_init(ra, inode->i_mapping); 3102 3103 ret = setup_relocation_extent_mapping(inode, cluster->start - offset, 3104 cluster->end - offset, cluster->start); 3105 if (ret) 3106 goto out; 3107 3108 last_index = (cluster->end - offset) >> PAGE_SHIFT; 3109 for (index = (cluster->start - offset) >> PAGE_SHIFT; 3110 index <= last_index && !ret; index++) 3111 ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index); 3112 if (ret == 0) 3113 WARN_ON(cluster_nr != cluster->nr); 3114 out: 3115 kfree(ra); 3116 return ret; 3117 } 3118 3119 static noinline_for_stack 3120 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key, 3121 struct file_extent_cluster *cluster) 3122 { 3123 int ret; 3124 3125 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 3126 ret = relocate_file_extent_cluster(inode, cluster); 3127 if (ret) 3128 return ret; 3129 cluster->nr = 0; 3130 } 3131 3132 if (!cluster->nr) 3133 cluster->start = extent_key->objectid; 3134 else 3135 BUG_ON(cluster->nr >= MAX_EXTENTS); 3136 cluster->end = extent_key->objectid + extent_key->offset - 1; 3137 cluster->boundary[cluster->nr] = extent_key->objectid; 3138 cluster->nr++; 3139 3140 if (cluster->nr >= MAX_EXTENTS) { 3141 ret = relocate_file_extent_cluster(inode, cluster); 3142 if (ret) 3143 return ret; 3144 cluster->nr = 0; 3145 } 3146 return 0; 3147 } 3148 3149 /* 3150 * helper to add a tree block to the list. 
3151 * the major work is getting the generation and level of the block 3152 */ 3153 static int add_tree_block(struct reloc_control *rc, 3154 struct btrfs_key *extent_key, 3155 struct btrfs_path *path, 3156 struct rb_root *blocks) 3157 { 3158 struct extent_buffer *eb; 3159 struct btrfs_extent_item *ei; 3160 struct btrfs_tree_block_info *bi; 3161 struct tree_block *block; 3162 struct rb_node *rb_node; 3163 u32 item_size; 3164 int level = -1; 3165 u64 generation; 3166 u64 owner = 0; 3167 3168 eb = path->nodes[0]; 3169 item_size = btrfs_item_size(eb, path->slots[0]); 3170 3171 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || 3172 item_size >= sizeof(*ei) + sizeof(*bi)) { 3173 unsigned long ptr = 0, end; 3174 3175 ei = btrfs_item_ptr(eb, path->slots[0], 3176 struct btrfs_extent_item); 3177 end = (unsigned long)ei + item_size; 3178 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { 3179 bi = (struct btrfs_tree_block_info *)(ei + 1); 3180 level = btrfs_tree_block_level(eb, bi); 3181 ptr = (unsigned long)(bi + 1); 3182 } else { 3183 level = (int)extent_key->offset; 3184 ptr = (unsigned long)(ei + 1); 3185 } 3186 generation = btrfs_extent_generation(eb, ei); 3187 3188 /* 3189 * We're reading random blocks without knowing their owner ahead 3190 * of time. This is ok most of the time, as all reloc roots and 3191 * fs roots have the same lock type. However normal trees do 3192 * not, and the only way to know ahead of time is to read the 3193 * inline ref offset. We know it's an fs root if 3194 * 3195 * 1. There's more than one ref. 3196 * 2. There's a SHARED_DATA_REF_KEY set. 3197 * 3. FULL_BACKREF is set on the flags. 3198 * 3199 * Otherwise it's safe to assume that the ref offset == the 3200 * owner of this block, so we can use that when calling 3201 * read_tree_block. 
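 *
 * Concretely, the single-ref layout we rely on below is roughly:
 *
 *   struct btrfs_extent_item          (refs == 1, no FULL_BACKREF flag)
 *   [struct btrfs_tree_block_info]    (EXTENT_ITEM_KEY only, absent for
 *                                      skinny METADATA_ITEM_KEY items)
 *   inline ref of TREE_BLOCK_REF_KEY  (offset == owning root objectid)
 *
 * which is exactly what the checks below accept before trusting the
 * inline ref offset as the owner.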
3202 */ 3203 if (btrfs_extent_refs(eb, ei) == 1 && 3204 !(btrfs_extent_flags(eb, ei) & 3205 BTRFS_BLOCK_FLAG_FULL_BACKREF) && 3206 ptr < end) { 3207 struct btrfs_extent_inline_ref *iref; 3208 int type; 3209 3210 iref = (struct btrfs_extent_inline_ref *)ptr; 3211 type = btrfs_get_extent_inline_ref_type(eb, iref, 3212 BTRFS_REF_TYPE_BLOCK); 3213 if (type == BTRFS_REF_TYPE_INVALID) 3214 return -EINVAL; 3215 if (type == BTRFS_TREE_BLOCK_REF_KEY) 3216 owner = btrfs_extent_inline_ref_offset(eb, iref); 3217 } 3218 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) { 3219 btrfs_print_v0_err(eb->fs_info); 3220 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL); 3221 return -EINVAL; 3222 } else { 3223 BUG(); 3224 } 3225 3226 btrfs_release_path(path); 3227 3228 BUG_ON(level == -1); 3229 3230 block = kmalloc(sizeof(*block), GFP_NOFS); 3231 if (!block) 3232 return -ENOMEM; 3233 3234 block->bytenr = extent_key->objectid; 3235 block->key.objectid = rc->extent_root->fs_info->nodesize; 3236 block->key.offset = generation; 3237 block->level = level; 3238 block->key_ready = 0; 3239 block->owner = owner; 3240 3241 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node); 3242 if (rb_node) 3243 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr, 3244 -EEXIST); 3245 3246 return 0; 3247 } 3248 3249 /* 3250 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY 3251 */ 3252 static int __add_tree_block(struct reloc_control *rc, 3253 u64 bytenr, u32 blocksize, 3254 struct rb_root *blocks) 3255 { 3256 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3257 struct btrfs_path *path; 3258 struct btrfs_key key; 3259 int ret; 3260 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 3261 3262 if (tree_block_processed(bytenr, rc)) 3263 return 0; 3264 3265 if (rb_simple_search(blocks, bytenr)) 3266 return 0; 3267 3268 path = btrfs_alloc_path(); 3269 if (!path) 3270 return -ENOMEM; 3271 again: 3272 key.objectid = bytenr; 3273 if (skinny) { 3274 key.type = BTRFS_METADATA_ITEM_KEY; 3275 key.offset = (u64)-1; 3276 } else { 3277 key.type = BTRFS_EXTENT_ITEM_KEY; 3278 key.offset = blocksize; 3279 } 3280 3281 path->search_commit_root = 1; 3282 path->skip_locking = 1; 3283 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); 3284 if (ret < 0) 3285 goto out; 3286 3287 if (ret > 0 && skinny) { 3288 if (path->slots[0]) { 3289 path->slots[0]--; 3290 btrfs_item_key_to_cpu(path->nodes[0], &key, 3291 path->slots[0]); 3292 if (key.objectid == bytenr && 3293 (key.type == BTRFS_METADATA_ITEM_KEY || 3294 (key.type == BTRFS_EXTENT_ITEM_KEY && 3295 key.offset == blocksize))) 3296 ret = 0; 3297 } 3298 3299 if (ret) { 3300 skinny = false; 3301 btrfs_release_path(path); 3302 goto again; 3303 } 3304 } 3305 if (ret) { 3306 ASSERT(ret == 1); 3307 btrfs_print_leaf(path->nodes[0]); 3308 btrfs_err(fs_info, 3309 "tree block extent item (%llu) is not found in extent tree", 3310 bytenr); 3311 WARN_ON(1); 3312 ret = -EINVAL; 3313 goto out; 3314 } 3315 3316 ret = add_tree_block(rc, &key, path, blocks); 3317 out: 3318 btrfs_free_path(path); 3319 return ret; 3320 } 3321 3322 static int delete_block_group_cache(struct btrfs_fs_info *fs_info, 3323 struct btrfs_block_group *block_group, 3324 struct inode *inode, 3325 u64 ino) 3326 { 3327 struct btrfs_root *root = fs_info->tree_root; 3328 struct btrfs_trans_handle *trans; 3329 int ret = 0; 3330 3331 if (inode) 3332 goto truncate; 3333 3334 inode = btrfs_iget(fs_info->sb, ino, root); 3335 if (IS_ERR(inode)) 3336 return -ENOENT; 3337 
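/*
 * Free space cache (v1) contents are never relocated: the cache inode is
 * simply truncated so that its data extents stop blocking the relocation,
 * and the cache can be rebuilt later (see delete_v1_space_cache()).
 */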
3338 truncate: 3339 ret = btrfs_check_trunc_cache_free_space(fs_info, 3340 &fs_info->global_block_rsv); 3341 if (ret) 3342 goto out; 3343 3344 trans = btrfs_join_transaction(root); 3345 if (IS_ERR(trans)) { 3346 ret = PTR_ERR(trans); 3347 goto out; 3348 } 3349 3350 ret = btrfs_truncate_free_space_cache(trans, block_group, inode); 3351 3352 btrfs_end_transaction(trans); 3353 btrfs_btree_balance_dirty(fs_info); 3354 out: 3355 iput(inode); 3356 return ret; 3357 } 3358 3359 /* 3360 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the 3361 * cache inode, to avoid free space cache data extent blocking data relocation. 3362 */ 3363 static int delete_v1_space_cache(struct extent_buffer *leaf, 3364 struct btrfs_block_group *block_group, 3365 u64 data_bytenr) 3366 { 3367 u64 space_cache_ino; 3368 struct btrfs_file_extent_item *ei; 3369 struct btrfs_key key; 3370 bool found = false; 3371 int i; 3372 int ret; 3373 3374 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID) 3375 return 0; 3376 3377 for (i = 0; i < btrfs_header_nritems(leaf); i++) { 3378 u8 type; 3379 3380 btrfs_item_key_to_cpu(leaf, &key, i); 3381 if (key.type != BTRFS_EXTENT_DATA_KEY) 3382 continue; 3383 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 3384 type = btrfs_file_extent_type(leaf, ei); 3385 3386 if ((type == BTRFS_FILE_EXTENT_REG || 3387 type == BTRFS_FILE_EXTENT_PREALLOC) && 3388 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) { 3389 found = true; 3390 space_cache_ino = key.objectid; 3391 break; 3392 } 3393 } 3394 if (!found) 3395 return -ENOENT; 3396 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL, 3397 space_cache_ino); 3398 return ret; 3399 } 3400 3401 /* 3402 * helper to find all tree blocks that reference a given data extent 3403 */ 3404 static noinline_for_stack 3405 int add_data_references(struct reloc_control *rc, 3406 struct btrfs_key *extent_key, 3407 struct btrfs_path *path, 3408 struct rb_root *blocks) 3409 { 3410 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3411 struct ulist *leaves = NULL; 3412 struct ulist_iterator leaf_uiter; 3413 struct ulist_node *ref_node = NULL; 3414 const u32 blocksize = fs_info->nodesize; 3415 int ret = 0; 3416 3417 btrfs_release_path(path); 3418 ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid, 3419 0, &leaves, NULL, true); 3420 if (ret < 0) 3421 return ret; 3422 3423 ULIST_ITER_INIT(&leaf_uiter); 3424 while ((ref_node = ulist_next(leaves, &leaf_uiter))) { 3425 struct extent_buffer *eb; 3426 3427 eb = read_tree_block(fs_info, ref_node->val, 0, 0, 0, NULL); 3428 if (IS_ERR(eb)) { 3429 ret = PTR_ERR(eb); 3430 break; 3431 } 3432 ret = delete_v1_space_cache(eb, rc->block_group, 3433 extent_key->objectid); 3434 free_extent_buffer(eb); 3435 if (ret < 0) 3436 break; 3437 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks); 3438 if (ret < 0) 3439 break; 3440 } 3441 if (ret < 0) 3442 free_block_list(blocks); 3443 ulist_free(leaves); 3444 return ret; 3445 } 3446 3447 /* 3448 * helper to find next unprocessed extent 3449 */ 3450 static noinline_for_stack 3451 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, 3452 struct btrfs_key *extent_key) 3453 { 3454 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3455 struct btrfs_key key; 3456 struct extent_buffer *leaf; 3457 u64 start, end, last; 3458 int ret; 3459 3460 last = rc->block_group->start + rc->block_group->length; 3461 while (1) { 3462 cond_resched(); 3463 if (rc->search_start >= last) { 3464 ret = 1; 3465 break; 
3466 } 3467 3468 key.objectid = rc->search_start; 3469 key.type = BTRFS_EXTENT_ITEM_KEY; 3470 key.offset = 0; 3471 3472 path->search_commit_root = 1; 3473 path->skip_locking = 1; 3474 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 3475 0, 0); 3476 if (ret < 0) 3477 break; 3478 next: 3479 leaf = path->nodes[0]; 3480 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3481 ret = btrfs_next_leaf(rc->extent_root, path); 3482 if (ret != 0) 3483 break; 3484 leaf = path->nodes[0]; 3485 } 3486 3487 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3488 if (key.objectid >= last) { 3489 ret = 1; 3490 break; 3491 } 3492 3493 if (key.type != BTRFS_EXTENT_ITEM_KEY && 3494 key.type != BTRFS_METADATA_ITEM_KEY) { 3495 path->slots[0]++; 3496 goto next; 3497 } 3498 3499 if (key.type == BTRFS_EXTENT_ITEM_KEY && 3500 key.objectid + key.offset <= rc->search_start) { 3501 path->slots[0]++; 3502 goto next; 3503 } 3504 3505 if (key.type == BTRFS_METADATA_ITEM_KEY && 3506 key.objectid + fs_info->nodesize <= 3507 rc->search_start) { 3508 path->slots[0]++; 3509 goto next; 3510 } 3511 3512 ret = find_first_extent_bit(&rc->processed_blocks, 3513 key.objectid, &start, &end, 3514 EXTENT_DIRTY, NULL); 3515 3516 if (ret == 0 && start <= key.objectid) { 3517 btrfs_release_path(path); 3518 rc->search_start = end + 1; 3519 } else { 3520 if (key.type == BTRFS_EXTENT_ITEM_KEY) 3521 rc->search_start = key.objectid + key.offset; 3522 else 3523 rc->search_start = key.objectid + 3524 fs_info->nodesize; 3525 memcpy(extent_key, &key, sizeof(key)); 3526 return 0; 3527 } 3528 } 3529 btrfs_release_path(path); 3530 return ret; 3531 } 3532 3533 static void set_reloc_control(struct reloc_control *rc) 3534 { 3535 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3536 3537 mutex_lock(&fs_info->reloc_mutex); 3538 fs_info->reloc_ctl = rc; 3539 mutex_unlock(&fs_info->reloc_mutex); 3540 } 3541 3542 static void unset_reloc_control(struct reloc_control *rc) 3543 { 3544 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3545 3546 mutex_lock(&fs_info->reloc_mutex); 3547 fs_info->reloc_ctl = NULL; 3548 mutex_unlock(&fs_info->reloc_mutex); 3549 } 3550 3551 static noinline_for_stack 3552 int prepare_to_relocate(struct reloc_control *rc) 3553 { 3554 struct btrfs_trans_handle *trans; 3555 int ret; 3556 3557 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info, 3558 BTRFS_BLOCK_RSV_TEMP); 3559 if (!rc->block_rsv) 3560 return -ENOMEM; 3561 3562 memset(&rc->cluster, 0, sizeof(rc->cluster)); 3563 rc->search_start = rc->block_group->start; 3564 rc->extents_found = 0; 3565 rc->nodes_relocated = 0; 3566 rc->merging_rsv_size = 0; 3567 rc->reserved_bytes = 0; 3568 rc->block_rsv->size = rc->extent_root->fs_info->nodesize * 3569 RELOCATION_RESERVED_NODES; 3570 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info, 3571 rc->block_rsv, rc->block_rsv->size, 3572 BTRFS_RESERVE_FLUSH_ALL); 3573 if (ret) 3574 return ret; 3575 3576 rc->create_reloc_tree = 1; 3577 set_reloc_control(rc); 3578 3579 trans = btrfs_join_transaction(rc->extent_root); 3580 if (IS_ERR(trans)) { 3581 unset_reloc_control(rc); 3582 /* 3583 * extent tree is not a ref_cow tree and has no reloc_root to 3584 * cleanup. And callers are responsible to free the above 3585 * block rsv. 
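 * (relocate_block_group() drops it with btrfs_free_block_rsv() on its
 * out_free path.)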
3586 */ 3587 return PTR_ERR(trans); 3588 } 3589 3590 ret = btrfs_commit_transaction(trans); 3591 if (ret) 3592 unset_reloc_control(rc); 3593 3594 return ret; 3595 } 3596 3597 static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 3598 { 3599 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3600 struct rb_root blocks = RB_ROOT; 3601 struct btrfs_key key; 3602 struct btrfs_trans_handle *trans = NULL; 3603 struct btrfs_path *path; 3604 struct btrfs_extent_item *ei; 3605 u64 flags; 3606 int ret; 3607 int err = 0; 3608 int progress = 0; 3609 3610 path = btrfs_alloc_path(); 3611 if (!path) 3612 return -ENOMEM; 3613 path->reada = READA_FORWARD; 3614 3615 ret = prepare_to_relocate(rc); 3616 if (ret) { 3617 err = ret; 3618 goto out_free; 3619 } 3620 3621 while (1) { 3622 rc->reserved_bytes = 0; 3623 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, 3624 rc->block_rsv->size, 3625 BTRFS_RESERVE_FLUSH_ALL); 3626 if (ret) { 3627 err = ret; 3628 break; 3629 } 3630 progress++; 3631 trans = btrfs_start_transaction(rc->extent_root, 0); 3632 if (IS_ERR(trans)) { 3633 err = PTR_ERR(trans); 3634 trans = NULL; 3635 break; 3636 } 3637 restart: 3638 if (update_backref_cache(trans, &rc->backref_cache)) { 3639 btrfs_end_transaction(trans); 3640 trans = NULL; 3641 continue; 3642 } 3643 3644 ret = find_next_extent(rc, path, &key); 3645 if (ret < 0) 3646 err = ret; 3647 if (ret != 0) 3648 break; 3649 3650 rc->extents_found++; 3651 3652 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 3653 struct btrfs_extent_item); 3654 flags = btrfs_extent_flags(path->nodes[0], ei); 3655 3656 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 3657 ret = add_tree_block(rc, &key, path, &blocks); 3658 } else if (rc->stage == UPDATE_DATA_PTRS && 3659 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3660 ret = add_data_references(rc, &key, path, &blocks); 3661 } else { 3662 btrfs_release_path(path); 3663 ret = 0; 3664 } 3665 if (ret < 0) { 3666 err = ret; 3667 break; 3668 } 3669 3670 if (!RB_EMPTY_ROOT(&blocks)) { 3671 ret = relocate_tree_blocks(trans, rc, &blocks); 3672 if (ret < 0) { 3673 if (ret != -EAGAIN) { 3674 err = ret; 3675 break; 3676 } 3677 rc->extents_found--; 3678 rc->search_start = key.objectid; 3679 } 3680 } 3681 3682 btrfs_end_transaction_throttle(trans); 3683 btrfs_btree_balance_dirty(fs_info); 3684 trans = NULL; 3685 3686 if (rc->stage == MOVE_DATA_EXTENTS && 3687 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3688 rc->found_file_extent = 1; 3689 ret = relocate_data_extent(rc->data_inode, 3690 &key, &rc->cluster); 3691 if (ret < 0) { 3692 err = ret; 3693 break; 3694 } 3695 } 3696 if (btrfs_should_cancel_balance(fs_info)) { 3697 err = -ECANCELED; 3698 break; 3699 } 3700 } 3701 if (trans && progress && err == -ENOSPC) { 3702 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags); 3703 if (ret == 1) { 3704 err = 0; 3705 progress = 0; 3706 goto restart; 3707 } 3708 } 3709 3710 btrfs_release_path(path); 3711 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); 3712 3713 if (trans) { 3714 btrfs_end_transaction_throttle(trans); 3715 btrfs_btree_balance_dirty(fs_info); 3716 } 3717 3718 if (!err) { 3719 ret = relocate_file_extent_cluster(rc->data_inode, 3720 &rc->cluster); 3721 if (ret < 0) 3722 err = ret; 3723 } 3724 3725 rc->create_reloc_tree = 0; 3726 set_reloc_control(rc); 3727 3728 btrfs_backref_release_cache(&rc->backref_cache); 3729 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); 3730 3731 /* 3732 * Even in the case when the relocation is cancelled, we should all go 3733 * 
through prepare_to_merge() and merge_reloc_roots(). 3734 * 3735 * For error (including cancelled balance), prepare_to_merge() will 3736 * mark all reloc trees orphan, then queue them for cleanup in 3737 * merge_reloc_roots() 3738 */ 3739 err = prepare_to_merge(rc, err); 3740 3741 merge_reloc_roots(rc); 3742 3743 rc->merge_reloc_tree = 0; 3744 unset_reloc_control(rc); 3745 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); 3746 3747 /* get rid of pinned extents */ 3748 trans = btrfs_join_transaction(rc->extent_root); 3749 if (IS_ERR(trans)) { 3750 err = PTR_ERR(trans); 3751 goto out_free; 3752 } 3753 ret = btrfs_commit_transaction(trans); 3754 if (ret && !err) 3755 err = ret; 3756 out_free: 3757 ret = clean_dirty_subvols(rc); 3758 if (ret < 0 && !err) 3759 err = ret; 3760 btrfs_free_block_rsv(fs_info, rc->block_rsv); 3761 btrfs_free_path(path); 3762 return err; 3763 } 3764 3765 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 3766 struct btrfs_root *root, u64 objectid) 3767 { 3768 struct btrfs_path *path; 3769 struct btrfs_inode_item *item; 3770 struct extent_buffer *leaf; 3771 int ret; 3772 3773 path = btrfs_alloc_path(); 3774 if (!path) 3775 return -ENOMEM; 3776 3777 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 3778 if (ret) 3779 goto out; 3780 3781 leaf = path->nodes[0]; 3782 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 3783 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3784 btrfs_set_inode_generation(leaf, item, 1); 3785 btrfs_set_inode_size(leaf, item, 0); 3786 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 3787 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 3788 BTRFS_INODE_PREALLOC); 3789 btrfs_mark_buffer_dirty(leaf); 3790 out: 3791 btrfs_free_path(path); 3792 return ret; 3793 } 3794 3795 static void delete_orphan_inode(struct btrfs_trans_handle *trans, 3796 struct btrfs_root *root, u64 objectid) 3797 { 3798 struct btrfs_path *path; 3799 struct btrfs_key key; 3800 int ret = 0; 3801 3802 path = btrfs_alloc_path(); 3803 if (!path) { 3804 ret = -ENOMEM; 3805 goto out; 3806 } 3807 3808 key.objectid = objectid; 3809 key.type = BTRFS_INODE_ITEM_KEY; 3810 key.offset = 0; 3811 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3812 if (ret) { 3813 if (ret > 0) 3814 ret = -ENOENT; 3815 goto out; 3816 } 3817 ret = btrfs_del_item(trans, root, path); 3818 out: 3819 if (ret) 3820 btrfs_abort_transaction(trans, ret); 3821 btrfs_free_path(path); 3822 } 3823 3824 /* 3825 * helper to create inode for data relocation. 
3826 * the inode is in data relocation tree and its link count is 0 3827 */ 3828 static noinline_for_stack 3829 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, 3830 struct btrfs_block_group *group) 3831 { 3832 struct inode *inode = NULL; 3833 struct btrfs_trans_handle *trans; 3834 struct btrfs_root *root; 3835 u64 objectid; 3836 int err = 0; 3837 3838 root = btrfs_grab_root(fs_info->data_reloc_root); 3839 trans = btrfs_start_transaction(root, 6); 3840 if (IS_ERR(trans)) { 3841 btrfs_put_root(root); 3842 return ERR_CAST(trans); 3843 } 3844 3845 err = btrfs_get_free_objectid(root, &objectid); 3846 if (err) 3847 goto out; 3848 3849 err = __insert_orphan_inode(trans, root, objectid); 3850 if (err) 3851 goto out; 3852 3853 inode = btrfs_iget(fs_info->sb, objectid, root); 3854 if (IS_ERR(inode)) { 3855 delete_orphan_inode(trans, root, objectid); 3856 err = PTR_ERR(inode); 3857 inode = NULL; 3858 goto out; 3859 } 3860 BTRFS_I(inode)->index_cnt = group->start; 3861 3862 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 3863 out: 3864 btrfs_put_root(root); 3865 btrfs_end_transaction(trans); 3866 btrfs_btree_balance_dirty(fs_info); 3867 if (err) { 3868 iput(inode); 3869 inode = ERR_PTR(err); 3870 } 3871 return inode; 3872 } 3873 3874 /* 3875 * Mark start of chunk relocation that is cancellable. Check if the cancellation 3876 * has been requested meanwhile and don't start in that case. 3877 * 3878 * Return: 3879 * 0 success 3880 * -EINPROGRESS operation is already in progress, that's probably a bug 3881 * -ECANCELED cancellation request was set before the operation started 3882 */ 3883 static int reloc_chunk_start(struct btrfs_fs_info *fs_info) 3884 { 3885 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) { 3886 /* This should not happen */ 3887 btrfs_err(fs_info, "reloc already running, cannot start"); 3888 return -EINPROGRESS; 3889 } 3890 3891 if (atomic_read(&fs_info->reloc_cancel_req) > 0) { 3892 btrfs_info(fs_info, "chunk relocation canceled on start"); 3893 /* 3894 * On cancel, clear all requests but let the caller mark 3895 * the end after cleanup operations. 3896 */ 3897 atomic_set(&fs_info->reloc_cancel_req, 0); 3898 return -ECANCELED; 3899 } 3900 return 0; 3901 } 3902 3903 /* 3904 * Mark end of chunk relocation that is cancellable and wake any waiters. 
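 * Pairs with reloc_chunk_start(); any cancel request that arrived while the
 * relocation was running is reported and cleared here.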
3905 */ 3906 static void reloc_chunk_end(struct btrfs_fs_info *fs_info) 3907 { 3908 /* Requested after start, clear bit first so any waiters can continue */ 3909 if (atomic_read(&fs_info->reloc_cancel_req) > 0) 3910 btrfs_info(fs_info, "chunk relocation canceled during operation"); 3911 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags); 3912 atomic_set(&fs_info->reloc_cancel_req, 0); 3913 } 3914 3915 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) 3916 { 3917 struct reloc_control *rc; 3918 3919 rc = kzalloc(sizeof(*rc), GFP_NOFS); 3920 if (!rc) 3921 return NULL; 3922 3923 INIT_LIST_HEAD(&rc->reloc_roots); 3924 INIT_LIST_HEAD(&rc->dirty_subvol_roots); 3925 btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1); 3926 mapping_tree_init(&rc->reloc_root_tree); 3927 extent_io_tree_init(fs_info, &rc->processed_blocks, 3928 IO_TREE_RELOC_BLOCKS, NULL); 3929 return rc; 3930 } 3931 3932 static void free_reloc_control(struct reloc_control *rc) 3933 { 3934 struct mapping_node *node, *tmp; 3935 3936 free_reloc_roots(&rc->reloc_roots); 3937 rbtree_postorder_for_each_entry_safe(node, tmp, 3938 &rc->reloc_root_tree.rb_root, rb_node) 3939 kfree(node); 3940 3941 kfree(rc); 3942 } 3943 3944 /* 3945 * Print the block group being relocated 3946 */ 3947 static void describe_relocation(struct btrfs_fs_info *fs_info, 3948 struct btrfs_block_group *block_group) 3949 { 3950 char buf[128] = {'\0'}; 3951 3952 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf)); 3953 3954 btrfs_info(fs_info, 3955 "relocating block group %llu flags %s", 3956 block_group->start, buf); 3957 } 3958 3959 static const char *stage_to_string(int stage) 3960 { 3961 if (stage == MOVE_DATA_EXTENTS) 3962 return "move data extents"; 3963 if (stage == UPDATE_DATA_PTRS) 3964 return "update data pointers"; 3965 return "unknown"; 3966 } 3967 3968 /* 3969 * function to relocate all extents in a block group. 3970 */ 3971 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) 3972 { 3973 struct btrfs_block_group *bg; 3974 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start); 3975 struct reloc_control *rc; 3976 struct inode *inode; 3977 struct btrfs_path *path; 3978 int ret; 3979 int rw = 0; 3980 int err = 0; 3981 3982 /* 3983 * This only gets set if we had a half-deleted snapshot on mount. We 3984 * cannot allow relocation to start while we're still trying to clean up 3985 * these pending deletions. 3986 */ 3987 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE); 3988 if (ret) 3989 return ret; 3990 3991 /* We may have been woken up by close_ctree, so bail if we're closing. */ 3992 if (btrfs_fs_closing(fs_info)) 3993 return -EINTR; 3994 3995 bg = btrfs_lookup_block_group(fs_info, group_start); 3996 if (!bg) 3997 return -ENOENT; 3998 3999 /* 4000 * Relocation of a data block group creates ordered extents. Without 4001 * sb_start_write(), we can freeze the filesystem while unfinished 4002 * ordered extents are left. Such ordered extents can cause a deadlock 4003 * e.g. when syncfs() is waiting for their completion but they can't 4004 * finish because they block when joining a transaction, due to the 4005 * fact that the freeze locks are being held in write mode. 
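 *
 * This is also why the assertion below only checks sb_write_started() for
 * DATA block groups; metadata-only relocation creates no ordered extents.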
4006 */ 4007 if (bg->flags & BTRFS_BLOCK_GROUP_DATA) 4008 ASSERT(sb_write_started(fs_info->sb)); 4009 4010 if (btrfs_pinned_by_swapfile(fs_info, bg)) { 4011 btrfs_put_block_group(bg); 4012 return -ETXTBSY; 4013 } 4014 4015 rc = alloc_reloc_control(fs_info); 4016 if (!rc) { 4017 btrfs_put_block_group(bg); 4018 return -ENOMEM; 4019 } 4020 4021 ret = reloc_chunk_start(fs_info); 4022 if (ret < 0) { 4023 err = ret; 4024 goto out_put_bg; 4025 } 4026 4027 rc->extent_root = extent_root; 4028 rc->block_group = bg; 4029 4030 ret = btrfs_inc_block_group_ro(rc->block_group, true); 4031 if (ret) { 4032 err = ret; 4033 goto out; 4034 } 4035 rw = 1; 4036 4037 path = btrfs_alloc_path(); 4038 if (!path) { 4039 err = -ENOMEM; 4040 goto out; 4041 } 4042 4043 inode = lookup_free_space_inode(rc->block_group, path); 4044 btrfs_free_path(path); 4045 4046 if (!IS_ERR(inode)) 4047 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0); 4048 else 4049 ret = PTR_ERR(inode); 4050 4051 if (ret && ret != -ENOENT) { 4052 err = ret; 4053 goto out; 4054 } 4055 4056 rc->data_inode = create_reloc_inode(fs_info, rc->block_group); 4057 if (IS_ERR(rc->data_inode)) { 4058 err = PTR_ERR(rc->data_inode); 4059 rc->data_inode = NULL; 4060 goto out; 4061 } 4062 4063 describe_relocation(fs_info, rc->block_group); 4064 4065 btrfs_wait_block_group_reservations(rc->block_group); 4066 btrfs_wait_nocow_writers(rc->block_group); 4067 btrfs_wait_ordered_roots(fs_info, U64_MAX, 4068 rc->block_group->start, 4069 rc->block_group->length); 4070 4071 ret = btrfs_zone_finish(rc->block_group); 4072 WARN_ON(ret && ret != -EAGAIN); 4073 4074 while (1) { 4075 int finishes_stage; 4076 4077 mutex_lock(&fs_info->cleaner_mutex); 4078 ret = relocate_block_group(rc); 4079 mutex_unlock(&fs_info->cleaner_mutex); 4080 if (ret < 0) 4081 err = ret; 4082 4083 finishes_stage = rc->stage; 4084 /* 4085 * We may have gotten ENOSPC after we already dirtied some 4086 * extents. If writeout happens while we're relocating a 4087 * different block group we could end up hitting the 4088 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in 4089 * btrfs_reloc_cow_block. Make sure we write everything out 4090 * properly so we don't trip over this problem, and then break 4091 * out of the loop if we hit an error. 
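 *
 * Concretely, the recovery below is: wait for every ordered extent on the
 * data reloc inode, drop its page cache, and force the stage to
 * UPDATE_DATA_PTRS so the next pass only rewrites the data pointers.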
4092 */ 4093 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { 4094 ret = btrfs_wait_ordered_range(rc->data_inode, 0, 4095 (u64)-1); 4096 if (ret) 4097 err = ret; 4098 invalidate_mapping_pages(rc->data_inode->i_mapping, 4099 0, -1); 4100 rc->stage = UPDATE_DATA_PTRS; 4101 } 4102 4103 if (err < 0) 4104 goto out; 4105 4106 if (rc->extents_found == 0) 4107 break; 4108 4109 btrfs_info(fs_info, "found %llu extents, stage: %s", 4110 rc->extents_found, stage_to_string(finishes_stage)); 4111 } 4112 4113 WARN_ON(rc->block_group->pinned > 0); 4114 WARN_ON(rc->block_group->reserved > 0); 4115 WARN_ON(rc->block_group->used > 0); 4116 out: 4117 if (err && rw) 4118 btrfs_dec_block_group_ro(rc->block_group); 4119 iput(rc->data_inode); 4120 out_put_bg: 4121 btrfs_put_block_group(bg); 4122 reloc_chunk_end(fs_info); 4123 free_reloc_control(rc); 4124 return err; 4125 } 4126 4127 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) 4128 { 4129 struct btrfs_fs_info *fs_info = root->fs_info; 4130 struct btrfs_trans_handle *trans; 4131 int ret, err; 4132 4133 trans = btrfs_start_transaction(fs_info->tree_root, 0); 4134 if (IS_ERR(trans)) 4135 return PTR_ERR(trans); 4136 4137 memset(&root->root_item.drop_progress, 0, 4138 sizeof(root->root_item.drop_progress)); 4139 btrfs_set_root_drop_level(&root->root_item, 0); 4140 btrfs_set_root_refs(&root->root_item, 0); 4141 ret = btrfs_update_root(trans, fs_info->tree_root, 4142 &root->root_key, &root->root_item); 4143 4144 err = btrfs_end_transaction(trans); 4145 if (err) 4146 return err; 4147 return ret; 4148 } 4149 4150 /* 4151 * recover relocation interrupted by system crash. 4152 * 4153 * this function resumes merging reloc trees with corresponding fs trees. 4154 * this is important for keeping the sharing of tree blocks 4155 */ 4156 int btrfs_recover_relocation(struct btrfs_fs_info *fs_info) 4157 { 4158 LIST_HEAD(reloc_roots); 4159 struct btrfs_key key; 4160 struct btrfs_root *fs_root; 4161 struct btrfs_root *reloc_root; 4162 struct btrfs_path *path; 4163 struct extent_buffer *leaf; 4164 struct reloc_control *rc = NULL; 4165 struct btrfs_trans_handle *trans; 4166 int ret; 4167 int err = 0; 4168 4169 path = btrfs_alloc_path(); 4170 if (!path) 4171 return -ENOMEM; 4172 path->reada = READA_BACK; 4173 4174 key.objectid = BTRFS_TREE_RELOC_OBJECTID; 4175 key.type = BTRFS_ROOT_ITEM_KEY; 4176 key.offset = (u64)-1; 4177 4178 while (1) { 4179 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, 4180 path, 0, 0); 4181 if (ret < 0) { 4182 err = ret; 4183 goto out; 4184 } 4185 if (ret > 0) { 4186 if (path->slots[0] == 0) 4187 break; 4188 path->slots[0]--; 4189 } 4190 leaf = path->nodes[0]; 4191 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4192 btrfs_release_path(path); 4193 4194 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || 4195 key.type != BTRFS_ROOT_ITEM_KEY) 4196 break; 4197 4198 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key); 4199 if (IS_ERR(reloc_root)) { 4200 err = PTR_ERR(reloc_root); 4201 goto out; 4202 } 4203 4204 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); 4205 list_add(&reloc_root->root_list, &reloc_roots); 4206 4207 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 4208 fs_root = btrfs_get_fs_root(fs_info, 4209 reloc_root->root_key.offset, false); 4210 if (IS_ERR(fs_root)) { 4211 ret = PTR_ERR(fs_root); 4212 if (ret != -ENOENT) { 4213 err = ret; 4214 goto out; 4215 } 4216 ret = mark_garbage_root(reloc_root); 4217 if (ret < 0) { 4218 err = ret; 4219 goto out; 4220 } 4221 } else { 4222 
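				/*
				 * The subvolume this reloc root belongs to
				 * still exists; the lookup above was only a
				 * check for that, so drop the reference it
				 * took again.
				 */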
btrfs_put_root(fs_root); 4223 } 4224 } 4225 4226 if (key.offset == 0) 4227 break; 4228 4229 key.offset--; 4230 } 4231 btrfs_release_path(path); 4232 4233 if (list_empty(&reloc_roots)) 4234 goto out; 4235 4236 rc = alloc_reloc_control(fs_info); 4237 if (!rc) { 4238 err = -ENOMEM; 4239 goto out; 4240 } 4241 4242 ret = reloc_chunk_start(fs_info); 4243 if (ret < 0) { 4244 err = ret; 4245 goto out_end; 4246 } 4247 4248 rc->extent_root = btrfs_extent_root(fs_info, 0); 4249 4250 set_reloc_control(rc); 4251 4252 trans = btrfs_join_transaction(rc->extent_root); 4253 if (IS_ERR(trans)) { 4254 err = PTR_ERR(trans); 4255 goto out_unset; 4256 } 4257 4258 rc->merge_reloc_tree = 1; 4259 4260 while (!list_empty(&reloc_roots)) { 4261 reloc_root = list_entry(reloc_roots.next, 4262 struct btrfs_root, root_list); 4263 list_del(&reloc_root->root_list); 4264 4265 if (btrfs_root_refs(&reloc_root->root_item) == 0) { 4266 list_add_tail(&reloc_root->root_list, 4267 &rc->reloc_roots); 4268 continue; 4269 } 4270 4271 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, 4272 false); 4273 if (IS_ERR(fs_root)) { 4274 err = PTR_ERR(fs_root); 4275 list_add_tail(&reloc_root->root_list, &reloc_roots); 4276 btrfs_end_transaction(trans); 4277 goto out_unset; 4278 } 4279 4280 err = __add_reloc_root(reloc_root); 4281 ASSERT(err != -EEXIST); 4282 if (err) { 4283 list_add_tail(&reloc_root->root_list, &reloc_roots); 4284 btrfs_put_root(fs_root); 4285 btrfs_end_transaction(trans); 4286 goto out_unset; 4287 } 4288 fs_root->reloc_root = btrfs_grab_root(reloc_root); 4289 btrfs_put_root(fs_root); 4290 } 4291 4292 err = btrfs_commit_transaction(trans); 4293 if (err) 4294 goto out_unset; 4295 4296 merge_reloc_roots(rc); 4297 4298 unset_reloc_control(rc); 4299 4300 trans = btrfs_join_transaction(rc->extent_root); 4301 if (IS_ERR(trans)) { 4302 err = PTR_ERR(trans); 4303 goto out_clean; 4304 } 4305 err = btrfs_commit_transaction(trans); 4306 out_clean: 4307 ret = clean_dirty_subvols(rc); 4308 if (ret < 0 && !err) 4309 err = ret; 4310 out_unset: 4311 unset_reloc_control(rc); 4312 out_end: 4313 reloc_chunk_end(fs_info); 4314 free_reloc_control(rc); 4315 out: 4316 free_reloc_roots(&reloc_roots); 4317 4318 btrfs_free_path(path); 4319 4320 if (err == 0) { 4321 /* cleanup orphan inode in data relocation tree */ 4322 fs_root = btrfs_grab_root(fs_info->data_reloc_root); 4323 ASSERT(fs_root); 4324 err = btrfs_orphan_cleanup(fs_root); 4325 btrfs_put_root(fs_root); 4326 } 4327 return err; 4328 } 4329 4330 /* 4331 * helper to add ordered checksum for data relocation. 4332 * 4333 * cloning checksum properly handles the nodatasum extents. 4334 * it also saves CPU time to re-calculate the checksum. 
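 *
 * As a rough illustration with made-up numbers: if the csums of the old
 * copy of an extent are keyed at disk bytenr 1M and the relocated copy is
 * written by an ordered extent starting at disk bytenr 8M, the csum entry
 * found at 1M + 64K is cloned and rebased to 8M + 64K (see the new_bytenr
 * calculation below).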
4335 */ 4336 int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len) 4337 { 4338 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4339 struct btrfs_root *csum_root; 4340 struct btrfs_ordered_sum *sums; 4341 struct btrfs_ordered_extent *ordered; 4342 int ret; 4343 u64 disk_bytenr; 4344 u64 new_bytenr; 4345 LIST_HEAD(list); 4346 4347 ordered = btrfs_lookup_ordered_extent(inode, file_pos); 4348 BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len); 4349 4350 disk_bytenr = file_pos + inode->index_cnt; 4351 csum_root = btrfs_csum_root(fs_info, disk_bytenr); 4352 ret = btrfs_lookup_csums_range(csum_root, disk_bytenr, 4353 disk_bytenr + len - 1, &list, 0, false); 4354 if (ret) 4355 goto out; 4356 4357 while (!list_empty(&list)) { 4358 sums = list_entry(list.next, struct btrfs_ordered_sum, list); 4359 list_del_init(&sums->list); 4360 4361 /* 4362 * We need to offset the new_bytenr based on where the csum is. 4363 * We need to do this because we will read in entire prealloc 4364 * extents but we may have written to say the middle of the 4365 * prealloc extent, so we need to make sure the csum goes with 4366 * the right disk offset. 4367 * 4368 * We can do this because the data reloc inode refers strictly 4369 * to the on disk bytes, so we don't have to worry about 4370 * disk_len vs real len like with real inodes since it's all 4371 * disk length. 4372 */ 4373 new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr; 4374 sums->bytenr = new_bytenr; 4375 4376 btrfs_add_ordered_sum(ordered, sums); 4377 } 4378 out: 4379 btrfs_put_ordered_extent(ordered); 4380 return ret; 4381 } 4382 4383 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, 4384 struct btrfs_root *root, struct extent_buffer *buf, 4385 struct extent_buffer *cow) 4386 { 4387 struct btrfs_fs_info *fs_info = root->fs_info; 4388 struct reloc_control *rc; 4389 struct btrfs_backref_node *node; 4390 int first_cow = 0; 4391 int level; 4392 int ret = 0; 4393 4394 rc = fs_info->reloc_ctl; 4395 if (!rc) 4396 return 0; 4397 4398 BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root)); 4399 4400 level = btrfs_header_level(buf); 4401 if (btrfs_header_generation(buf) <= 4402 btrfs_root_last_snapshot(&root->root_item)) 4403 first_cow = 1; 4404 4405 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID && 4406 rc->create_reloc_tree) { 4407 WARN_ON(!first_cow && level == 0); 4408 4409 node = rc->backref_cache.path[level]; 4410 BUG_ON(node->bytenr != buf->start && 4411 node->new_bytenr != buf->start); 4412 4413 btrfs_backref_drop_node_buffer(node); 4414 atomic_inc(&cow->refs); 4415 node->eb = cow; 4416 node->new_bytenr = cow->start; 4417 4418 if (!node->pending) { 4419 list_move_tail(&node->list, 4420 &rc->backref_cache.pending[level]); 4421 node->pending = 1; 4422 } 4423 4424 if (first_cow) 4425 mark_block_processed(rc, node); 4426 4427 if (first_cow && level > 0) 4428 rc->nodes_relocated += buf->len; 4429 } 4430 4431 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) 4432 ret = replace_file_extents(trans, rc, root, cow); 4433 return ret; 4434 } 4435 4436 /* 4437 * called before creating snapshot. 
it calculates the metadata reservation 4438 * required for relocating tree blocks in the snapshot 4439 */ 4440 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, 4441 u64 *bytes_to_reserve) 4442 { 4443 struct btrfs_root *root = pending->root; 4444 struct reloc_control *rc = root->fs_info->reloc_ctl; 4445 4446 if (!rc || !have_reloc_root(root)) 4447 return; 4448 4449 if (!rc->merge_reloc_tree) 4450 return; 4451 4452 root = root->reloc_root; 4453 BUG_ON(btrfs_root_refs(&root->root_item) == 0); 4454 /* 4455 * relocation is in the stage of merging trees. the space 4456 * used by merging a reloc tree is twice the size of 4457 * relocated tree nodes in the worst case. half for cowing 4458 * the reloc tree, half for cowing the fs tree. the space 4459 * used by cowing the reloc tree will be freed after the 4460 * tree is dropped. if we create a snapshot, cowing the fs 4461 * tree may use more space than it frees. so we need to 4462 * reserve extra space. 4463 */ 4464 *bytes_to_reserve += rc->nodes_relocated; 4465 } 4466 4467 /* 4468 * called after a snapshot is created. migrate the block reservation 4469 * and create a reloc root for the newly created snapshot 4470 * 4471 * This is similar to btrfs_init_reloc_root(); we come out of here with two 4472 * references held on the reloc_root, one for root->reloc_root and one for 4473 * rc->reloc_roots. 4474 */ 4475 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, 4476 struct btrfs_pending_snapshot *pending) 4477 { 4478 struct btrfs_root *root = pending->root; 4479 struct btrfs_root *reloc_root; 4480 struct btrfs_root *new_root; 4481 struct reloc_control *rc = root->fs_info->reloc_ctl; 4482 int ret; 4483 4484 if (!rc || !have_reloc_root(root)) 4485 return 0; 4486 4487 rc = root->fs_info->reloc_ctl; 4488 rc->merging_rsv_size += rc->nodes_relocated; 4489 4490 if (rc->merge_reloc_tree) { 4491 ret = btrfs_block_rsv_migrate(&pending->block_rsv, 4492 rc->block_rsv, 4493 rc->nodes_relocated, true); 4494 if (ret) 4495 return ret; 4496 } 4497 4498 new_root = pending->snap; 4499 reloc_root = create_reloc_root(trans, root->reloc_root, 4500 new_root->root_key.objectid); 4501 if (IS_ERR(reloc_root)) 4502 return PTR_ERR(reloc_root); 4503 4504 ret = __add_reloc_root(reloc_root); 4505 ASSERT(ret != -EEXIST); 4506 if (ret) { 4507 /* Pairs with create_reloc_root */ 4508 btrfs_put_root(reloc_root); 4509 return ret; 4510 } 4511 new_root->reloc_root = btrfs_grab_root(reloc_root); 4512 4513 if (rc->create_reloc_tree) 4514 ret = clone_backref_node(trans, rc, root, reloc_root); 4515 return ret; 4516 } 4517
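/*
 * Illustration of the snapshot-time reservation above, with made-up numbers:
 * if rc->nodes_relocated is 16M when a snapshot is created while reloc trees
 * are being merged, btrfs_reloc_pre_snapshot() asks for an extra 16M to be
 * reserved for the pending snapshot, and btrfs_reloc_post_snapshot() then
 * migrates those 16M from pending->block_rsv into rc->block_rsv so the merge
 * has the extra space described in the comment in btrfs_reloc_pre_snapshot().
 */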