// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "qgroup.h"
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "backref.h"
#include "misc.h"
#include "subpage.h"
#include "zoned.h"
#include "inode-item.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "relocation.h"
#include "super.h"
#include "tree-checker.h"

/*
 * Relocation overview
 *
 * [What does relocation do]
 *
 * The objective of relocation is to relocate all extents of the target block
 * group to other block groups.
 * This is used by resize (shrink only), profile conversion, space compaction,
 * and the balance routine to spread chunks over devices.
 *
 *		Before		|		After
 * ------------------------------------------------------------------
 *  BG A: 10 data extents	| BG A: deleted
 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
 *  BG C:  1 data extent	| BG C:  3 data extents (1 old + 2 relocated)
 *
 * [How does relocation work]
 *
 * 1.   Mark the target block group read-only
 *      New extents won't be allocated from the target block group.
 *
 * 2.1  Record each extent in the target block group
 *      To build a proper map of extents to be relocated.
 *
 * 2.2  Build data reloc tree and reloc trees
 *      Data reloc tree will contain an inode, recording all newly relocated
 *      data extents.
 *      There will be only one data reloc tree for one data block group.
 *
 *      Reloc tree will be a special snapshot of its source tree, containing
 *      relocated tree blocks.
 *      Each tree referring to a tree block in the target block group will get
 *      its reloc tree built.
 *
 * 2.3  Swap source tree with its corresponding reloc tree
 *      Each involved tree only refers to new extents after swap.
 *
 * 3.   Clean up reloc trees and the data reloc tree.
 *      As old extents in the target block group are still referenced by reloc
 *      trees, we need to clean them up before really freeing the target block
 *      group.
 *
 * The main complexity is in steps 2.2 and 2.3.
 *
 * The entry point of relocation is the relocate_block_group() function.
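 *
 * Data relocation itself runs in two stages, tracked in reloc_control::stage
 * (MOVE_DATA_EXTENTS and UPDATE_DATA_PTRS, defined below): the data is first
 * copied into the data reloc inode, and the file extent items of the affected
 * trees are then rewritten to point to the new locations (see
 * replace_file_extents()).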
 */

#define RELOCATION_RESERVED_NODES	256
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * present a tree block to process
 */
struct tree_block {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	u64 owner;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct btrfs_backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void mark_block_processed(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_range(node->bytenr, rc->block_group->start,
		     rc->block_group->length)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		set_extent_bit(&rc->processed_blocks, node->bytenr,
			       node->bytenr + blocksize - 1, EXTENT_DIRTY,
			       NULL, GFP_NOFS);
	}
	node->processed = 1;
}


static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

/*
 * walk up backref nodes until we reach a node that represents a tree root
 */
static struct btrfs_backref_node *walk_up_backref(
		struct btrfs_backref_node *node,
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct btrfs_backref_node *walk_down_backref(
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
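		/*
		 * If this edge is the last one referencing @lower, pop back up
		 * one level; otherwise move to the next sibling edge and
		 * return its upper node as the start of the next path.
		 */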
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}

static void update_backref_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree.
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct btrfs_backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}

static bool reloc_root_is_dead(struct btrfs_root *root)
{
	/*
	 * Pairs with set_bit/clear_bit in clean_dirty_subvols and
	 * btrfs_update_reloc_root. We need to see the updated bit before
	 * trying to access reloc_root.
	 */
	smp_rmb();
	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return true;
	return false;
}

/*
 * Check if this subvolume tree has a valid reloc tree.
 *
 * A reloc tree after swap is considered dead, thus not considered as valid.
 * This is enough for most callers, as they don't distinguish a dead reloc
 * root from no reloc root. But btrfs_should_ignore_reloc_root() below is a
 * special case.
323 */ 324 static bool have_reloc_root(struct btrfs_root *root) 325 { 326 if (reloc_root_is_dead(root)) 327 return false; 328 if (!root->reloc_root) 329 return false; 330 return true; 331 } 332 333 int btrfs_should_ignore_reloc_root(struct btrfs_root *root) 334 { 335 struct btrfs_root *reloc_root; 336 337 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 338 return 0; 339 340 /* This root has been merged with its reloc tree, we can ignore it */ 341 if (reloc_root_is_dead(root)) 342 return 1; 343 344 reloc_root = root->reloc_root; 345 if (!reloc_root) 346 return 0; 347 348 if (btrfs_header_generation(reloc_root->commit_root) == 349 root->fs_info->running_transaction->transid) 350 return 0; 351 /* 352 * if there is reloc tree and it was created in previous 353 * transaction backref lookup can find the reloc tree, 354 * so backref node for the fs tree root is useless for 355 * relocation. 356 */ 357 return 1; 358 } 359 360 /* 361 * find reloc tree by address of tree root 362 */ 363 struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr) 364 { 365 struct reloc_control *rc = fs_info->reloc_ctl; 366 struct rb_node *rb_node; 367 struct mapping_node *node; 368 struct btrfs_root *root = NULL; 369 370 ASSERT(rc); 371 spin_lock(&rc->reloc_root_tree.lock); 372 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr); 373 if (rb_node) { 374 node = rb_entry(rb_node, struct mapping_node, rb_node); 375 root = node->data; 376 } 377 spin_unlock(&rc->reloc_root_tree.lock); 378 return btrfs_grab_root(root); 379 } 380 381 /* 382 * For useless nodes, do two major clean ups: 383 * 384 * - Cleanup the children edges and nodes 385 * If child node is also orphan (no parent) during cleanup, then the child 386 * node will also be cleaned up. 387 * 388 * - Freeing up leaves (level 0), keeps nodes detached 389 * For nodes, the node is still cached as "detached" 390 * 391 * Return false if @node is not in the @useless_nodes list. 392 * Return true if @node is in the @useless_nodes list. 393 */ 394 static bool handle_useless_nodes(struct reloc_control *rc, 395 struct btrfs_backref_node *node) 396 { 397 struct btrfs_backref_cache *cache = &rc->backref_cache; 398 struct list_head *useless_node = &cache->useless_node; 399 bool ret = false; 400 401 while (!list_empty(useless_node)) { 402 struct btrfs_backref_node *cur; 403 404 cur = list_first_entry(useless_node, struct btrfs_backref_node, 405 list); 406 list_del_init(&cur->list); 407 408 /* Only tree root nodes can be added to @useless_nodes */ 409 ASSERT(list_empty(&cur->upper)); 410 411 if (cur == node) 412 ret = true; 413 414 /* The node is the lowest node */ 415 if (cur->lowest) { 416 list_del_init(&cur->lower); 417 cur->lowest = 0; 418 } 419 420 /* Cleanup the lower edges */ 421 while (!list_empty(&cur->lower)) { 422 struct btrfs_backref_edge *edge; 423 struct btrfs_backref_node *lower; 424 425 edge = list_entry(cur->lower.next, 426 struct btrfs_backref_edge, list[UPPER]); 427 list_del(&edge->list[UPPER]); 428 list_del(&edge->list[LOWER]); 429 lower = edge->node[LOWER]; 430 btrfs_backref_free_edge(cache, edge); 431 432 /* Child node is also orphan, queue for cleanup */ 433 if (list_empty(&lower->upper)) 434 list_add(&lower->list, useless_node); 435 } 436 /* Mark this block processed for relocation */ 437 mark_block_processed(rc, cur); 438 439 /* 440 * Backref nodes for tree leaves are deleted from the cache. 441 * Backref nodes for upper level tree blocks are left in the 442 * cache to avoid unnecessary backref lookup. 
		 */
		if (cur->level > 0) {
			list_add(&cur->list, &cache->detached);
			cur->detached = 1;
		} else {
			rb_erase(&cur->rb_node, &cache->rb_root);
			btrfs_backref_free_node(cache, cur);
		}
	}
	return ret;
}

/*
 * Build backref tree for a given tree block. Root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond to
 * roots of b-trees that reference the tree block.
 *
 * The basic idea of this function is to check backrefs of a given block to
 * find upper level blocks that reference the block, and then check backrefs
 * of these upper level blocks recursively. The recursion stops when a tree
 * root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 * all upper level blocks that directly/indirectly reference the block are also
 * cached.
 */
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
			struct reloc_control *rc, struct btrfs_key *node_key,
			int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	/* For searching parent of TREE_BLOCK_REF */
	struct btrfs_path *path;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_edge *edge;
	int ret;
	int err = 0;

	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->lowest = 1;
	cur = node;

	/* Breadth-first search to build backref cache */
	do {
		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
						  cur);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/*
		 * The pending list isn't empty, take the first block to
		 * process
		 */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (handle_useless_nodes(rc, node))
		node = NULL;
out:
	btrfs_backref_iter_free(iter);
	btrfs_free_path(path);
	if (err) {
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
	return node;
}

/*
 * helper to add backref node for the newly created snapshot.
545 * the backref node is created by cloning backref node that 546 * corresponds to root of source tree 547 */ 548 static int clone_backref_node(struct btrfs_trans_handle *trans, 549 struct reloc_control *rc, 550 struct btrfs_root *src, 551 struct btrfs_root *dest) 552 { 553 struct btrfs_root *reloc_root = src->reloc_root; 554 struct btrfs_backref_cache *cache = &rc->backref_cache; 555 struct btrfs_backref_node *node = NULL; 556 struct btrfs_backref_node *new_node; 557 struct btrfs_backref_edge *edge; 558 struct btrfs_backref_edge *new_edge; 559 struct rb_node *rb_node; 560 561 if (cache->last_trans > 0) 562 update_backref_cache(trans, cache); 563 564 rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start); 565 if (rb_node) { 566 node = rb_entry(rb_node, struct btrfs_backref_node, rb_node); 567 if (node->detached) 568 node = NULL; 569 else 570 BUG_ON(node->new_bytenr != reloc_root->node->start); 571 } 572 573 if (!node) { 574 rb_node = rb_simple_search(&cache->rb_root, 575 reloc_root->commit_root->start); 576 if (rb_node) { 577 node = rb_entry(rb_node, struct btrfs_backref_node, 578 rb_node); 579 BUG_ON(node->detached); 580 } 581 } 582 583 if (!node) 584 return 0; 585 586 new_node = btrfs_backref_alloc_node(cache, dest->node->start, 587 node->level); 588 if (!new_node) 589 return -ENOMEM; 590 591 new_node->lowest = node->lowest; 592 new_node->checked = 1; 593 new_node->root = btrfs_grab_root(dest); 594 ASSERT(new_node->root); 595 596 if (!node->lowest) { 597 list_for_each_entry(edge, &node->lower, list[UPPER]) { 598 new_edge = btrfs_backref_alloc_edge(cache); 599 if (!new_edge) 600 goto fail; 601 602 btrfs_backref_link_edge(new_edge, edge->node[LOWER], 603 new_node, LINK_UPPER); 604 } 605 } else { 606 list_add_tail(&new_node->lower, &cache->leaves); 607 } 608 609 rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr, 610 &new_node->rb_node); 611 if (rb_node) 612 btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST); 613 614 if (!new_node->lowest) { 615 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { 616 list_add_tail(&new_edge->list[LOWER], 617 &new_edge->node[LOWER]->upper); 618 } 619 } 620 return 0; 621 fail: 622 while (!list_empty(&new_node->lower)) { 623 new_edge = list_entry(new_node->lower.next, 624 struct btrfs_backref_edge, list[UPPER]); 625 list_del(&new_edge->list[UPPER]); 626 btrfs_backref_free_edge(cache, new_edge); 627 } 628 btrfs_backref_free_node(cache, new_node); 629 return -ENOMEM; 630 } 631 632 /* 633 * helper to add 'address of tree root -> reloc tree' mapping 634 */ 635 static int __must_check __add_reloc_root(struct btrfs_root *root) 636 { 637 struct btrfs_fs_info *fs_info = root->fs_info; 638 struct rb_node *rb_node; 639 struct mapping_node *node; 640 struct reloc_control *rc = fs_info->reloc_ctl; 641 642 node = kmalloc(sizeof(*node), GFP_NOFS); 643 if (!node) 644 return -ENOMEM; 645 646 node->bytenr = root->commit_root->start; 647 node->data = root; 648 649 spin_lock(&rc->reloc_root_tree.lock); 650 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, 651 node->bytenr, &node->rb_node); 652 spin_unlock(&rc->reloc_root_tree.lock); 653 if (rb_node) { 654 btrfs_err(fs_info, 655 "Duplicate root found for start=%llu while inserting into relocation tree", 656 node->bytenr); 657 return -EEXIST; 658 } 659 660 list_add_tail(&root->root_list, &rc->reloc_roots); 661 return 0; 662 } 663 664 /* 665 * helper to delete the 'address of tree root -> reloc tree' 666 * mapping 667 */ 668 static void __del_reloc_root(struct btrfs_root *root) 
669 { 670 struct btrfs_fs_info *fs_info = root->fs_info; 671 struct rb_node *rb_node; 672 struct mapping_node *node = NULL; 673 struct reloc_control *rc = fs_info->reloc_ctl; 674 bool put_ref = false; 675 676 if (rc && root->node) { 677 spin_lock(&rc->reloc_root_tree.lock); 678 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, 679 root->commit_root->start); 680 if (rb_node) { 681 node = rb_entry(rb_node, struct mapping_node, rb_node); 682 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); 683 RB_CLEAR_NODE(&node->rb_node); 684 } 685 spin_unlock(&rc->reloc_root_tree.lock); 686 ASSERT(!node || (struct btrfs_root *)node->data == root); 687 } 688 689 /* 690 * We only put the reloc root here if it's on the list. There's a lot 691 * of places where the pattern is to splice the rc->reloc_roots, process 692 * the reloc roots, and then add the reloc root back onto 693 * rc->reloc_roots. If we call __del_reloc_root while it's off of the 694 * list we don't want the reference being dropped, because the guy 695 * messing with the list is in charge of the reference. 696 */ 697 spin_lock(&fs_info->trans_lock); 698 if (!list_empty(&root->root_list)) { 699 put_ref = true; 700 list_del_init(&root->root_list); 701 } 702 spin_unlock(&fs_info->trans_lock); 703 if (put_ref) 704 btrfs_put_root(root); 705 kfree(node); 706 } 707 708 /* 709 * helper to update the 'address of tree root -> reloc tree' 710 * mapping 711 */ 712 static int __update_reloc_root(struct btrfs_root *root) 713 { 714 struct btrfs_fs_info *fs_info = root->fs_info; 715 struct rb_node *rb_node; 716 struct mapping_node *node = NULL; 717 struct reloc_control *rc = fs_info->reloc_ctl; 718 719 spin_lock(&rc->reloc_root_tree.lock); 720 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, 721 root->commit_root->start); 722 if (rb_node) { 723 node = rb_entry(rb_node, struct mapping_node, rb_node); 724 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); 725 } 726 spin_unlock(&rc->reloc_root_tree.lock); 727 728 if (!node) 729 return 0; 730 BUG_ON((struct btrfs_root *)node->data != root); 731 732 spin_lock(&rc->reloc_root_tree.lock); 733 node->bytenr = root->node->start; 734 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, 735 node->bytenr, &node->rb_node); 736 spin_unlock(&rc->reloc_root_tree.lock); 737 if (rb_node) 738 btrfs_backref_panic(fs_info, node->bytenr, -EEXIST); 739 return 0; 740 } 741 742 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, 743 struct btrfs_root *root, u64 objectid) 744 { 745 struct btrfs_fs_info *fs_info = root->fs_info; 746 struct btrfs_root *reloc_root; 747 struct extent_buffer *eb; 748 struct btrfs_root_item *root_item; 749 struct btrfs_key root_key; 750 int ret = 0; 751 bool must_abort = false; 752 753 root_item = kmalloc(sizeof(*root_item), GFP_NOFS); 754 if (!root_item) 755 return ERR_PTR(-ENOMEM); 756 757 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; 758 root_key.type = BTRFS_ROOT_ITEM_KEY; 759 root_key.offset = objectid; 760 761 if (root->root_key.objectid == objectid) { 762 u64 commit_root_gen; 763 764 /* called by btrfs_init_reloc_root */ 765 ret = btrfs_copy_root(trans, root, root->commit_root, &eb, 766 BTRFS_TREE_RELOC_OBJECTID); 767 if (ret) 768 goto fail; 769 770 /* 771 * Set the last_snapshot field to the generation of the commit 772 * root - like this ctree.c:btrfs_block_can_be_shared() behaves 773 * correctly (returns true) when the relocation root is created 774 * either inside the critical section of a transaction commit 775 * (through 
transaction.c:qgroup_account_snapshot()) and when 776 * it's created before the transaction commit is started. 777 */ 778 commit_root_gen = btrfs_header_generation(root->commit_root); 779 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen); 780 } else { 781 /* 782 * called by btrfs_reloc_post_snapshot_hook. 783 * the source tree is a reloc tree, all tree blocks 784 * modified after it was created have RELOC flag 785 * set in their headers. so it's OK to not update 786 * the 'last_snapshot'. 787 */ 788 ret = btrfs_copy_root(trans, root, root->node, &eb, 789 BTRFS_TREE_RELOC_OBJECTID); 790 if (ret) 791 goto fail; 792 } 793 794 /* 795 * We have changed references at this point, we must abort the 796 * transaction if anything fails. 797 */ 798 must_abort = true; 799 800 memcpy(root_item, &root->root_item, sizeof(*root_item)); 801 btrfs_set_root_bytenr(root_item, eb->start); 802 btrfs_set_root_level(root_item, btrfs_header_level(eb)); 803 btrfs_set_root_generation(root_item, trans->transid); 804 805 if (root->root_key.objectid == objectid) { 806 btrfs_set_root_refs(root_item, 0); 807 memset(&root_item->drop_progress, 0, 808 sizeof(struct btrfs_disk_key)); 809 btrfs_set_root_drop_level(root_item, 0); 810 } 811 812 btrfs_tree_unlock(eb); 813 free_extent_buffer(eb); 814 815 ret = btrfs_insert_root(trans, fs_info->tree_root, 816 &root_key, root_item); 817 if (ret) 818 goto fail; 819 820 kfree(root_item); 821 822 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key); 823 if (IS_ERR(reloc_root)) { 824 ret = PTR_ERR(reloc_root); 825 goto abort; 826 } 827 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); 828 reloc_root->last_trans = trans->transid; 829 return reloc_root; 830 fail: 831 kfree(root_item); 832 abort: 833 if (must_abort) 834 btrfs_abort_transaction(trans, ret); 835 return ERR_PTR(ret); 836 } 837 838 /* 839 * create reloc tree for a given fs tree. reloc tree is just a 840 * snapshot of the fs tree with special root objectid. 841 * 842 * The reloc_root comes out of here with two references, one for 843 * root->reloc_root, and another for being on the rc->reloc_roots list. 844 */ 845 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, 846 struct btrfs_root *root) 847 { 848 struct btrfs_fs_info *fs_info = root->fs_info; 849 struct btrfs_root *reloc_root; 850 struct reloc_control *rc = fs_info->reloc_ctl; 851 struct btrfs_block_rsv *rsv; 852 int clear_rsv = 0; 853 int ret; 854 855 if (!rc) 856 return 0; 857 858 /* 859 * The subvolume has reloc tree but the swap is finished, no need to 860 * create/update the dead reloc tree 861 */ 862 if (reloc_root_is_dead(root)) 863 return 0; 864 865 /* 866 * This is subtle but important. We do not do 867 * record_root_in_transaction for reloc roots, instead we record their 868 * corresponding fs root, and then here we update the last trans for the 869 * reloc root. This means that we have to do this for the entire life 870 * of the reloc root, regardless of which stage of the relocation we are 871 * in. 872 */ 873 if (root->reloc_root) { 874 reloc_root = root->reloc_root; 875 reloc_root->last_trans = trans->transid; 876 return 0; 877 } 878 879 /* 880 * We are merging reloc roots, we do not need new reloc trees. Also 881 * reloc trees never need their own reloc tree. 
882 */ 883 if (!rc->create_reloc_tree || 884 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 885 return 0; 886 887 if (!trans->reloc_reserved) { 888 rsv = trans->block_rsv; 889 trans->block_rsv = rc->block_rsv; 890 clear_rsv = 1; 891 } 892 reloc_root = create_reloc_root(trans, root, root->root_key.objectid); 893 if (clear_rsv) 894 trans->block_rsv = rsv; 895 if (IS_ERR(reloc_root)) 896 return PTR_ERR(reloc_root); 897 898 ret = __add_reloc_root(reloc_root); 899 ASSERT(ret != -EEXIST); 900 if (ret) { 901 /* Pairs with create_reloc_root */ 902 btrfs_put_root(reloc_root); 903 return ret; 904 } 905 root->reloc_root = btrfs_grab_root(reloc_root); 906 return 0; 907 } 908 909 /* 910 * update root item of reloc tree 911 */ 912 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, 913 struct btrfs_root *root) 914 { 915 struct btrfs_fs_info *fs_info = root->fs_info; 916 struct btrfs_root *reloc_root; 917 struct btrfs_root_item *root_item; 918 int ret; 919 920 if (!have_reloc_root(root)) 921 return 0; 922 923 reloc_root = root->reloc_root; 924 root_item = &reloc_root->root_item; 925 926 /* 927 * We are probably ok here, but __del_reloc_root() will drop its ref of 928 * the root. We have the ref for root->reloc_root, but just in case 929 * hold it while we update the reloc root. 930 */ 931 btrfs_grab_root(reloc_root); 932 933 /* root->reloc_root will stay until current relocation finished */ 934 if (fs_info->reloc_ctl->merge_reloc_tree && 935 btrfs_root_refs(root_item) == 0) { 936 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); 937 /* 938 * Mark the tree as dead before we change reloc_root so 939 * have_reloc_root will not touch it from now on. 940 */ 941 smp_wmb(); 942 __del_reloc_root(reloc_root); 943 } 944 945 if (reloc_root->commit_root != reloc_root->node) { 946 __update_reloc_root(reloc_root); 947 btrfs_set_root_node(root_item, reloc_root->node); 948 free_extent_buffer(reloc_root->commit_root); 949 reloc_root->commit_root = btrfs_root_node(reloc_root); 950 } 951 952 ret = btrfs_update_root(trans, fs_info->tree_root, 953 &reloc_root->root_key, root_item); 954 btrfs_put_root(reloc_root); 955 return ret; 956 } 957 958 /* 959 * helper to find first cached inode with inode number >= objectid 960 * in a subvolume 961 */ 962 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid) 963 { 964 struct rb_node *node; 965 struct rb_node *prev; 966 struct btrfs_inode *entry; 967 struct inode *inode; 968 969 spin_lock(&root->inode_lock); 970 again: 971 node = root->inode_tree.rb_node; 972 prev = NULL; 973 while (node) { 974 prev = node; 975 entry = rb_entry(node, struct btrfs_inode, rb_node); 976 977 if (objectid < btrfs_ino(entry)) 978 node = node->rb_left; 979 else if (objectid > btrfs_ino(entry)) 980 node = node->rb_right; 981 else 982 break; 983 } 984 if (!node) { 985 while (prev) { 986 entry = rb_entry(prev, struct btrfs_inode, rb_node); 987 if (objectid <= btrfs_ino(entry)) { 988 node = prev; 989 break; 990 } 991 prev = rb_next(prev); 992 } 993 } 994 while (node) { 995 entry = rb_entry(node, struct btrfs_inode, rb_node); 996 inode = igrab(&entry->vfs_inode); 997 if (inode) { 998 spin_unlock(&root->inode_lock); 999 return inode; 1000 } 1001 1002 objectid = btrfs_ino(entry) + 1; 1003 if (cond_resched_lock(&root->inode_lock)) 1004 goto again; 1005 1006 node = rb_next(node); 1007 } 1008 spin_unlock(&root->inode_lock); 1009 return NULL; 1010 } 1011 1012 /* 1013 * get new location of data 1014 */ 1015 static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, 
1016 u64 bytenr, u64 num_bytes) 1017 { 1018 struct btrfs_root *root = BTRFS_I(reloc_inode)->root; 1019 struct btrfs_path *path; 1020 struct btrfs_file_extent_item *fi; 1021 struct extent_buffer *leaf; 1022 int ret; 1023 1024 path = btrfs_alloc_path(); 1025 if (!path) 1026 return -ENOMEM; 1027 1028 bytenr -= BTRFS_I(reloc_inode)->index_cnt; 1029 ret = btrfs_lookup_file_extent(NULL, root, path, 1030 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0); 1031 if (ret < 0) 1032 goto out; 1033 if (ret > 0) { 1034 ret = -ENOENT; 1035 goto out; 1036 } 1037 1038 leaf = path->nodes[0]; 1039 fi = btrfs_item_ptr(leaf, path->slots[0], 1040 struct btrfs_file_extent_item); 1041 1042 BUG_ON(btrfs_file_extent_offset(leaf, fi) || 1043 btrfs_file_extent_compression(leaf, fi) || 1044 btrfs_file_extent_encryption(leaf, fi) || 1045 btrfs_file_extent_other_encoding(leaf, fi)); 1046 1047 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) { 1048 ret = -EINVAL; 1049 goto out; 1050 } 1051 1052 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1053 ret = 0; 1054 out: 1055 btrfs_free_path(path); 1056 return ret; 1057 } 1058 1059 /* 1060 * update file extent items in the tree leaf to point to 1061 * the new locations. 1062 */ 1063 static noinline_for_stack 1064 int replace_file_extents(struct btrfs_trans_handle *trans, 1065 struct reloc_control *rc, 1066 struct btrfs_root *root, 1067 struct extent_buffer *leaf) 1068 { 1069 struct btrfs_fs_info *fs_info = root->fs_info; 1070 struct btrfs_key key; 1071 struct btrfs_file_extent_item *fi; 1072 struct inode *inode = NULL; 1073 u64 parent; 1074 u64 bytenr; 1075 u64 new_bytenr = 0; 1076 u64 num_bytes; 1077 u64 end; 1078 u32 nritems; 1079 u32 i; 1080 int ret = 0; 1081 int first = 1; 1082 int dirty = 0; 1083 1084 if (rc->stage != UPDATE_DATA_PTRS) 1085 return 0; 1086 1087 /* reloc trees always use full backref */ 1088 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 1089 parent = leaf->start; 1090 else 1091 parent = 0; 1092 1093 nritems = btrfs_header_nritems(leaf); 1094 for (i = 0; i < nritems; i++) { 1095 struct btrfs_ref ref = { 0 }; 1096 1097 cond_resched(); 1098 btrfs_item_key_to_cpu(leaf, &key, i); 1099 if (key.type != BTRFS_EXTENT_DATA_KEY) 1100 continue; 1101 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 1102 if (btrfs_file_extent_type(leaf, fi) == 1103 BTRFS_FILE_EXTENT_INLINE) 1104 continue; 1105 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1106 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1107 if (bytenr == 0) 1108 continue; 1109 if (!in_range(bytenr, rc->block_group->start, 1110 rc->block_group->length)) 1111 continue; 1112 1113 /* 1114 * if we are modifying block in fs tree, wait for read_folio 1115 * to complete and drop the extent cache 1116 */ 1117 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 1118 if (first) { 1119 inode = find_next_inode(root, key.objectid); 1120 first = 0; 1121 } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) { 1122 btrfs_add_delayed_iput(BTRFS_I(inode)); 1123 inode = find_next_inode(root, key.objectid); 1124 } 1125 if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) { 1126 struct extent_state *cached_state = NULL; 1127 1128 end = key.offset + 1129 btrfs_file_extent_num_bytes(leaf, fi); 1130 WARN_ON(!IS_ALIGNED(key.offset, 1131 fs_info->sectorsize)); 1132 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); 1133 end--; 1134 ret = try_lock_extent(&BTRFS_I(inode)->io_tree, 1135 key.offset, end, 1136 &cached_state); 1137 if (!ret) 1138 continue; 1139 1140 
				btrfs_drop_extent_map_range(BTRFS_I(inode),
							    key.offset, end, true);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end, &cached_state);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(BTRFS_I(inode));
	return ret;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. Tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
1209 */ 1210 static noinline_for_stack 1211 int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc, 1212 struct btrfs_root *dest, struct btrfs_root *src, 1213 struct btrfs_path *path, struct btrfs_key *next_key, 1214 int lowest_level, int max_level) 1215 { 1216 struct btrfs_fs_info *fs_info = dest->fs_info; 1217 struct extent_buffer *eb; 1218 struct extent_buffer *parent; 1219 struct btrfs_ref ref = { 0 }; 1220 struct btrfs_key key; 1221 u64 old_bytenr; 1222 u64 new_bytenr; 1223 u64 old_ptr_gen; 1224 u64 new_ptr_gen; 1225 u64 last_snapshot; 1226 u32 blocksize; 1227 int cow = 0; 1228 int level; 1229 int ret; 1230 int slot; 1231 1232 ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); 1233 ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 1234 1235 last_snapshot = btrfs_root_last_snapshot(&src->root_item); 1236 again: 1237 slot = path->slots[lowest_level]; 1238 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot); 1239 1240 eb = btrfs_lock_root_node(dest); 1241 level = btrfs_header_level(eb); 1242 1243 if (level < lowest_level) { 1244 btrfs_tree_unlock(eb); 1245 free_extent_buffer(eb); 1246 return 0; 1247 } 1248 1249 if (cow) { 1250 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb, 1251 BTRFS_NESTING_COW); 1252 if (ret) { 1253 btrfs_tree_unlock(eb); 1254 free_extent_buffer(eb); 1255 return ret; 1256 } 1257 } 1258 1259 if (next_key) { 1260 next_key->objectid = (u64)-1; 1261 next_key->type = (u8)-1; 1262 next_key->offset = (u64)-1; 1263 } 1264 1265 parent = eb; 1266 while (1) { 1267 level = btrfs_header_level(parent); 1268 ASSERT(level >= lowest_level); 1269 1270 ret = btrfs_bin_search(parent, 0, &key, &slot); 1271 if (ret < 0) 1272 break; 1273 if (ret && slot > 0) 1274 slot--; 1275 1276 if (next_key && slot + 1 < btrfs_header_nritems(parent)) 1277 btrfs_node_key_to_cpu(parent, next_key, slot + 1); 1278 1279 old_bytenr = btrfs_node_blockptr(parent, slot); 1280 blocksize = fs_info->nodesize; 1281 old_ptr_gen = btrfs_node_ptr_generation(parent, slot); 1282 1283 if (level <= max_level) { 1284 eb = path->nodes[level]; 1285 new_bytenr = btrfs_node_blockptr(eb, 1286 path->slots[level]); 1287 new_ptr_gen = btrfs_node_ptr_generation(eb, 1288 path->slots[level]); 1289 } else { 1290 new_bytenr = 0; 1291 new_ptr_gen = 0; 1292 } 1293 1294 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) { 1295 ret = level; 1296 break; 1297 } 1298 1299 if (new_bytenr == 0 || old_ptr_gen > last_snapshot || 1300 memcmp_node_keys(parent, slot, path, level)) { 1301 if (level <= lowest_level) { 1302 ret = 0; 1303 break; 1304 } 1305 1306 eb = btrfs_read_node_slot(parent, slot); 1307 if (IS_ERR(eb)) { 1308 ret = PTR_ERR(eb); 1309 break; 1310 } 1311 btrfs_tree_lock(eb); 1312 if (cow) { 1313 ret = btrfs_cow_block(trans, dest, eb, parent, 1314 slot, &eb, 1315 BTRFS_NESTING_COW); 1316 if (ret) { 1317 btrfs_tree_unlock(eb); 1318 free_extent_buffer(eb); 1319 break; 1320 } 1321 } 1322 1323 btrfs_tree_unlock(parent); 1324 free_extent_buffer(parent); 1325 1326 parent = eb; 1327 continue; 1328 } 1329 1330 if (!cow) { 1331 btrfs_tree_unlock(parent); 1332 free_extent_buffer(parent); 1333 cow = 1; 1334 goto again; 1335 } 1336 1337 btrfs_node_key_to_cpu(path->nodes[level], &key, 1338 path->slots[level]); 1339 btrfs_release_path(path); 1340 1341 path->lowest_level = level; 1342 set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state); 1343 ret = btrfs_search_slot(trans, src, &key, path, 0, 1); 1344 clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state); 1345 path->lowest_level = 0; 1346 
if (ret) { 1347 if (ret > 0) 1348 ret = -ENOENT; 1349 break; 1350 } 1351 1352 /* 1353 * Info qgroup to trace both subtrees. 1354 * 1355 * We must trace both trees. 1356 * 1) Tree reloc subtree 1357 * If not traced, we will leak data numbers 1358 * 2) Fs subtree 1359 * If not traced, we will double count old data 1360 * 1361 * We don't scan the subtree right now, but only record 1362 * the swapped tree blocks. 1363 * The real subtree rescan is delayed until we have new 1364 * CoW on the subtree root node before transaction commit. 1365 */ 1366 ret = btrfs_qgroup_add_swapped_blocks(trans, dest, 1367 rc->block_group, parent, slot, 1368 path->nodes[level], path->slots[level], 1369 last_snapshot); 1370 if (ret < 0) 1371 break; 1372 /* 1373 * swap blocks in fs tree and reloc tree. 1374 */ 1375 btrfs_set_node_blockptr(parent, slot, new_bytenr); 1376 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen); 1377 btrfs_mark_buffer_dirty(parent); 1378 1379 btrfs_set_node_blockptr(path->nodes[level], 1380 path->slots[level], old_bytenr); 1381 btrfs_set_node_ptr_generation(path->nodes[level], 1382 path->slots[level], old_ptr_gen); 1383 btrfs_mark_buffer_dirty(path->nodes[level]); 1384 1385 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr, 1386 blocksize, path->nodes[level]->start); 1387 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid, 1388 0, true); 1389 ret = btrfs_inc_extent_ref(trans, &ref); 1390 if (ret) { 1391 btrfs_abort_transaction(trans, ret); 1392 break; 1393 } 1394 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr, 1395 blocksize, 0); 1396 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0, 1397 true); 1398 ret = btrfs_inc_extent_ref(trans, &ref); 1399 if (ret) { 1400 btrfs_abort_transaction(trans, ret); 1401 break; 1402 } 1403 1404 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr, 1405 blocksize, path->nodes[level]->start); 1406 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid, 1407 0, true); 1408 ret = btrfs_free_extent(trans, &ref); 1409 if (ret) { 1410 btrfs_abort_transaction(trans, ret); 1411 break; 1412 } 1413 1414 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr, 1415 blocksize, 0); 1416 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 1417 0, true); 1418 ret = btrfs_free_extent(trans, &ref); 1419 if (ret) { 1420 btrfs_abort_transaction(trans, ret); 1421 break; 1422 } 1423 1424 btrfs_unlock_up_safe(path, 0); 1425 1426 ret = level; 1427 break; 1428 } 1429 btrfs_tree_unlock(parent); 1430 free_extent_buffer(parent); 1431 return ret; 1432 } 1433 1434 /* 1435 * helper to find next relocated block in reloc tree 1436 */ 1437 static noinline_for_stack 1438 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, 1439 int *level) 1440 { 1441 struct extent_buffer *eb; 1442 int i; 1443 u64 last_snapshot; 1444 u32 nritems; 1445 1446 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 1447 1448 for (i = 0; i < *level; i++) { 1449 free_extent_buffer(path->nodes[i]); 1450 path->nodes[i] = NULL; 1451 } 1452 1453 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { 1454 eb = path->nodes[i]; 1455 nritems = btrfs_header_nritems(eb); 1456 while (path->slots[i] + 1 < nritems) { 1457 path->slots[i]++; 1458 if (btrfs_node_ptr_generation(eb, path->slots[i]) <= 1459 last_snapshot) 1460 continue; 1461 1462 *level = i; 1463 return 0; 1464 } 1465 free_extent_buffer(path->nodes[i]); 1466 path->nodes[i] = NULL; 1467 } 1468 return 1; 1469 } 1470 1471 /* 1472 * walk down reloc 
tree to find relocated block of lowest level 1473 */ 1474 static noinline_for_stack 1475 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, 1476 int *level) 1477 { 1478 struct extent_buffer *eb = NULL; 1479 int i; 1480 u64 ptr_gen = 0; 1481 u64 last_snapshot; 1482 u32 nritems; 1483 1484 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 1485 1486 for (i = *level; i > 0; i--) { 1487 eb = path->nodes[i]; 1488 nritems = btrfs_header_nritems(eb); 1489 while (path->slots[i] < nritems) { 1490 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); 1491 if (ptr_gen > last_snapshot) 1492 break; 1493 path->slots[i]++; 1494 } 1495 if (path->slots[i] >= nritems) { 1496 if (i == *level) 1497 break; 1498 *level = i + 1; 1499 return 0; 1500 } 1501 if (i == 1) { 1502 *level = i; 1503 return 0; 1504 } 1505 1506 eb = btrfs_read_node_slot(eb, path->slots[i]); 1507 if (IS_ERR(eb)) 1508 return PTR_ERR(eb); 1509 BUG_ON(btrfs_header_level(eb) != i - 1); 1510 path->nodes[i - 1] = eb; 1511 path->slots[i - 1] = 0; 1512 } 1513 return 1; 1514 } 1515 1516 /* 1517 * invalidate extent cache for file extents whose key in range of 1518 * [min_key, max_key) 1519 */ 1520 static int invalidate_extent_cache(struct btrfs_root *root, 1521 struct btrfs_key *min_key, 1522 struct btrfs_key *max_key) 1523 { 1524 struct btrfs_fs_info *fs_info = root->fs_info; 1525 struct inode *inode = NULL; 1526 u64 objectid; 1527 u64 start, end; 1528 u64 ino; 1529 1530 objectid = min_key->objectid; 1531 while (1) { 1532 struct extent_state *cached_state = NULL; 1533 1534 cond_resched(); 1535 iput(inode); 1536 1537 if (objectid > max_key->objectid) 1538 break; 1539 1540 inode = find_next_inode(root, objectid); 1541 if (!inode) 1542 break; 1543 ino = btrfs_ino(BTRFS_I(inode)); 1544 1545 if (ino > max_key->objectid) { 1546 iput(inode); 1547 break; 1548 } 1549 1550 objectid = ino + 1; 1551 if (!S_ISREG(inode->i_mode)) 1552 continue; 1553 1554 if (unlikely(min_key->objectid == ino)) { 1555 if (min_key->type > BTRFS_EXTENT_DATA_KEY) 1556 continue; 1557 if (min_key->type < BTRFS_EXTENT_DATA_KEY) 1558 start = 0; 1559 else { 1560 start = min_key->offset; 1561 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize)); 1562 } 1563 } else { 1564 start = 0; 1565 } 1566 1567 if (unlikely(max_key->objectid == ino)) { 1568 if (max_key->type < BTRFS_EXTENT_DATA_KEY) 1569 continue; 1570 if (max_key->type > BTRFS_EXTENT_DATA_KEY) { 1571 end = (u64)-1; 1572 } else { 1573 if (max_key->offset == 0) 1574 continue; 1575 end = max_key->offset; 1576 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); 1577 end--; 1578 } 1579 } else { 1580 end = (u64)-1; 1581 } 1582 1583 /* the lock_extent waits for read_folio to complete */ 1584 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); 1585 btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true); 1586 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); 1587 } 1588 return 0; 1589 } 1590 1591 static int find_next_key(struct btrfs_path *path, int level, 1592 struct btrfs_key *key) 1593 1594 { 1595 while (level < BTRFS_MAX_LEVEL) { 1596 if (!path->nodes[level]) 1597 break; 1598 if (path->slots[level] + 1 < 1599 btrfs_header_nritems(path->nodes[level])) { 1600 btrfs_node_key_to_cpu(path->nodes[level], key, 1601 path->slots[level] + 1); 1602 return 0; 1603 } 1604 level++; 1605 } 1606 return 1; 1607 } 1608 1609 /* 1610 * Insert current subvolume into reloc_control::dirty_subvol_roots 1611 */ 1612 static int insert_dirty_subvol(struct btrfs_trans_handle *trans, 1613 struct 
reloc_control *rc, 1614 struct btrfs_root *root) 1615 { 1616 struct btrfs_root *reloc_root = root->reloc_root; 1617 struct btrfs_root_item *reloc_root_item; 1618 int ret; 1619 1620 /* @root must be a subvolume tree root with a valid reloc tree */ 1621 ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 1622 ASSERT(reloc_root); 1623 1624 reloc_root_item = &reloc_root->root_item; 1625 memset(&reloc_root_item->drop_progress, 0, 1626 sizeof(reloc_root_item->drop_progress)); 1627 btrfs_set_root_drop_level(reloc_root_item, 0); 1628 btrfs_set_root_refs(reloc_root_item, 0); 1629 ret = btrfs_update_reloc_root(trans, root); 1630 if (ret) 1631 return ret; 1632 1633 if (list_empty(&root->reloc_dirty_list)) { 1634 btrfs_grab_root(root); 1635 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots); 1636 } 1637 1638 return 0; 1639 } 1640 1641 static int clean_dirty_subvols(struct reloc_control *rc) 1642 { 1643 struct btrfs_root *root; 1644 struct btrfs_root *next; 1645 int ret = 0; 1646 int ret2; 1647 1648 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots, 1649 reloc_dirty_list) { 1650 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 1651 /* Merged subvolume, cleanup its reloc root */ 1652 struct btrfs_root *reloc_root = root->reloc_root; 1653 1654 list_del_init(&root->reloc_dirty_list); 1655 root->reloc_root = NULL; 1656 /* 1657 * Need barrier to ensure clear_bit() only happens after 1658 * root->reloc_root = NULL. Pairs with have_reloc_root. 1659 */ 1660 smp_wmb(); 1661 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); 1662 if (reloc_root) { 1663 /* 1664 * btrfs_drop_snapshot drops our ref we hold for 1665 * ->reloc_root. If it fails however we must 1666 * drop the ref ourselves. 1667 */ 1668 ret2 = btrfs_drop_snapshot(reloc_root, 0, 1); 1669 if (ret2 < 0) { 1670 btrfs_put_root(reloc_root); 1671 if (!ret) 1672 ret = ret2; 1673 } 1674 } 1675 btrfs_put_root(root); 1676 } else { 1677 /* Orphan reloc tree, just clean it up */ 1678 ret2 = btrfs_drop_snapshot(root, 0, 1); 1679 if (ret2 < 0) { 1680 btrfs_put_root(root); 1681 if (!ret) 1682 ret = ret2; 1683 } 1684 } 1685 } 1686 return ret; 1687 } 1688 1689 /* 1690 * merge the relocated tree blocks in reloc tree with corresponding 1691 * fs tree. 
1692 */ 1693 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, 1694 struct btrfs_root *root) 1695 { 1696 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 1697 struct btrfs_key key; 1698 struct btrfs_key next_key; 1699 struct btrfs_trans_handle *trans = NULL; 1700 struct btrfs_root *reloc_root; 1701 struct btrfs_root_item *root_item; 1702 struct btrfs_path *path; 1703 struct extent_buffer *leaf; 1704 int reserve_level; 1705 int level; 1706 int max_level; 1707 int replaced = 0; 1708 int ret = 0; 1709 u32 min_reserved; 1710 1711 path = btrfs_alloc_path(); 1712 if (!path) 1713 return -ENOMEM; 1714 path->reada = READA_FORWARD; 1715 1716 reloc_root = root->reloc_root; 1717 root_item = &reloc_root->root_item; 1718 1719 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 1720 level = btrfs_root_level(root_item); 1721 atomic_inc(&reloc_root->node->refs); 1722 path->nodes[level] = reloc_root->node; 1723 path->slots[level] = 0; 1724 } else { 1725 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 1726 1727 level = btrfs_root_drop_level(root_item); 1728 BUG_ON(level == 0); 1729 path->lowest_level = level; 1730 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); 1731 path->lowest_level = 0; 1732 if (ret < 0) { 1733 btrfs_free_path(path); 1734 return ret; 1735 } 1736 1737 btrfs_node_key_to_cpu(path->nodes[level], &next_key, 1738 path->slots[level]); 1739 WARN_ON(memcmp(&key, &next_key, sizeof(key))); 1740 1741 btrfs_unlock_up_safe(path, 0); 1742 } 1743 1744 /* 1745 * In merge_reloc_root(), we modify the upper level pointer to swap the 1746 * tree blocks between reloc tree and subvolume tree. Thus for tree 1747 * block COW, we COW at most from level 1 to root level for each tree. 1748 * 1749 * Thus the needed metadata size is at most root_level * nodesize, 1750 * and * 2 since we have two trees to COW. 1751 */ 1752 reserve_level = max_t(int, 1, btrfs_root_level(root_item)); 1753 min_reserved = fs_info->nodesize * reserve_level * 2; 1754 memset(&next_key, 0, sizeof(next_key)); 1755 1756 while (1) { 1757 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, 1758 min_reserved, 1759 BTRFS_RESERVE_FLUSH_LIMIT); 1760 if (ret) 1761 goto out; 1762 trans = btrfs_start_transaction(root, 0); 1763 if (IS_ERR(trans)) { 1764 ret = PTR_ERR(trans); 1765 trans = NULL; 1766 goto out; 1767 } 1768 1769 /* 1770 * At this point we no longer have a reloc_control, so we can't 1771 * depend on btrfs_init_reloc_root to update our last_trans. 1772 * 1773 * But that's ok, we started the trans handle on our 1774 * corresponding fs_root, which means it's been added to the 1775 * dirty list. At commit time we'll still call 1776 * btrfs_update_reloc_root() and update our root item 1777 * appropriately. 
1778 */ 1779 reloc_root->last_trans = trans->transid; 1780 trans->block_rsv = rc->block_rsv; 1781 1782 replaced = 0; 1783 max_level = level; 1784 1785 ret = walk_down_reloc_tree(reloc_root, path, &level); 1786 if (ret < 0) 1787 goto out; 1788 if (ret > 0) 1789 break; 1790 1791 if (!find_next_key(path, level, &key) && 1792 btrfs_comp_cpu_keys(&next_key, &key) >= 0) { 1793 ret = 0; 1794 } else { 1795 ret = replace_path(trans, rc, root, reloc_root, path, 1796 &next_key, level, max_level); 1797 } 1798 if (ret < 0) 1799 goto out; 1800 if (ret > 0) { 1801 level = ret; 1802 btrfs_node_key_to_cpu(path->nodes[level], &key, 1803 path->slots[level]); 1804 replaced = 1; 1805 } 1806 1807 ret = walk_up_reloc_tree(reloc_root, path, &level); 1808 if (ret > 0) 1809 break; 1810 1811 BUG_ON(level == 0); 1812 /* 1813 * save the merging progress in the drop_progress. 1814 * this is OK since root refs == 1 in this case. 1815 */ 1816 btrfs_node_key(path->nodes[level], &root_item->drop_progress, 1817 path->slots[level]); 1818 btrfs_set_root_drop_level(root_item, level); 1819 1820 btrfs_end_transaction_throttle(trans); 1821 trans = NULL; 1822 1823 btrfs_btree_balance_dirty(fs_info); 1824 1825 if (replaced && rc->stage == UPDATE_DATA_PTRS) 1826 invalidate_extent_cache(root, &key, &next_key); 1827 } 1828 1829 /* 1830 * handle the case only one block in the fs tree need to be 1831 * relocated and the block is tree root. 1832 */ 1833 leaf = btrfs_lock_root_node(root); 1834 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf, 1835 BTRFS_NESTING_COW); 1836 btrfs_tree_unlock(leaf); 1837 free_extent_buffer(leaf); 1838 out: 1839 btrfs_free_path(path); 1840 1841 if (ret == 0) { 1842 ret = insert_dirty_subvol(trans, rc, root); 1843 if (ret) 1844 btrfs_abort_transaction(trans, ret); 1845 } 1846 1847 if (trans) 1848 btrfs_end_transaction_throttle(trans); 1849 1850 btrfs_btree_balance_dirty(fs_info); 1851 1852 if (replaced && rc->stage == UPDATE_DATA_PTRS) 1853 invalidate_extent_cache(root, &key, &next_key); 1854 1855 return ret; 1856 } 1857 1858 static noinline_for_stack 1859 int prepare_to_merge(struct reloc_control *rc, int err) 1860 { 1861 struct btrfs_root *root = rc->extent_root; 1862 struct btrfs_fs_info *fs_info = root->fs_info; 1863 struct btrfs_root *reloc_root; 1864 struct btrfs_trans_handle *trans; 1865 LIST_HEAD(reloc_roots); 1866 u64 num_bytes = 0; 1867 int ret; 1868 1869 mutex_lock(&fs_info->reloc_mutex); 1870 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 1871 rc->merging_rsv_size += rc->nodes_relocated * 2; 1872 mutex_unlock(&fs_info->reloc_mutex); 1873 1874 again: 1875 if (!err) { 1876 num_bytes = rc->merging_rsv_size; 1877 ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes, 1878 BTRFS_RESERVE_FLUSH_ALL); 1879 if (ret) 1880 err = ret; 1881 } 1882 1883 trans = btrfs_join_transaction(rc->extent_root); 1884 if (IS_ERR(trans)) { 1885 if (!err) 1886 btrfs_block_rsv_release(fs_info, rc->block_rsv, 1887 num_bytes, NULL); 1888 return PTR_ERR(trans); 1889 } 1890 1891 if (!err) { 1892 if (num_bytes != rc->merging_rsv_size) { 1893 btrfs_end_transaction(trans); 1894 btrfs_block_rsv_release(fs_info, rc->block_rsv, 1895 num_bytes, NULL); 1896 goto again; 1897 } 1898 } 1899 1900 rc->merge_reloc_tree = 1; 1901 1902 while (!list_empty(&rc->reloc_roots)) { 1903 reloc_root = list_entry(rc->reloc_roots.next, 1904 struct btrfs_root, root_list); 1905 list_del_init(&reloc_root->root_list); 1906 1907 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, 1908 false); 1909 if (IS_ERR(root)) 
{ 1910 /* 1911 * Even if we have an error we need this reloc root 1912 * back on our list so we can clean up properly. 1913 */ 1914 list_add(&reloc_root->root_list, &reloc_roots); 1915 btrfs_abort_transaction(trans, (int)PTR_ERR(root)); 1916 if (!err) 1917 err = PTR_ERR(root); 1918 break; 1919 } 1920 ASSERT(root->reloc_root == reloc_root); 1921 1922 /* 1923 * set reference count to 1, so btrfs_recover_relocation 1924 * knows it should resumes merging 1925 */ 1926 if (!err) 1927 btrfs_set_root_refs(&reloc_root->root_item, 1); 1928 ret = btrfs_update_reloc_root(trans, root); 1929 1930 /* 1931 * Even if we have an error we need this reloc root back on our 1932 * list so we can clean up properly. 1933 */ 1934 list_add(&reloc_root->root_list, &reloc_roots); 1935 btrfs_put_root(root); 1936 1937 if (ret) { 1938 btrfs_abort_transaction(trans, ret); 1939 if (!err) 1940 err = ret; 1941 break; 1942 } 1943 } 1944 1945 list_splice(&reloc_roots, &rc->reloc_roots); 1946 1947 if (!err) 1948 err = btrfs_commit_transaction(trans); 1949 else 1950 btrfs_end_transaction(trans); 1951 return err; 1952 } 1953 1954 static noinline_for_stack 1955 void free_reloc_roots(struct list_head *list) 1956 { 1957 struct btrfs_root *reloc_root, *tmp; 1958 1959 list_for_each_entry_safe(reloc_root, tmp, list, root_list) 1960 __del_reloc_root(reloc_root); 1961 } 1962 1963 static noinline_for_stack 1964 void merge_reloc_roots(struct reloc_control *rc) 1965 { 1966 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 1967 struct btrfs_root *root; 1968 struct btrfs_root *reloc_root; 1969 LIST_HEAD(reloc_roots); 1970 int found = 0; 1971 int ret = 0; 1972 again: 1973 root = rc->extent_root; 1974 1975 /* 1976 * this serializes us with btrfs_record_root_in_transaction, 1977 * we have to make sure nobody is in the middle of 1978 * adding their roots to the list while we are 1979 * doing this splice 1980 */ 1981 mutex_lock(&fs_info->reloc_mutex); 1982 list_splice_init(&rc->reloc_roots, &reloc_roots); 1983 mutex_unlock(&fs_info->reloc_mutex); 1984 1985 while (!list_empty(&reloc_roots)) { 1986 found = 1; 1987 reloc_root = list_entry(reloc_roots.next, 1988 struct btrfs_root, root_list); 1989 1990 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, 1991 false); 1992 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 1993 if (IS_ERR(root)) { 1994 /* 1995 * For recovery we read the fs roots on mount, 1996 * and if we didn't find the root then we marked 1997 * the reloc root as a garbage root. For normal 1998 * relocation obviously the root should exist in 1999 * memory. However there's no reason we can't 2000 * handle the error properly here just in case. 2001 */ 2002 ASSERT(0); 2003 ret = PTR_ERR(root); 2004 goto out; 2005 } 2006 if (root->reloc_root != reloc_root) { 2007 /* 2008 * This is actually impossible without something 2009 * going really wrong (like weird race condition 2010 * or cosmic rays). 
2011 */ 2012 ASSERT(0); 2013 ret = -EINVAL; 2014 goto out; 2015 } 2016 ret = merge_reloc_root(rc, root); 2017 btrfs_put_root(root); 2018 if (ret) { 2019 if (list_empty(&reloc_root->root_list)) 2020 list_add_tail(&reloc_root->root_list, 2021 &reloc_roots); 2022 goto out; 2023 } 2024 } else { 2025 if (!IS_ERR(root)) { 2026 if (root->reloc_root == reloc_root) { 2027 root->reloc_root = NULL; 2028 btrfs_put_root(reloc_root); 2029 } 2030 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, 2031 &root->state); 2032 btrfs_put_root(root); 2033 } 2034 2035 list_del_init(&reloc_root->root_list); 2036 /* Don't forget to queue this reloc root for cleanup */ 2037 list_add_tail(&reloc_root->reloc_dirty_list, 2038 &rc->dirty_subvol_roots); 2039 } 2040 } 2041 2042 if (found) { 2043 found = 0; 2044 goto again; 2045 } 2046 out: 2047 if (ret) { 2048 btrfs_handle_fs_error(fs_info, ret, NULL); 2049 free_reloc_roots(&reloc_roots); 2050 2051 /* new reloc root may be added */ 2052 mutex_lock(&fs_info->reloc_mutex); 2053 list_splice_init(&rc->reloc_roots, &reloc_roots); 2054 mutex_unlock(&fs_info->reloc_mutex); 2055 free_reloc_roots(&reloc_roots); 2056 } 2057 2058 /* 2059 * We used to have 2060 * 2061 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2062 * 2063 * here, but it's wrong. If we fail to start the transaction in 2064 * prepare_to_merge() we will have only 0 ref reloc roots, none of which 2065 * have actually been removed from the reloc_root_tree rb tree. This is 2066 * fine because we're bailing here, and we hold a reference on the root 2067 * for the list that holds it, so these roots will be cleaned up when we 2068 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root 2069 * will be cleaned up on unmount. 2070 * 2071 * The remaining nodes will be cleaned up by free_reloc_control. 2072 */ 2073 } 2074 2075 static void free_block_list(struct rb_root *blocks) 2076 { 2077 struct tree_block *block; 2078 struct rb_node *rb_node; 2079 while ((rb_node = rb_first(blocks))) { 2080 block = rb_entry(rb_node, struct tree_block, rb_node); 2081 rb_erase(rb_node, blocks); 2082 kfree(block); 2083 } 2084 } 2085 2086 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, 2087 struct btrfs_root *reloc_root) 2088 { 2089 struct btrfs_fs_info *fs_info = reloc_root->fs_info; 2090 struct btrfs_root *root; 2091 int ret; 2092 2093 if (reloc_root->last_trans == trans->transid) 2094 return 0; 2095 2096 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); 2097 2098 /* 2099 * This should succeed, since we can't have a reloc root without having 2100 * already looked up the actual root and created the reloc root for this 2101 * root. 2102 * 2103 * However if there's some sort of corruption where we have a ref to a 2104 * reloc root without a corresponding root this could return ENOENT. 
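 *
 * If that happens, ASSERT() for developer builds, but propagate the error
 * so a regular user gets a clean failure instead of a crash.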
2105 */ 2106 if (IS_ERR(root)) { 2107 ASSERT(0); 2108 return PTR_ERR(root); 2109 } 2110 if (root->reloc_root != reloc_root) { 2111 ASSERT(0); 2112 btrfs_err(fs_info, 2113 "root %llu has two reloc roots associated with it", 2114 reloc_root->root_key.offset); 2115 btrfs_put_root(root); 2116 return -EUCLEAN; 2117 } 2118 ret = btrfs_record_root_in_trans(trans, root); 2119 btrfs_put_root(root); 2120 2121 return ret; 2122 } 2123 2124 static noinline_for_stack 2125 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2126 struct reloc_control *rc, 2127 struct btrfs_backref_node *node, 2128 struct btrfs_backref_edge *edges[]) 2129 { 2130 struct btrfs_backref_node *next; 2131 struct btrfs_root *root; 2132 int index = 0; 2133 int ret; 2134 2135 next = node; 2136 while (1) { 2137 cond_resched(); 2138 next = walk_up_backref(next, edges, &index); 2139 root = next->root; 2140 2141 /* 2142 * If there is no root, then our references for this block are 2143 * incomplete, as we should be able to walk all the way up to a 2144 * block that is owned by a root. 2145 * 2146 * This path is only for SHAREABLE roots, so if we come upon a 2147 * non-SHAREABLE root then we have backrefs that resolve 2148 * improperly. 2149 * 2150 * Both of these cases indicate file system corruption, or a bug 2151 * in the backref walking code. 2152 */ 2153 if (!root) { 2154 ASSERT(0); 2155 btrfs_err(trans->fs_info, 2156 "bytenr %llu doesn't have a backref path ending in a root", 2157 node->bytenr); 2158 return ERR_PTR(-EUCLEAN); 2159 } 2160 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 2161 ASSERT(0); 2162 btrfs_err(trans->fs_info, 2163 "bytenr %llu has multiple refs with one ending in a non-shareable root", 2164 node->bytenr); 2165 return ERR_PTR(-EUCLEAN); 2166 } 2167 2168 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2169 ret = record_reloc_root_in_trans(trans, root); 2170 if (ret) 2171 return ERR_PTR(ret); 2172 break; 2173 } 2174 2175 ret = btrfs_record_root_in_trans(trans, root); 2176 if (ret) 2177 return ERR_PTR(ret); 2178 root = root->reloc_root; 2179 2180 /* 2181 * We could have raced with another thread which failed, so 2182 * root->reloc_root may not be set, return ENOENT in this case. 2183 */ 2184 if (!root) 2185 return ERR_PTR(-ENOENT); 2186 2187 if (next->new_bytenr != root->node->start) { 2188 /* 2189 * We just created the reloc root, so we shouldn't have 2190 * ->new_bytenr set and this shouldn't be in the changed 2191 * list. If it is then we have multiple roots pointing 2192 * at the same bytenr which indicates corruption, or 2193 * we've made a mistake in the backref walking code. 2194 */ 2195 ASSERT(next->new_bytenr == 0); 2196 ASSERT(list_empty(&next->list)); 2197 if (next->new_bytenr || !list_empty(&next->list)) { 2198 btrfs_err(trans->fs_info, 2199 "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu", 2200 node->bytenr, next->bytenr); 2201 return ERR_PTR(-EUCLEAN); 2202 } 2203 2204 next->new_bytenr = root->node->start; 2205 btrfs_put_root(next->root); 2206 next->root = btrfs_grab_root(root); 2207 ASSERT(next->root); 2208 list_add_tail(&next->list, 2209 &rc->backref_cache.changed); 2210 mark_block_processed(rc, next); 2211 break; 2212 } 2213 2214 WARN_ON(1); 2215 root = NULL; 2216 next = walk_down_backref(edges, &index); 2217 if (!next || next->level <= node->level) 2218 break; 2219 } 2220 if (!root) { 2221 /* 2222 * This can happen if there's fs corruption or if there's a bug 2223 * in the backref lookup code. 
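 * As with the earlier checks, assert for developer builds and just return
 * the error to the caller otherwise.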
2224 */ 2225 ASSERT(0); 2226 return ERR_PTR(-ENOENT); 2227 } 2228 2229 next = node; 2230 /* setup backref node path for btrfs_reloc_cow_block */ 2231 while (1) { 2232 rc->backref_cache.path[next->level] = next; 2233 if (--index < 0) 2234 break; 2235 next = edges[index]->node[UPPER]; 2236 } 2237 return root; 2238 } 2239 2240 /* 2241 * Select a tree root for relocation. 2242 * 2243 * Return NULL if the block is not shareable. We should use do_relocation() in 2244 * this case. 2245 * 2246 * Return a tree root pointer if the block is shareable. 2247 * Return -ENOENT if the block is root of reloc tree. 2248 */ 2249 static noinline_for_stack 2250 struct btrfs_root *select_one_root(struct btrfs_backref_node *node) 2251 { 2252 struct btrfs_backref_node *next; 2253 struct btrfs_root *root; 2254 struct btrfs_root *fs_root = NULL; 2255 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2256 int index = 0; 2257 2258 next = node; 2259 while (1) { 2260 cond_resched(); 2261 next = walk_up_backref(next, edges, &index); 2262 root = next->root; 2263 2264 /* 2265 * This can occur if we have incomplete extent refs leading all 2266 * the way up a particular path, in this case return -EUCLEAN. 2267 */ 2268 if (!root) 2269 return ERR_PTR(-EUCLEAN); 2270 2271 /* No other choice for non-shareable tree */ 2272 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 2273 return root; 2274 2275 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) 2276 fs_root = root; 2277 2278 if (next != node) 2279 return NULL; 2280 2281 next = walk_down_backref(edges, &index); 2282 if (!next || next->level <= node->level) 2283 break; 2284 } 2285 2286 if (!fs_root) 2287 return ERR_PTR(-ENOENT); 2288 return fs_root; 2289 } 2290 2291 static noinline_for_stack 2292 u64 calcu_metadata_size(struct reloc_control *rc, 2293 struct btrfs_backref_node *node, int reserve) 2294 { 2295 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2296 struct btrfs_backref_node *next = node; 2297 struct btrfs_backref_edge *edge; 2298 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2299 u64 num_bytes = 0; 2300 int index = 0; 2301 2302 BUG_ON(reserve && node->processed); 2303 2304 while (next) { 2305 cond_resched(); 2306 while (1) { 2307 if (next->processed && (reserve || next != node)) 2308 break; 2309 2310 num_bytes += fs_info->nodesize; 2311 2312 if (list_empty(&next->upper)) 2313 break; 2314 2315 edge = list_entry(next->upper.next, 2316 struct btrfs_backref_edge, list[LOWER]); 2317 edges[index++] = edge; 2318 next = edge->node[UPPER]; 2319 } 2320 next = walk_down_backref(edges, &index); 2321 } 2322 return num_bytes; 2323 } 2324 2325 static int reserve_metadata_space(struct btrfs_trans_handle *trans, 2326 struct reloc_control *rc, 2327 struct btrfs_backref_node *node) 2328 { 2329 struct btrfs_root *root = rc->extent_root; 2330 struct btrfs_fs_info *fs_info = root->fs_info; 2331 u64 num_bytes; 2332 int ret; 2333 u64 tmp; 2334 2335 num_bytes = calcu_metadata_size(rc, node, 1) * 2; 2336 2337 trans->block_rsv = rc->block_rsv; 2338 rc->reserved_bytes += num_bytes; 2339 2340 /* 2341 * We are under a transaction here so we can only do limited flushing. 2342 * If we get an enospc just kick back -EAGAIN so we know to drop the 2343 * transaction and try to refill when we can flush all the things. 
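 * relocate_block_group() responds to the -EAGAIN by ending the current
 * transaction, refilling the (now enlarged) block_rsv with
 * BTRFS_RESERVE_FLUSH_ALL and then retrying the same extent.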
2344 */ 2345 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes, 2346 BTRFS_RESERVE_FLUSH_LIMIT); 2347 if (ret) { 2348 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES; 2349 while (tmp <= rc->reserved_bytes) 2350 tmp <<= 1; 2351 /* 2352 * only one thread can access block_rsv at this point, 2353 * so we don't need hold lock to protect block_rsv. 2354 * we expand more reservation size here to allow enough 2355 * space for relocation and we will return earlier in 2356 * enospc case. 2357 */ 2358 rc->block_rsv->size = tmp + fs_info->nodesize * 2359 RELOCATION_RESERVED_NODES; 2360 return -EAGAIN; 2361 } 2362 2363 return 0; 2364 } 2365 2366 /* 2367 * relocate a block tree, and then update pointers in upper level 2368 * blocks that reference the block to point to the new location. 2369 * 2370 * if called by link_to_upper, the block has already been relocated. 2371 * in that case this function just updates pointers. 2372 */ 2373 static int do_relocation(struct btrfs_trans_handle *trans, 2374 struct reloc_control *rc, 2375 struct btrfs_backref_node *node, 2376 struct btrfs_key *key, 2377 struct btrfs_path *path, int lowest) 2378 { 2379 struct btrfs_backref_node *upper; 2380 struct btrfs_backref_edge *edge; 2381 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2382 struct btrfs_root *root; 2383 struct extent_buffer *eb; 2384 u32 blocksize; 2385 u64 bytenr; 2386 int slot; 2387 int ret = 0; 2388 2389 /* 2390 * If we are lowest then this is the first time we're processing this 2391 * block, and thus shouldn't have an eb associated with it yet. 2392 */ 2393 ASSERT(!lowest || !node->eb); 2394 2395 path->lowest_level = node->level + 1; 2396 rc->backref_cache.path[node->level] = node; 2397 list_for_each_entry(edge, &node->upper, list[LOWER]) { 2398 struct btrfs_ref ref = { 0 }; 2399 2400 cond_resched(); 2401 2402 upper = edge->node[UPPER]; 2403 root = select_reloc_root(trans, rc, upper, edges); 2404 if (IS_ERR(root)) { 2405 ret = PTR_ERR(root); 2406 goto next; 2407 } 2408 2409 if (upper->eb && !upper->locked) { 2410 if (!lowest) { 2411 ret = btrfs_bin_search(upper->eb, 0, key, &slot); 2412 if (ret < 0) 2413 goto next; 2414 BUG_ON(ret); 2415 bytenr = btrfs_node_blockptr(upper->eb, slot); 2416 if (node->eb->start == bytenr) 2417 goto next; 2418 } 2419 btrfs_backref_drop_node_buffer(upper); 2420 } 2421 2422 if (!upper->eb) { 2423 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2424 if (ret) { 2425 if (ret > 0) 2426 ret = -ENOENT; 2427 2428 btrfs_release_path(path); 2429 break; 2430 } 2431 2432 if (!upper->eb) { 2433 upper->eb = path->nodes[upper->level]; 2434 path->nodes[upper->level] = NULL; 2435 } else { 2436 BUG_ON(upper->eb != path->nodes[upper->level]); 2437 } 2438 2439 upper->locked = 1; 2440 path->locks[upper->level] = 0; 2441 2442 slot = path->slots[upper->level]; 2443 btrfs_release_path(path); 2444 } else { 2445 ret = btrfs_bin_search(upper->eb, 0, key, &slot); 2446 if (ret < 0) 2447 goto next; 2448 BUG_ON(ret); 2449 } 2450 2451 bytenr = btrfs_node_blockptr(upper->eb, slot); 2452 if (lowest) { 2453 if (bytenr != node->bytenr) { 2454 btrfs_err(root->fs_info, 2455 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", 2456 bytenr, node->bytenr, slot, 2457 upper->eb->start); 2458 ret = -EIO; 2459 goto next; 2460 } 2461 } else { 2462 if (node->eb->start == bytenr) 2463 goto next; 2464 } 2465 2466 blocksize = root->fs_info->nodesize; 2467 eb = btrfs_read_node_slot(upper->eb, slot); 2468 if (IS_ERR(eb)) { 2469 ret = PTR_ERR(eb); 2470 goto next; 2471 } 
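		/*
		 * Lock the child block we just read. If this backref node has
		 * not been COWed into its new location yet (no ->eb), do that
		 * below; otherwise simply repoint the parent's slot at the
		 * already relocated copy and move the subtree references over.
		 */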
2472 btrfs_tree_lock(eb); 2473 2474 if (!node->eb) { 2475 ret = btrfs_cow_block(trans, root, eb, upper->eb, 2476 slot, &eb, BTRFS_NESTING_COW); 2477 btrfs_tree_unlock(eb); 2478 free_extent_buffer(eb); 2479 if (ret < 0) 2480 goto next; 2481 /* 2482 * We've just COWed this block, it should have updated 2483 * the correct backref node entry. 2484 */ 2485 ASSERT(node->eb == eb); 2486 } else { 2487 btrfs_set_node_blockptr(upper->eb, slot, 2488 node->eb->start); 2489 btrfs_set_node_ptr_generation(upper->eb, slot, 2490 trans->transid); 2491 btrfs_mark_buffer_dirty(upper->eb); 2492 2493 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, 2494 node->eb->start, blocksize, 2495 upper->eb->start); 2496 btrfs_init_tree_ref(&ref, node->level, 2497 btrfs_header_owner(upper->eb), 2498 root->root_key.objectid, false); 2499 ret = btrfs_inc_extent_ref(trans, &ref); 2500 if (!ret) 2501 ret = btrfs_drop_subtree(trans, root, eb, 2502 upper->eb); 2503 if (ret) 2504 btrfs_abort_transaction(trans, ret); 2505 } 2506 next: 2507 if (!upper->pending) 2508 btrfs_backref_drop_node_buffer(upper); 2509 else 2510 btrfs_backref_unlock_node_buffer(upper); 2511 if (ret) 2512 break; 2513 } 2514 2515 if (!ret && node->pending) { 2516 btrfs_backref_drop_node_buffer(node); 2517 list_move_tail(&node->list, &rc->backref_cache.changed); 2518 node->pending = 0; 2519 } 2520 2521 path->lowest_level = 0; 2522 2523 /* 2524 * We should have allocated all of our space in the block rsv and thus 2525 * shouldn't ENOSPC. 2526 */ 2527 ASSERT(ret != -ENOSPC); 2528 return ret; 2529 } 2530 2531 static int link_to_upper(struct btrfs_trans_handle *trans, 2532 struct reloc_control *rc, 2533 struct btrfs_backref_node *node, 2534 struct btrfs_path *path) 2535 { 2536 struct btrfs_key key; 2537 2538 btrfs_node_key_to_cpu(node->eb, &key, 0); 2539 return do_relocation(trans, rc, node, &key, path, 0); 2540 } 2541 2542 static int finish_pending_nodes(struct btrfs_trans_handle *trans, 2543 struct reloc_control *rc, 2544 struct btrfs_path *path, int err) 2545 { 2546 LIST_HEAD(list); 2547 struct btrfs_backref_cache *cache = &rc->backref_cache; 2548 struct btrfs_backref_node *node; 2549 int level; 2550 int ret; 2551 2552 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2553 while (!list_empty(&cache->pending[level])) { 2554 node = list_entry(cache->pending[level].next, 2555 struct btrfs_backref_node, list); 2556 list_move_tail(&node->list, &list); 2557 BUG_ON(!node->pending); 2558 2559 if (!err) { 2560 ret = link_to_upper(trans, rc, node, path); 2561 if (ret < 0) 2562 err = ret; 2563 } 2564 } 2565 list_splice_init(&list, &cache->pending[level]); 2566 } 2567 return err; 2568 } 2569 2570 /* 2571 * mark a block and all blocks directly/indirectly reference the block 2572 * as processed. 
2573 */ 2574 static void update_processed_blocks(struct reloc_control *rc, 2575 struct btrfs_backref_node *node) 2576 { 2577 struct btrfs_backref_node *next = node; 2578 struct btrfs_backref_edge *edge; 2579 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2580 int index = 0; 2581 2582 while (next) { 2583 cond_resched(); 2584 while (1) { 2585 if (next->processed) 2586 break; 2587 2588 mark_block_processed(rc, next); 2589 2590 if (list_empty(&next->upper)) 2591 break; 2592 2593 edge = list_entry(next->upper.next, 2594 struct btrfs_backref_edge, list[LOWER]); 2595 edges[index++] = edge; 2596 next = edge->node[UPPER]; 2597 } 2598 next = walk_down_backref(edges, &index); 2599 } 2600 } 2601 2602 static int tree_block_processed(u64 bytenr, struct reloc_control *rc) 2603 { 2604 u32 blocksize = rc->extent_root->fs_info->nodesize; 2605 2606 if (test_range_bit(&rc->processed_blocks, bytenr, 2607 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2608 return 1; 2609 return 0; 2610 } 2611 2612 static int get_tree_block_key(struct btrfs_fs_info *fs_info, 2613 struct tree_block *block) 2614 { 2615 struct btrfs_tree_parent_check check = { 2616 .level = block->level, 2617 .owner_root = block->owner, 2618 .transid = block->key.offset 2619 }; 2620 struct extent_buffer *eb; 2621 2622 eb = read_tree_block(fs_info, block->bytenr, &check); 2623 if (IS_ERR(eb)) 2624 return PTR_ERR(eb); 2625 if (!extent_buffer_uptodate(eb)) { 2626 free_extent_buffer(eb); 2627 return -EIO; 2628 } 2629 if (block->level == 0) 2630 btrfs_item_key_to_cpu(eb, &block->key, 0); 2631 else 2632 btrfs_node_key_to_cpu(eb, &block->key, 0); 2633 free_extent_buffer(eb); 2634 block->key_ready = 1; 2635 return 0; 2636 } 2637 2638 /* 2639 * helper function to relocate a tree block 2640 */ 2641 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2642 struct reloc_control *rc, 2643 struct btrfs_backref_node *node, 2644 struct btrfs_key *key, 2645 struct btrfs_path *path) 2646 { 2647 struct btrfs_root *root; 2648 int ret = 0; 2649 2650 if (!node) 2651 return 0; 2652 2653 /* 2654 * If we fail here we want to drop our backref_node because we are going 2655 * to start over and regenerate the tree for it. 2656 */ 2657 ret = reserve_metadata_space(trans, rc, node); 2658 if (ret) 2659 goto out; 2660 2661 BUG_ON(node->processed); 2662 root = select_one_root(node); 2663 if (IS_ERR(root)) { 2664 ret = PTR_ERR(root); 2665 2666 /* See explanation in select_one_root for the -EUCLEAN case. */ 2667 ASSERT(ret == -ENOENT); 2668 if (ret == -ENOENT) { 2669 ret = 0; 2670 update_processed_blocks(rc, node); 2671 } 2672 goto out; 2673 } 2674 2675 if (root) { 2676 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 2677 /* 2678 * This block was the root block of a root, and this is 2679 * the first time we're processing the block and thus it 2680 * should not have had the ->new_bytenr modified and 2681 * should have not been included on the changed list. 2682 * 2683 * However in the case of corruption we could have 2684 * multiple refs pointing to the same block improperly, 2685 * and thus we would trip over these checks. ASSERT() 2686 * for the developer case, because it could indicate a 2687 * bug in the backref code, however error out for a 2688 * normal user in the case of corruption. 
2689 */ 2690 ASSERT(node->new_bytenr == 0); 2691 ASSERT(list_empty(&node->list)); 2692 if (node->new_bytenr || !list_empty(&node->list)) { 2693 btrfs_err(root->fs_info, 2694 "bytenr %llu has improper references to it", 2695 node->bytenr); 2696 ret = -EUCLEAN; 2697 goto out; 2698 } 2699 ret = btrfs_record_root_in_trans(trans, root); 2700 if (ret) 2701 goto out; 2702 /* 2703 * Another thread could have failed, need to check if we 2704 * have reloc_root actually set. 2705 */ 2706 if (!root->reloc_root) { 2707 ret = -ENOENT; 2708 goto out; 2709 } 2710 root = root->reloc_root; 2711 node->new_bytenr = root->node->start; 2712 btrfs_put_root(node->root); 2713 node->root = btrfs_grab_root(root); 2714 ASSERT(node->root); 2715 list_add_tail(&node->list, &rc->backref_cache.changed); 2716 } else { 2717 path->lowest_level = node->level; 2718 if (root == root->fs_info->chunk_root) 2719 btrfs_reserve_chunk_metadata(trans, false); 2720 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2721 btrfs_release_path(path); 2722 if (root == root->fs_info->chunk_root) 2723 btrfs_trans_release_chunk_metadata(trans); 2724 if (ret > 0) 2725 ret = 0; 2726 } 2727 if (!ret) 2728 update_processed_blocks(rc, node); 2729 } else { 2730 ret = do_relocation(trans, rc, node, key, path, 1); 2731 } 2732 out: 2733 if (ret || node->level == 0 || node->cowonly) 2734 btrfs_backref_cleanup_node(&rc->backref_cache, node); 2735 return ret; 2736 } 2737 2738 /* 2739 * relocate a list of blocks 2740 */ 2741 static noinline_for_stack 2742 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 2743 struct reloc_control *rc, struct rb_root *blocks) 2744 { 2745 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2746 struct btrfs_backref_node *node; 2747 struct btrfs_path *path; 2748 struct tree_block *block; 2749 struct tree_block *next; 2750 int ret; 2751 int err = 0; 2752 2753 path = btrfs_alloc_path(); 2754 if (!path) { 2755 err = -ENOMEM; 2756 goto out_free_blocks; 2757 } 2758 2759 /* Kick in readahead for tree blocks with missing keys */ 2760 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2761 if (!block->key_ready) 2762 btrfs_readahead_tree_block(fs_info, block->bytenr, 2763 block->owner, 0, 2764 block->level); 2765 } 2766 2767 /* Get first keys */ 2768 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2769 if (!block->key_ready) { 2770 err = get_tree_block_key(fs_info, block); 2771 if (err) 2772 goto out_free_path; 2773 } 2774 } 2775 2776 /* Do tree relocation */ 2777 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2778 node = build_backref_tree(rc, &block->key, 2779 block->level, block->bytenr); 2780 if (IS_ERR(node)) { 2781 err = PTR_ERR(node); 2782 goto out; 2783 } 2784 2785 ret = relocate_tree_block(trans, rc, node, &block->key, 2786 path); 2787 if (ret < 0) { 2788 err = ret; 2789 break; 2790 } 2791 } 2792 out: 2793 err = finish_pending_nodes(trans, rc, path, err); 2794 2795 out_free_path: 2796 btrfs_free_path(path); 2797 out_free_blocks: 2798 free_block_list(blocks); 2799 return err; 2800 } 2801 2802 static noinline_for_stack int prealloc_file_extent_cluster( 2803 struct btrfs_inode *inode, 2804 struct file_extent_cluster *cluster) 2805 { 2806 u64 alloc_hint = 0; 2807 u64 start; 2808 u64 end; 2809 u64 offset = inode->index_cnt; 2810 u64 num_bytes; 2811 int nr; 2812 int ret = 0; 2813 u64 i_size = i_size_read(&inode->vfs_inode); 2814 u64 prealloc_start = cluster->start - offset; 2815 u64 prealloc_end = cluster->end - offset; 2816 u64 cur_offset = 
prealloc_start; 2817 2818 /* 2819 * For subpage case, previous i_size may not be aligned to PAGE_SIZE. 2820 * This means the range [i_size, PAGE_END + 1) is filled with zeros by 2821 * btrfs_do_readpage() call of previously relocated file cluster. 2822 * 2823 * If the current cluster starts in the above range, btrfs_do_readpage() 2824 * will skip the read, and relocate_one_page() will later writeback 2825 * the padding zeros as new data, causing data corruption. 2826 * 2827 * Here we have to manually invalidate the range (i_size, PAGE_END + 1). 2828 */ 2829 if (!PAGE_ALIGNED(i_size)) { 2830 struct address_space *mapping = inode->vfs_inode.i_mapping; 2831 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2832 const u32 sectorsize = fs_info->sectorsize; 2833 struct page *page; 2834 2835 ASSERT(sectorsize < PAGE_SIZE); 2836 ASSERT(IS_ALIGNED(i_size, sectorsize)); 2837 2838 /* 2839 * Subpage can't handle page with DIRTY but without UPTODATE 2840 * bit as it can lead to the following deadlock: 2841 * 2842 * btrfs_read_folio() 2843 * | Page already *locked* 2844 * |- btrfs_lock_and_flush_ordered_range() 2845 * |- btrfs_start_ordered_extent() 2846 * |- extent_write_cache_pages() 2847 * |- lock_page() 2848 * We try to lock the page we already hold. 2849 * 2850 * Here we just writeback the whole data reloc inode, so that 2851 * we will be ensured to have no dirty range in the page, and 2852 * are safe to clear the uptodate bits. 2853 * 2854 * This shouldn't cause too much overhead, as we need to write 2855 * the data back anyway. 2856 */ 2857 ret = filemap_write_and_wait(mapping); 2858 if (ret < 0) 2859 return ret; 2860 2861 clear_extent_bits(&inode->io_tree, i_size, 2862 round_up(i_size, PAGE_SIZE) - 1, 2863 EXTENT_UPTODATE); 2864 page = find_lock_page(mapping, i_size >> PAGE_SHIFT); 2865 /* 2866 * If page is freed we don't need to do anything then, as we 2867 * will re-read the whole page anyway. 
2868 */ 2869 if (page) { 2870 btrfs_subpage_clear_uptodate(fs_info, page, i_size, 2871 round_up(i_size, PAGE_SIZE) - i_size); 2872 unlock_page(page); 2873 put_page(page); 2874 } 2875 } 2876 2877 BUG_ON(cluster->start != cluster->boundary[0]); 2878 ret = btrfs_alloc_data_chunk_ondemand(inode, 2879 prealloc_end + 1 - prealloc_start); 2880 if (ret) 2881 return ret; 2882 2883 btrfs_inode_lock(inode, 0); 2884 for (nr = 0; nr < cluster->nr; nr++) { 2885 struct extent_state *cached_state = NULL; 2886 2887 start = cluster->boundary[nr] - offset; 2888 if (nr + 1 < cluster->nr) 2889 end = cluster->boundary[nr + 1] - 1 - offset; 2890 else 2891 end = cluster->end - offset; 2892 2893 lock_extent(&inode->io_tree, start, end, &cached_state); 2894 num_bytes = end + 1 - start; 2895 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start, 2896 num_bytes, num_bytes, 2897 end + 1, &alloc_hint); 2898 cur_offset = end + 1; 2899 unlock_extent(&inode->io_tree, start, end, &cached_state); 2900 if (ret) 2901 break; 2902 } 2903 btrfs_inode_unlock(inode, 0); 2904 2905 if (cur_offset < prealloc_end) 2906 btrfs_free_reserved_data_space_noquota(inode->root->fs_info, 2907 prealloc_end + 1 - cur_offset); 2908 return ret; 2909 } 2910 2911 static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode, 2912 u64 start, u64 end, u64 block_start) 2913 { 2914 struct extent_map *em; 2915 struct extent_state *cached_state = NULL; 2916 int ret = 0; 2917 2918 em = alloc_extent_map(); 2919 if (!em) 2920 return -ENOMEM; 2921 2922 em->start = start; 2923 em->len = end + 1 - start; 2924 em->block_len = em->len; 2925 em->block_start = block_start; 2926 set_bit(EXTENT_FLAG_PINNED, &em->flags); 2927 2928 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); 2929 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false); 2930 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); 2931 free_extent_map(em); 2932 2933 return ret; 2934 } 2935 2936 /* 2937 * Allow error injection to test balance/relocation cancellation 2938 */ 2939 noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info) 2940 { 2941 return atomic_read(&fs_info->balance_cancel_req) || 2942 atomic_read(&fs_info->reloc_cancel_req) || 2943 fatal_signal_pending(current); 2944 } 2945 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE); 2946 2947 static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster, 2948 int cluster_nr) 2949 { 2950 /* Last extent, use cluster end directly */ 2951 if (cluster_nr >= cluster->nr - 1) 2952 return cluster->end; 2953 2954 /* Use next boundary start*/ 2955 return cluster->boundary[cluster_nr + 1] - 1; 2956 } 2957 2958 static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, 2959 struct file_extent_cluster *cluster, 2960 int *cluster_nr, unsigned long page_index) 2961 { 2962 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2963 u64 offset = BTRFS_I(inode)->index_cnt; 2964 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT; 2965 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 2966 struct page *page; 2967 u64 page_start; 2968 u64 page_end; 2969 u64 cur; 2970 int ret; 2971 2972 ASSERT(page_index <= last_index); 2973 page = find_lock_page(inode->i_mapping, page_index); 2974 if (!page) { 2975 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 2976 page_index, last_index + 1 - page_index); 2977 page = find_or_create_page(inode->i_mapping, page_index, mask); 2978 if (!page) 2979 return -ENOMEM; 2980 } 2981 ret = 
set_page_extent_mapped(page); 2982 if (ret < 0) 2983 goto release_page; 2984 2985 if (PageReadahead(page)) 2986 page_cache_async_readahead(inode->i_mapping, ra, NULL, 2987 page_folio(page), page_index, 2988 last_index + 1 - page_index); 2989 2990 if (!PageUptodate(page)) { 2991 btrfs_read_folio(NULL, page_folio(page)); 2992 lock_page(page); 2993 if (!PageUptodate(page)) { 2994 ret = -EIO; 2995 goto release_page; 2996 } 2997 } 2998 2999 page_start = page_offset(page); 3000 page_end = page_start + PAGE_SIZE - 1; 3001 3002 /* 3003 * Start from the cluster, as for subpage case, the cluster can start 3004 * inside the page. 3005 */ 3006 cur = max(page_start, cluster->boundary[*cluster_nr] - offset); 3007 while (cur <= page_end) { 3008 struct extent_state *cached_state = NULL; 3009 u64 extent_start = cluster->boundary[*cluster_nr] - offset; 3010 u64 extent_end = get_cluster_boundary_end(cluster, 3011 *cluster_nr) - offset; 3012 u64 clamped_start = max(page_start, extent_start); 3013 u64 clamped_end = min(page_end, extent_end); 3014 u32 clamped_len = clamped_end + 1 - clamped_start; 3015 3016 /* Reserve metadata for this range */ 3017 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), 3018 clamped_len, clamped_len, 3019 false); 3020 if (ret) 3021 goto release_page; 3022 3023 /* Mark the range delalloc and dirty for later writeback */ 3024 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, 3025 &cached_state); 3026 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start, 3027 clamped_end, 0, &cached_state); 3028 if (ret) { 3029 clear_extent_bit(&BTRFS_I(inode)->io_tree, 3030 clamped_start, clamped_end, 3031 EXTENT_LOCKED | EXTENT_BOUNDARY, 3032 &cached_state); 3033 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3034 clamped_len, true); 3035 btrfs_delalloc_release_extents(BTRFS_I(inode), 3036 clamped_len); 3037 goto release_page; 3038 } 3039 btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len); 3040 3041 /* 3042 * Set the boundary if it's inside the page. 3043 * Data relocation requires the destination extents to have the 3044 * same size as the source. 3045 * EXTENT_BOUNDARY bit prevents current extent from being merged 3046 * with previous extent. 3047 */ 3048 if (in_range(cluster->boundary[*cluster_nr] - offset, 3049 page_start, PAGE_SIZE)) { 3050 u64 boundary_start = cluster->boundary[*cluster_nr] - 3051 offset; 3052 u64 boundary_end = boundary_start + 3053 fs_info->sectorsize - 1; 3054 3055 set_extent_bit(&BTRFS_I(inode)->io_tree, 3056 boundary_start, boundary_end, 3057 EXTENT_BOUNDARY, NULL, GFP_NOFS); 3058 } 3059 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, 3060 &cached_state); 3061 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len); 3062 cur += clamped_len; 3063 3064 /* Crossed extent end, go to next extent */ 3065 if (cur >= extent_end) { 3066 (*cluster_nr)++; 3067 /* Just finished the last extent of the cluster, exit. 
*/ 3068 if (*cluster_nr >= cluster->nr) 3069 break; 3070 } 3071 } 3072 unlock_page(page); 3073 put_page(page); 3074 3075 balance_dirty_pages_ratelimited(inode->i_mapping); 3076 btrfs_throttle(fs_info); 3077 if (btrfs_should_cancel_balance(fs_info)) 3078 ret = -ECANCELED; 3079 return ret; 3080 3081 release_page: 3082 unlock_page(page); 3083 put_page(page); 3084 return ret; 3085 } 3086 3087 static int relocate_file_extent_cluster(struct inode *inode, 3088 struct file_extent_cluster *cluster) 3089 { 3090 u64 offset = BTRFS_I(inode)->index_cnt; 3091 unsigned long index; 3092 unsigned long last_index; 3093 struct file_ra_state *ra; 3094 int cluster_nr = 0; 3095 int ret = 0; 3096 3097 if (!cluster->nr) 3098 return 0; 3099 3100 ra = kzalloc(sizeof(*ra), GFP_NOFS); 3101 if (!ra) 3102 return -ENOMEM; 3103 3104 ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster); 3105 if (ret) 3106 goto out; 3107 3108 file_ra_state_init(ra, inode->i_mapping); 3109 3110 ret = setup_relocation_extent_mapping(inode, cluster->start - offset, 3111 cluster->end - offset, cluster->start); 3112 if (ret) 3113 goto out; 3114 3115 last_index = (cluster->end - offset) >> PAGE_SHIFT; 3116 for (index = (cluster->start - offset) >> PAGE_SHIFT; 3117 index <= last_index && !ret; index++) 3118 ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index); 3119 if (ret == 0) 3120 WARN_ON(cluster_nr != cluster->nr); 3121 out: 3122 kfree(ra); 3123 return ret; 3124 } 3125 3126 static noinline_for_stack 3127 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key, 3128 struct file_extent_cluster *cluster) 3129 { 3130 int ret; 3131 3132 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 3133 ret = relocate_file_extent_cluster(inode, cluster); 3134 if (ret) 3135 return ret; 3136 cluster->nr = 0; 3137 } 3138 3139 if (!cluster->nr) 3140 cluster->start = extent_key->objectid; 3141 else 3142 BUG_ON(cluster->nr >= MAX_EXTENTS); 3143 cluster->end = extent_key->objectid + extent_key->offset - 1; 3144 cluster->boundary[cluster->nr] = extent_key->objectid; 3145 cluster->nr++; 3146 3147 if (cluster->nr >= MAX_EXTENTS) { 3148 ret = relocate_file_extent_cluster(inode, cluster); 3149 if (ret) 3150 return ret; 3151 cluster->nr = 0; 3152 } 3153 return 0; 3154 } 3155 3156 /* 3157 * helper to add a tree block to the list. 
3158 * the major work is getting the generation and level of the block 3159 */ 3160 static int add_tree_block(struct reloc_control *rc, 3161 struct btrfs_key *extent_key, 3162 struct btrfs_path *path, 3163 struct rb_root *blocks) 3164 { 3165 struct extent_buffer *eb; 3166 struct btrfs_extent_item *ei; 3167 struct btrfs_tree_block_info *bi; 3168 struct tree_block *block; 3169 struct rb_node *rb_node; 3170 u32 item_size; 3171 int level = -1; 3172 u64 generation; 3173 u64 owner = 0; 3174 3175 eb = path->nodes[0]; 3176 item_size = btrfs_item_size(eb, path->slots[0]); 3177 3178 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || 3179 item_size >= sizeof(*ei) + sizeof(*bi)) { 3180 unsigned long ptr = 0, end; 3181 3182 ei = btrfs_item_ptr(eb, path->slots[0], 3183 struct btrfs_extent_item); 3184 end = (unsigned long)ei + item_size; 3185 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { 3186 bi = (struct btrfs_tree_block_info *)(ei + 1); 3187 level = btrfs_tree_block_level(eb, bi); 3188 ptr = (unsigned long)(bi + 1); 3189 } else { 3190 level = (int)extent_key->offset; 3191 ptr = (unsigned long)(ei + 1); 3192 } 3193 generation = btrfs_extent_generation(eb, ei); 3194 3195 /* 3196 * We're reading random blocks without knowing their owner ahead 3197 * of time. This is ok most of the time, as all reloc roots and 3198 * fs roots have the same lock type. However normal trees do 3199 * not, and the only way to know ahead of time is to read the 3200 * inline ref offset. We know it's an fs root if 3201 * 3202 * 1. There's more than one ref. 3203 * 2. There's a SHARED_DATA_REF_KEY set. 3204 * 3. FULL_BACKREF is set on the flags. 3205 * 3206 * Otherwise it's safe to assume that the ref offset == the 3207 * owner of this block, so we can use that when calling 3208 * read_tree_block. 
3209 */ 3210 if (btrfs_extent_refs(eb, ei) == 1 && 3211 !(btrfs_extent_flags(eb, ei) & 3212 BTRFS_BLOCK_FLAG_FULL_BACKREF) && 3213 ptr < end) { 3214 struct btrfs_extent_inline_ref *iref; 3215 int type; 3216 3217 iref = (struct btrfs_extent_inline_ref *)ptr; 3218 type = btrfs_get_extent_inline_ref_type(eb, iref, 3219 BTRFS_REF_TYPE_BLOCK); 3220 if (type == BTRFS_REF_TYPE_INVALID) 3221 return -EINVAL; 3222 if (type == BTRFS_TREE_BLOCK_REF_KEY) 3223 owner = btrfs_extent_inline_ref_offset(eb, iref); 3224 } 3225 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) { 3226 btrfs_print_v0_err(eb->fs_info); 3227 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL); 3228 return -EINVAL; 3229 } else { 3230 BUG(); 3231 } 3232 3233 btrfs_release_path(path); 3234 3235 BUG_ON(level == -1); 3236 3237 block = kmalloc(sizeof(*block), GFP_NOFS); 3238 if (!block) 3239 return -ENOMEM; 3240 3241 block->bytenr = extent_key->objectid; 3242 block->key.objectid = rc->extent_root->fs_info->nodesize; 3243 block->key.offset = generation; 3244 block->level = level; 3245 block->key_ready = 0; 3246 block->owner = owner; 3247 3248 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node); 3249 if (rb_node) 3250 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr, 3251 -EEXIST); 3252 3253 return 0; 3254 } 3255 3256 /* 3257 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY 3258 */ 3259 static int __add_tree_block(struct reloc_control *rc, 3260 u64 bytenr, u32 blocksize, 3261 struct rb_root *blocks) 3262 { 3263 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3264 struct btrfs_path *path; 3265 struct btrfs_key key; 3266 int ret; 3267 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 3268 3269 if (tree_block_processed(bytenr, rc)) 3270 return 0; 3271 3272 if (rb_simple_search(blocks, bytenr)) 3273 return 0; 3274 3275 path = btrfs_alloc_path(); 3276 if (!path) 3277 return -ENOMEM; 3278 again: 3279 key.objectid = bytenr; 3280 if (skinny) { 3281 key.type = BTRFS_METADATA_ITEM_KEY; 3282 key.offset = (u64)-1; 3283 } else { 3284 key.type = BTRFS_EXTENT_ITEM_KEY; 3285 key.offset = blocksize; 3286 } 3287 3288 path->search_commit_root = 1; 3289 path->skip_locking = 1; 3290 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); 3291 if (ret < 0) 3292 goto out; 3293 3294 if (ret > 0 && skinny) { 3295 if (path->slots[0]) { 3296 path->slots[0]--; 3297 btrfs_item_key_to_cpu(path->nodes[0], &key, 3298 path->slots[0]); 3299 if (key.objectid == bytenr && 3300 (key.type == BTRFS_METADATA_ITEM_KEY || 3301 (key.type == BTRFS_EXTENT_ITEM_KEY && 3302 key.offset == blocksize))) 3303 ret = 0; 3304 } 3305 3306 if (ret) { 3307 skinny = false; 3308 btrfs_release_path(path); 3309 goto again; 3310 } 3311 } 3312 if (ret) { 3313 ASSERT(ret == 1); 3314 btrfs_print_leaf(path->nodes[0]); 3315 btrfs_err(fs_info, 3316 "tree block extent item (%llu) is not found in extent tree", 3317 bytenr); 3318 WARN_ON(1); 3319 ret = -EINVAL; 3320 goto out; 3321 } 3322 3323 ret = add_tree_block(rc, &key, path, blocks); 3324 out: 3325 btrfs_free_path(path); 3326 return ret; 3327 } 3328 3329 static int delete_block_group_cache(struct btrfs_fs_info *fs_info, 3330 struct btrfs_block_group *block_group, 3331 struct inode *inode, 3332 u64 ino) 3333 { 3334 struct btrfs_root *root = fs_info->tree_root; 3335 struct btrfs_trans_handle *trans; 3336 int ret = 0; 3337 3338 if (inode) 3339 goto truncate; 3340 3341 inode = btrfs_iget(fs_info->sb, ino, root); 3342 if (IS_ERR(inode)) 3343 return -ENOENT; 3344 
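	/*
	 * At this point @inode is the free space cache inode, either passed in
	 * by the caller or looked up above. Truncate it so that its data
	 * extents no longer block relocation of the block group.
	 */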
3345 truncate: 3346 ret = btrfs_check_trunc_cache_free_space(fs_info, 3347 &fs_info->global_block_rsv); 3348 if (ret) 3349 goto out; 3350 3351 trans = btrfs_join_transaction(root); 3352 if (IS_ERR(trans)) { 3353 ret = PTR_ERR(trans); 3354 goto out; 3355 } 3356 3357 ret = btrfs_truncate_free_space_cache(trans, block_group, inode); 3358 3359 btrfs_end_transaction(trans); 3360 btrfs_btree_balance_dirty(fs_info); 3361 out: 3362 iput(inode); 3363 return ret; 3364 } 3365 3366 /* 3367 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the 3368 * cache inode, to avoid free space cache data extent blocking data relocation. 3369 */ 3370 static int delete_v1_space_cache(struct extent_buffer *leaf, 3371 struct btrfs_block_group *block_group, 3372 u64 data_bytenr) 3373 { 3374 u64 space_cache_ino; 3375 struct btrfs_file_extent_item *ei; 3376 struct btrfs_key key; 3377 bool found = false; 3378 int i; 3379 int ret; 3380 3381 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID) 3382 return 0; 3383 3384 for (i = 0; i < btrfs_header_nritems(leaf); i++) { 3385 u8 type; 3386 3387 btrfs_item_key_to_cpu(leaf, &key, i); 3388 if (key.type != BTRFS_EXTENT_DATA_KEY) 3389 continue; 3390 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 3391 type = btrfs_file_extent_type(leaf, ei); 3392 3393 if ((type == BTRFS_FILE_EXTENT_REG || 3394 type == BTRFS_FILE_EXTENT_PREALLOC) && 3395 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) { 3396 found = true; 3397 space_cache_ino = key.objectid; 3398 break; 3399 } 3400 } 3401 if (!found) 3402 return -ENOENT; 3403 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL, 3404 space_cache_ino); 3405 return ret; 3406 } 3407 3408 /* 3409 * helper to find all tree blocks that reference a given data extent 3410 */ 3411 static noinline_for_stack 3412 int add_data_references(struct reloc_control *rc, 3413 struct btrfs_key *extent_key, 3414 struct btrfs_path *path, 3415 struct rb_root *blocks) 3416 { 3417 struct btrfs_backref_walk_ctx ctx = { 0 }; 3418 struct ulist_iterator leaf_uiter; 3419 struct ulist_node *ref_node = NULL; 3420 const u32 blocksize = rc->extent_root->fs_info->nodesize; 3421 int ret = 0; 3422 3423 btrfs_release_path(path); 3424 3425 ctx.bytenr = extent_key->objectid; 3426 ctx.skip_inode_ref_list = true; 3427 ctx.fs_info = rc->extent_root->fs_info; 3428 3429 ret = btrfs_find_all_leafs(&ctx); 3430 if (ret < 0) 3431 return ret; 3432 3433 ULIST_ITER_INIT(&leaf_uiter); 3434 while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) { 3435 struct btrfs_tree_parent_check check = { 0 }; 3436 struct extent_buffer *eb; 3437 3438 eb = read_tree_block(ctx.fs_info, ref_node->val, &check); 3439 if (IS_ERR(eb)) { 3440 ret = PTR_ERR(eb); 3441 break; 3442 } 3443 ret = delete_v1_space_cache(eb, rc->block_group, 3444 extent_key->objectid); 3445 free_extent_buffer(eb); 3446 if (ret < 0) 3447 break; 3448 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks); 3449 if (ret < 0) 3450 break; 3451 } 3452 if (ret < 0) 3453 free_block_list(blocks); 3454 ulist_free(ctx.refs); 3455 return ret; 3456 } 3457 3458 /* 3459 * helper to find next unprocessed extent 3460 */ 3461 static noinline_for_stack 3462 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, 3463 struct btrfs_key *extent_key) 3464 { 3465 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3466 struct btrfs_key key; 3467 struct extent_buffer *leaf; 3468 u64 start, end, last; 3469 int ret; 3470 3471 last = rc->block_group->start + rc->block_group->length; 3472 while 
(1) { 3473 cond_resched(); 3474 if (rc->search_start >= last) { 3475 ret = 1; 3476 break; 3477 } 3478 3479 key.objectid = rc->search_start; 3480 key.type = BTRFS_EXTENT_ITEM_KEY; 3481 key.offset = 0; 3482 3483 path->search_commit_root = 1; 3484 path->skip_locking = 1; 3485 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 3486 0, 0); 3487 if (ret < 0) 3488 break; 3489 next: 3490 leaf = path->nodes[0]; 3491 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3492 ret = btrfs_next_leaf(rc->extent_root, path); 3493 if (ret != 0) 3494 break; 3495 leaf = path->nodes[0]; 3496 } 3497 3498 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3499 if (key.objectid >= last) { 3500 ret = 1; 3501 break; 3502 } 3503 3504 if (key.type != BTRFS_EXTENT_ITEM_KEY && 3505 key.type != BTRFS_METADATA_ITEM_KEY) { 3506 path->slots[0]++; 3507 goto next; 3508 } 3509 3510 if (key.type == BTRFS_EXTENT_ITEM_KEY && 3511 key.objectid + key.offset <= rc->search_start) { 3512 path->slots[0]++; 3513 goto next; 3514 } 3515 3516 if (key.type == BTRFS_METADATA_ITEM_KEY && 3517 key.objectid + fs_info->nodesize <= 3518 rc->search_start) { 3519 path->slots[0]++; 3520 goto next; 3521 } 3522 3523 ret = find_first_extent_bit(&rc->processed_blocks, 3524 key.objectid, &start, &end, 3525 EXTENT_DIRTY, NULL); 3526 3527 if (ret == 0 && start <= key.objectid) { 3528 btrfs_release_path(path); 3529 rc->search_start = end + 1; 3530 } else { 3531 if (key.type == BTRFS_EXTENT_ITEM_KEY) 3532 rc->search_start = key.objectid + key.offset; 3533 else 3534 rc->search_start = key.objectid + 3535 fs_info->nodesize; 3536 memcpy(extent_key, &key, sizeof(key)); 3537 return 0; 3538 } 3539 } 3540 btrfs_release_path(path); 3541 return ret; 3542 } 3543 3544 static void set_reloc_control(struct reloc_control *rc) 3545 { 3546 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3547 3548 mutex_lock(&fs_info->reloc_mutex); 3549 fs_info->reloc_ctl = rc; 3550 mutex_unlock(&fs_info->reloc_mutex); 3551 } 3552 3553 static void unset_reloc_control(struct reloc_control *rc) 3554 { 3555 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3556 3557 mutex_lock(&fs_info->reloc_mutex); 3558 fs_info->reloc_ctl = NULL; 3559 mutex_unlock(&fs_info->reloc_mutex); 3560 } 3561 3562 static noinline_for_stack 3563 int prepare_to_relocate(struct reloc_control *rc) 3564 { 3565 struct btrfs_trans_handle *trans; 3566 int ret; 3567 3568 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info, 3569 BTRFS_BLOCK_RSV_TEMP); 3570 if (!rc->block_rsv) 3571 return -ENOMEM; 3572 3573 memset(&rc->cluster, 0, sizeof(rc->cluster)); 3574 rc->search_start = rc->block_group->start; 3575 rc->extents_found = 0; 3576 rc->nodes_relocated = 0; 3577 rc->merging_rsv_size = 0; 3578 rc->reserved_bytes = 0; 3579 rc->block_rsv->size = rc->extent_root->fs_info->nodesize * 3580 RELOCATION_RESERVED_NODES; 3581 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info, 3582 rc->block_rsv, rc->block_rsv->size, 3583 BTRFS_RESERVE_FLUSH_ALL); 3584 if (ret) 3585 return ret; 3586 3587 rc->create_reloc_tree = 1; 3588 set_reloc_control(rc); 3589 3590 trans = btrfs_join_transaction(rc->extent_root); 3591 if (IS_ERR(trans)) { 3592 unset_reloc_control(rc); 3593 /* 3594 * extent tree is not a ref_cow tree and has no reloc_root to 3595 * cleanup. And callers are responsible to free the above 3596 * block rsv. 
3597 */ 3598 return PTR_ERR(trans); 3599 } 3600 3601 ret = btrfs_commit_transaction(trans); 3602 if (ret) 3603 unset_reloc_control(rc); 3604 3605 return ret; 3606 } 3607 3608 static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 3609 { 3610 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3611 struct rb_root blocks = RB_ROOT; 3612 struct btrfs_key key; 3613 struct btrfs_trans_handle *trans = NULL; 3614 struct btrfs_path *path; 3615 struct btrfs_extent_item *ei; 3616 u64 flags; 3617 int ret; 3618 int err = 0; 3619 int progress = 0; 3620 3621 path = btrfs_alloc_path(); 3622 if (!path) 3623 return -ENOMEM; 3624 path->reada = READA_FORWARD; 3625 3626 ret = prepare_to_relocate(rc); 3627 if (ret) { 3628 err = ret; 3629 goto out_free; 3630 } 3631 3632 while (1) { 3633 rc->reserved_bytes = 0; 3634 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, 3635 rc->block_rsv->size, 3636 BTRFS_RESERVE_FLUSH_ALL); 3637 if (ret) { 3638 err = ret; 3639 break; 3640 } 3641 progress++; 3642 trans = btrfs_start_transaction(rc->extent_root, 0); 3643 if (IS_ERR(trans)) { 3644 err = PTR_ERR(trans); 3645 trans = NULL; 3646 break; 3647 } 3648 restart: 3649 if (update_backref_cache(trans, &rc->backref_cache)) { 3650 btrfs_end_transaction(trans); 3651 trans = NULL; 3652 continue; 3653 } 3654 3655 ret = find_next_extent(rc, path, &key); 3656 if (ret < 0) 3657 err = ret; 3658 if (ret != 0) 3659 break; 3660 3661 rc->extents_found++; 3662 3663 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 3664 struct btrfs_extent_item); 3665 flags = btrfs_extent_flags(path->nodes[0], ei); 3666 3667 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 3668 ret = add_tree_block(rc, &key, path, &blocks); 3669 } else if (rc->stage == UPDATE_DATA_PTRS && 3670 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3671 ret = add_data_references(rc, &key, path, &blocks); 3672 } else { 3673 btrfs_release_path(path); 3674 ret = 0; 3675 } 3676 if (ret < 0) { 3677 err = ret; 3678 break; 3679 } 3680 3681 if (!RB_EMPTY_ROOT(&blocks)) { 3682 ret = relocate_tree_blocks(trans, rc, &blocks); 3683 if (ret < 0) { 3684 if (ret != -EAGAIN) { 3685 err = ret; 3686 break; 3687 } 3688 rc->extents_found--; 3689 rc->search_start = key.objectid; 3690 } 3691 } 3692 3693 btrfs_end_transaction_throttle(trans); 3694 btrfs_btree_balance_dirty(fs_info); 3695 trans = NULL; 3696 3697 if (rc->stage == MOVE_DATA_EXTENTS && 3698 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3699 rc->found_file_extent = 1; 3700 ret = relocate_data_extent(rc->data_inode, 3701 &key, &rc->cluster); 3702 if (ret < 0) { 3703 err = ret; 3704 break; 3705 } 3706 } 3707 if (btrfs_should_cancel_balance(fs_info)) { 3708 err = -ECANCELED; 3709 break; 3710 } 3711 } 3712 if (trans && progress && err == -ENOSPC) { 3713 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags); 3714 if (ret == 1) { 3715 err = 0; 3716 progress = 0; 3717 goto restart; 3718 } 3719 } 3720 3721 btrfs_release_path(path); 3722 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); 3723 3724 if (trans) { 3725 btrfs_end_transaction_throttle(trans); 3726 btrfs_btree_balance_dirty(fs_info); 3727 } 3728 3729 if (!err) { 3730 ret = relocate_file_extent_cluster(rc->data_inode, 3731 &rc->cluster); 3732 if (ret < 0) 3733 err = ret; 3734 } 3735 3736 rc->create_reloc_tree = 0; 3737 set_reloc_control(rc); 3738 3739 btrfs_backref_release_cache(&rc->backref_cache); 3740 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); 3741 3742 /* 3743 * Even in the case when the relocation is cancelled, we should all go 3744 * 
through prepare_to_merge() and merge_reloc_roots(). 3745 * 3746 * For error (including cancelled balance), prepare_to_merge() will 3747 * mark all reloc trees orphan, then queue them for cleanup in 3748 * merge_reloc_roots() 3749 */ 3750 err = prepare_to_merge(rc, err); 3751 3752 merge_reloc_roots(rc); 3753 3754 rc->merge_reloc_tree = 0; 3755 unset_reloc_control(rc); 3756 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); 3757 3758 /* get rid of pinned extents */ 3759 trans = btrfs_join_transaction(rc->extent_root); 3760 if (IS_ERR(trans)) { 3761 err = PTR_ERR(trans); 3762 goto out_free; 3763 } 3764 ret = btrfs_commit_transaction(trans); 3765 if (ret && !err) 3766 err = ret; 3767 out_free: 3768 ret = clean_dirty_subvols(rc); 3769 if (ret < 0 && !err) 3770 err = ret; 3771 btrfs_free_block_rsv(fs_info, rc->block_rsv); 3772 btrfs_free_path(path); 3773 return err; 3774 } 3775 3776 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 3777 struct btrfs_root *root, u64 objectid) 3778 { 3779 struct btrfs_path *path; 3780 struct btrfs_inode_item *item; 3781 struct extent_buffer *leaf; 3782 int ret; 3783 3784 path = btrfs_alloc_path(); 3785 if (!path) 3786 return -ENOMEM; 3787 3788 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 3789 if (ret) 3790 goto out; 3791 3792 leaf = path->nodes[0]; 3793 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 3794 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3795 btrfs_set_inode_generation(leaf, item, 1); 3796 btrfs_set_inode_size(leaf, item, 0); 3797 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 3798 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 3799 BTRFS_INODE_PREALLOC); 3800 btrfs_mark_buffer_dirty(leaf); 3801 out: 3802 btrfs_free_path(path); 3803 return ret; 3804 } 3805 3806 static void delete_orphan_inode(struct btrfs_trans_handle *trans, 3807 struct btrfs_root *root, u64 objectid) 3808 { 3809 struct btrfs_path *path; 3810 struct btrfs_key key; 3811 int ret = 0; 3812 3813 path = btrfs_alloc_path(); 3814 if (!path) { 3815 ret = -ENOMEM; 3816 goto out; 3817 } 3818 3819 key.objectid = objectid; 3820 key.type = BTRFS_INODE_ITEM_KEY; 3821 key.offset = 0; 3822 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3823 if (ret) { 3824 if (ret > 0) 3825 ret = -ENOENT; 3826 goto out; 3827 } 3828 ret = btrfs_del_item(trans, root, path); 3829 out: 3830 if (ret) 3831 btrfs_abort_transaction(trans, ret); 3832 btrfs_free_path(path); 3833 } 3834 3835 /* 3836 * helper to create inode for data relocation. 
3837 * the inode is in data relocation tree and its link count is 0 3838 */ 3839 static noinline_for_stack 3840 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, 3841 struct btrfs_block_group *group) 3842 { 3843 struct inode *inode = NULL; 3844 struct btrfs_trans_handle *trans; 3845 struct btrfs_root *root; 3846 u64 objectid; 3847 int err = 0; 3848 3849 root = btrfs_grab_root(fs_info->data_reloc_root); 3850 trans = btrfs_start_transaction(root, 6); 3851 if (IS_ERR(trans)) { 3852 btrfs_put_root(root); 3853 return ERR_CAST(trans); 3854 } 3855 3856 err = btrfs_get_free_objectid(root, &objectid); 3857 if (err) 3858 goto out; 3859 3860 err = __insert_orphan_inode(trans, root, objectid); 3861 if (err) 3862 goto out; 3863 3864 inode = btrfs_iget(fs_info->sb, objectid, root); 3865 if (IS_ERR(inode)) { 3866 delete_orphan_inode(trans, root, objectid); 3867 err = PTR_ERR(inode); 3868 inode = NULL; 3869 goto out; 3870 } 3871 BTRFS_I(inode)->index_cnt = group->start; 3872 3873 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 3874 out: 3875 btrfs_put_root(root); 3876 btrfs_end_transaction(trans); 3877 btrfs_btree_balance_dirty(fs_info); 3878 if (err) { 3879 iput(inode); 3880 inode = ERR_PTR(err); 3881 } 3882 return inode; 3883 } 3884 3885 /* 3886 * Mark start of chunk relocation that is cancellable. Check if the cancellation 3887 * has been requested meanwhile and don't start in that case. 3888 * 3889 * Return: 3890 * 0 success 3891 * -EINPROGRESS operation is already in progress, that's probably a bug 3892 * -ECANCELED cancellation request was set before the operation started 3893 */ 3894 static int reloc_chunk_start(struct btrfs_fs_info *fs_info) 3895 { 3896 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) { 3897 /* This should not happen */ 3898 btrfs_err(fs_info, "reloc already running, cannot start"); 3899 return -EINPROGRESS; 3900 } 3901 3902 if (atomic_read(&fs_info->reloc_cancel_req) > 0) { 3903 btrfs_info(fs_info, "chunk relocation canceled on start"); 3904 /* 3905 * On cancel, clear all requests but let the caller mark 3906 * the end after cleanup operations. 3907 */ 3908 atomic_set(&fs_info->reloc_cancel_req, 0); 3909 return -ECANCELED; 3910 } 3911 return 0; 3912 } 3913 3914 /* 3915 * Mark end of chunk relocation that is cancellable and wake any waiters. 
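 * Pairs with reloc_chunk_start(); any cancellation request that arrived in
 * the meantime is cleared here as well.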
3916 */ 3917 static void reloc_chunk_end(struct btrfs_fs_info *fs_info) 3918 { 3919 /* Requested after start, clear bit first so any waiters can continue */ 3920 if (atomic_read(&fs_info->reloc_cancel_req) > 0) 3921 btrfs_info(fs_info, "chunk relocation canceled during operation"); 3922 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags); 3923 atomic_set(&fs_info->reloc_cancel_req, 0); 3924 } 3925 3926 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) 3927 { 3928 struct reloc_control *rc; 3929 3930 rc = kzalloc(sizeof(*rc), GFP_NOFS); 3931 if (!rc) 3932 return NULL; 3933 3934 INIT_LIST_HEAD(&rc->reloc_roots); 3935 INIT_LIST_HEAD(&rc->dirty_subvol_roots); 3936 btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1); 3937 mapping_tree_init(&rc->reloc_root_tree); 3938 extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS); 3939 return rc; 3940 } 3941 3942 static void free_reloc_control(struct reloc_control *rc) 3943 { 3944 struct mapping_node *node, *tmp; 3945 3946 free_reloc_roots(&rc->reloc_roots); 3947 rbtree_postorder_for_each_entry_safe(node, tmp, 3948 &rc->reloc_root_tree.rb_root, rb_node) 3949 kfree(node); 3950 3951 kfree(rc); 3952 } 3953 3954 /* 3955 * Print the block group being relocated 3956 */ 3957 static void describe_relocation(struct btrfs_fs_info *fs_info, 3958 struct btrfs_block_group *block_group) 3959 { 3960 char buf[128] = {'\0'}; 3961 3962 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf)); 3963 3964 btrfs_info(fs_info, 3965 "relocating block group %llu flags %s", 3966 block_group->start, buf); 3967 } 3968 3969 static const char *stage_to_string(int stage) 3970 { 3971 if (stage == MOVE_DATA_EXTENTS) 3972 return "move data extents"; 3973 if (stage == UPDATE_DATA_PTRS) 3974 return "update data pointers"; 3975 return "unknown"; 3976 } 3977 3978 /* 3979 * function to relocate all extents in a block group. 3980 */ 3981 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) 3982 { 3983 struct btrfs_block_group *bg; 3984 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start); 3985 struct reloc_control *rc; 3986 struct inode *inode; 3987 struct btrfs_path *path; 3988 int ret; 3989 int rw = 0; 3990 int err = 0; 3991 3992 /* 3993 * This only gets set if we had a half-deleted snapshot on mount. We 3994 * cannot allow relocation to start while we're still trying to clean up 3995 * these pending deletions. 3996 */ 3997 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE); 3998 if (ret) 3999 return ret; 4000 4001 /* We may have been woken up by close_ctree, so bail if we're closing. */ 4002 if (btrfs_fs_closing(fs_info)) 4003 return -EINTR; 4004 4005 bg = btrfs_lookup_block_group(fs_info, group_start); 4006 if (!bg) 4007 return -ENOENT; 4008 4009 /* 4010 * Relocation of a data block group creates ordered extents. Without 4011 * sb_start_write(), we can freeze the filesystem while unfinished 4012 * ordered extents are left. Such ordered extents can cause a deadlock 4013 * e.g. when syncfs() is waiting for their completion but they can't 4014 * finish because they block when joining a transaction, due to the 4015 * fact that the freeze locks are being held in write mode. 
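 *
 * Only data writes go through ordered extents, which is why the assertion
 * below is limited to data block groups.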
	 */
	if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
		ASSERT(sb_write_started(fs_info->sb));

	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
		btrfs_put_block_group(bg);
		return -ETXTBSY;
	}

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		btrfs_put_block_group(bg);
		return -ENOMEM;
	}

	ret = reloc_chunk_start(fs_info);
	if (ret < 0) {
		err = ret;
		goto out_put_bg;
	}

	rc->extent_root = extent_root;
	rc->block_group = bg;

	ret = btrfs_inc_block_group_ro(rc->block_group, true);
	if (ret) {
		err = ret;
		goto out;
	}
	rw = 1;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(rc->block_group, path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
	else
		ret = PTR_ERR(inode);

	if (ret && ret != -ENOENT) {
		err = ret;
		goto out;
	}

	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

	describe_relocation(fs_info, rc->block_group);

	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
	btrfs_wait_ordered_roots(fs_info, U64_MAX,
				 rc->block_group->start,
				 rc->block_group->length);

	ret = btrfs_zone_finish(rc->block_group);
	WARN_ON(ret && ret != -EAGAIN);

	while (1) {
		int finishes_stage;

		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0)
			err = ret;

		finishes_stage = rc->stage;
		/*
		 * We may have gotten ENOSPC after we already dirtied some
		 * extents.  If writeout happens while we're relocating a
		 * different block group we could end up hitting the
		 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
		 * btrfs_reloc_cow_block.  Make sure we write everything out
		 * properly so we don't trip over this problem, and then break
		 * out of the loop if we hit an error.
		 */
		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
						       (u64)-1);
			if (ret)
				err = ret;
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}

		if (err < 0)
			goto out;

		if (rc->extents_found == 0)
			break;

		btrfs_info(fs_info, "found %llu extents, stage: %s",
			   rc->extents_found, stage_to_string(finishes_stage));
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(rc->block_group->used > 0);
out:
	if (err && rw)
		btrfs_dec_block_group_ro(rc->block_group);
	iput(rc->data_inode);
out_put_bg:
	btrfs_put_block_group(bg);
	reloc_chunk_end(fs_info);
	free_reloc_control(rc);
	return err;
}

static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret, err;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
	       sizeof(root->root_item.drop_progress));
	btrfs_set_root_drop_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);

	err = btrfs_end_transaction(trans);
	if (err)
		return err;
	return ret;
}

/*
 * recover relocation interrupted by system crash.
 *
 * this function resumes merging reloc trees with corresponding fs trees.
 * this is important for keeping the sharing of tree blocks
 */
int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
{
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = btrfs_get_fs_root(fs_info,
					reloc_root->root_key.offset, false);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			} else {
				btrfs_put_root(fs_root);
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	ret = reloc_chunk_start(fs_info);
	if (ret < 0) {
		err = ret;
		goto out_end;
	}

	rc->extent_root = btrfs_extent_root(fs_info, 0);

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_unset;
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					    false);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_end_transaction(trans);
			goto out_unset;
		}

		err = __add_reloc_root(reloc_root);
		ASSERT(err != -EEXIST);
		if (err) {
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_put_root(fs_root);
			btrfs_end_transaction(trans);
			goto out_unset;
		}
		fs_root->reloc_root = btrfs_grab_root(reloc_root);
		btrfs_put_root(fs_root);
	}

	err = btrfs_commit_transaction(trans);
	if (err)
		goto out_unset;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_clean;
	}
	err = btrfs_commit_transaction(trans);
out_clean:
	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
out_unset:
	unset_reloc_control(rc);
out_end:
	reloc_chunk_end(fs_info);
	free_reloc_control(rc);
out:
	free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
		ASSERT(fs_root);
		err = btrfs_orphan_cleanup(fs_root);
		btrfs_put_root(fs_root);
	}
	return err;
}
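
/*
 * Worked example, added for illustration and not part of the original source
 * (the numbers are made up), for the offset arithmetic used in
 * btrfs_reloc_clone_csums() below: if the data reloc inode maps file offset 0
 * to the old disk bytenr 1M (index_cnt == 1M), a csum item found at old
 * bytenr 1M + 64K covers data 64K into the extent.  With the new ordered
 * extent starting at disk bytenr 5M, the csum is rewritten to
 *
 *	new_bytenr = 5M + (1M + 64K) - 1M = 5M + 64K
 *
 * so the checksum stays attached to the same 64K offset within the relocated
 * extent.
 */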

/*
 * helper to add ordered checksum for data relocation.
 *
 * cloning checksum properly handles the nodatasum extents.
 * it also saves CPU time to re-calculate the checksum.
 */
int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *csum_root;
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);

	disk_bytenr = file_pos + inode->index_cnt;
	csum_root = btrfs_csum_root(fs_info, disk_bytenr);
	ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
				      disk_bytenr + len - 1, &list, 0, false);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}

int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct reloc_control *rc;
	struct btrfs_backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		btrfs_backref_drop_node_buffer(node);
		atomic_inc(&cow->refs);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}
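
/*
 * Illustrative note, not part of the original source: the two snapshot hooks
 * below are assumed to be driven from the snapshot creation path roughly as
 *
 *	btrfs_reloc_pre_snapshot(pending, &bytes_to_reserve);
 *	... reserve bytes_to_reserve and create the snapshot ...
 *	ret = btrfs_reloc_post_snapshot(trans, pending);
 *
 * i.e. the pre hook sizes the extra metadata reservation needed while reloc
 * trees are being merged, and the post hook migrates that reservation and
 * gives the new snapshot its own reloc root.
 */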

/*
 * called before creating snapshot. it calculates metadata reservation
 * required for relocating tree blocks in the snapshot
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root = pending->root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	if (!rc || !have_reloc_root(root))
		return;

	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * relocation is in the stage of merging trees. the space
	 * used by merging a reloc tree is twice the size of
	 * relocated tree nodes in the worst case. half for cowing
	 * the reloc tree, half for cowing the fs tree. the space
	 * used by cowing the reloc tree will be freed after the
	 * tree is dropped. if we create snapshot, cowing the fs
	 * tree may use more space than it frees. so we need
	 * reserve extra space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}

/*
 * called after snapshot is created. migrate block reservation
 * and create reloc root for the newly created snapshot
 *
 * This is similar to btrfs_init_reloc_root(), we come out of here with two
 * references held on the reloc_root, one for root->reloc_root and one for
 * rc->reloc_roots.
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	int ret;

	if (!rc || !have_reloc_root(root))
		return 0;

	rc = root->fs_info->reloc_ctl;
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, true);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	new_root->reloc_root = btrfs_grab_root(reloc_root);

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}

/*
 * Get the current bytenr for the block group which is being relocated.
 *
 * Return U64_MAX if no running relocation.
 */
u64 btrfs_get_reloc_bg_bytenr(struct btrfs_fs_info *fs_info)
{
	u64 logical = U64_MAX;

	lockdep_assert_held(&fs_info->reloc_mutex);

	if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group)
		logical = fs_info->reloc_ctl->block_group->start;
	return logical;
}
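
/*
 * Illustrative sketch only, not part of the original source: a minimal
 * example of how a caller could drive btrfs_relocate_block_group() for a
 * single chunk.  The helper name is hypothetical; it assumes the freeze
 * protection required by the ASSERT for data block groups above and omits
 * the locking and chunk removal that the real callers (e.g. the balance and
 * shrink paths) perform afterwards.
 */
static int __maybe_unused relocate_one_chunk_sketch(struct btrfs_fs_info *fs_info,
						    u64 chunk_offset)
{
	int ret;

	/* Block filesystem freezing while ordered extents may be created. */
	sb_start_write(fs_info->sb);
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
	sb_end_write(fs_info->sb);

	return ret;
}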