Lines Matching +full:root +full:- +full:node
1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/error-injection.h>
14 #include "disk-io.h"
19 #include "async-thread.h"
20 #include "free-space-cache.h"
22 #include "print-tree.h"
23 #include "delalloc-space.h"
24 #include "block-group.h"
29 #include "inode-item.h"
30 #include "space-info.h"
33 #include "extent-tree.h"
34 #include "root-tree.h"
35 #include "file-item.h"
38 #include "tree-checker.h"
51 * ------------------------------------------------------------------
58 * 1. Mark the target block group read-only
89 * map address of tree root to tree
142 /* map start of tree root to corresponding reloc tree */
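The field comments above (lines 89 and 142) describe the lookup structure at the heart of relocation: each reloc tree is indexed by the byte number of the tree root it shadows, so a root can be found again from nothing but its bytenr. A minimal userspace sketch of that idea, using a sorted array in place of the kernel's rb-tree; every name below is hypothetical, not a btrfs API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct mapping_node: root bytenr -> tree. */
struct mapping {
	uint64_t bytenr;	/* start of the tree root block */
	const char *tree;	/* the (reloc) tree it maps to */
};

static int cmp_mapping(const void *a, const void *b)
{
	const struct mapping *ma = a, *mb = b;

	if (ma->bytenr < mb->bytenr)
		return -1;
	return ma->bytenr > mb->bytenr;
}

/* Rough analogue of find_reloc_root(): look up a tree by root bytenr. */
static const char *find_tree(struct mapping *map, size_t nr, uint64_t bytenr)
{
	struct mapping key = { .bytenr = bytenr };
	struct mapping *found;

	found = bsearch(&key, map, nr, sizeof(*map), cmp_mapping);
	return found ? found->tree : NULL;
}

int main(void)
{
	struct mapping map[] = {
		{ 30408704, "reloc tree for subvol 256" },
		{ 30425088, "reloc tree for subvol 257" },
	};

	qsort(map, 2, sizeof(map[0]), cmp_mapping);
	printf("%s\n", find_tree(map, 2, 30425088));
	return 0;
}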
169 struct btrfs_backref_node *node) in mark_block_processed() argument
173 if (node->level == 0 || in mark_block_processed()
174 in_range(node->bytenr, rc->block_group->start, in mark_block_processed()
175 rc->block_group->length)) { in mark_block_processed()
176 blocksize = rc->extent_root->fs_info->nodesize; in mark_block_processed()
177 set_extent_bit(&rc->processed_blocks, node->bytenr, in mark_block_processed()
178 node->bytenr + blocksize - 1, EXTENT_DIRTY, NULL); in mark_block_processed()
180 node->processed = 1; in mark_block_processed()
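mark_block_processed() (lines 169-180) records a block as done by setting EXTENT_DIRTY over its [bytenr, bytenr + nodesize - 1] range in rc->processed_blocks; tree_block_processed() later tests the same range. For blocks inside the target block group this amounts to one bit per nodesize-sized block. A self-contained sketch of that arithmetic, assuming blocks inside one block group and a plain bitmap standing in for the extent-bit tree (sizes and names are illustrative assumptions):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define NODESIZE	16384ULL		/* assumed fs_info->nodesize */
#define BG_START	1073741824ULL		/* assumed block group start */
#define BG_LENGTH	(1024ULL * 1024 * 1024)	/* assumed block group length */
#define NR_BLOCKS	(BG_LENGTH / NODESIZE)

static unsigned long processed[NR_BLOCKS / (8 * sizeof(unsigned long))];

/* Analogue of mark_block_processed(): remember that a tree block was done. */
static void mark_processed(uint64_t bytenr)
{
	uint64_t idx = (bytenr - BG_START) / NODESIZE;

	processed[idx / (8 * sizeof(unsigned long))] |=
		1UL << (idx % (8 * sizeof(unsigned long)));
}

/* Analogue of tree_block_processed(): was this block already relocated? */
static bool block_processed(uint64_t bytenr)
{
	uint64_t idx = (bytenr - BG_START) / NODESIZE;

	return processed[idx / (8 * sizeof(unsigned long))] &
		(1UL << (idx % (8 * sizeof(unsigned long))));
}

int main(void)
{
	mark_processed(BG_START + 5 * NODESIZE);
	assert(block_processed(BG_START + 5 * NODESIZE));
	assert(!block_processed(BG_START + 6 * NODESIZE));
	return 0;
}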
186 tree->rb_root = RB_ROOT; in mapping_tree_init()
187 spin_lock_init(&tree->lock); in mapping_tree_init()
191 * walk up backref nodes until we reach the node that represents the tree root
194 struct btrfs_backref_node *node, in walk_up_backref() argument
200 while (!list_empty(&node->upper)) { in walk_up_backref()
201 edge = list_entry(node->upper.next, in walk_up_backref()
204 node = edge->node[UPPER]; in walk_up_backref()
206 BUG_ON(node->detached); in walk_up_backref()
208 return node; in walk_up_backref()
222 edge = edges[idx - 1]; in walk_down_backref()
223 lower = edge->node[LOWER]; in walk_down_backref()
224 if (list_is_last(&edge->list[LOWER], &lower->upper)) { in walk_down_backref()
225 idx--; in walk_down_backref()
228 edge = list_entry(edge->list[LOWER].next, in walk_down_backref()
230 edges[idx - 1] = edge; in walk_down_backref()
232 return edge->node[UPPER]; in walk_down_backref()
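walk_up_backref() follows the first "upper" edge of each node until it reaches a node with no parents (a tree root), remembering the edge taken at each level; walk_down_backref() then backtracks to the next unvisited sibling edge, so every path to a root is eventually walked. A simplified sketch of that edge-stack traversal over a tiny reference DAG; the types and helpers are hypothetical, not the btrfs backref cache:

#include <stdio.h>

#define MAX_UP		4
#define MAX_DEPTH	8

/* Hypothetical backref node: a block plus the blocks that reference it. */
struct bnode {
	const char *name;
	struct bnode *upper[MAX_UP];
	int nr_upper;
};

/* Analogue of walk_up_backref(): follow first parents up to a root. */
static struct bnode *walk_up(struct bnode *n, struct bnode **lower, int *sel,
			     int *depth)
{
	while (n->nr_upper) {
		lower[*depth] = n;
		sel[*depth] = 0;
		n = n->upper[0];
		(*depth)++;
	}
	return n;	/* no parents left: this path ends at a tree root */
}

/* Analogue of walk_down_backref(): back up to the next unvisited sibling. */
static struct bnode *walk_down(struct bnode **lower, int *sel, int *depth)
{
	while (*depth > 0) {
		struct bnode *l = lower[*depth - 1];

		if (sel[*depth - 1] + 1 >= l->nr_upper) {
			(*depth)--;
			continue;
		}
		sel[*depth - 1]++;
		return l->upper[sel[*depth - 1]];
	}
	return NULL;	/* every root path has been visited */
}

int main(void)
{
	struct bnode root_a = { "root A" }, root_b = { "root B" };
	struct bnode mid = { "mid", { &root_a, &root_b }, 2 };
	struct bnode leaf = { "leaf", { &mid }, 1 };
	struct bnode *lower[MAX_DEPTH], *cur = &leaf;
	int sel[MAX_DEPTH], depth = 0;

	while (cur) {
		printf("reached %s\n", walk_up(cur, lower, sel, &depth)->name);
		cur = walk_down(lower, sel, &depth);
	}
	return 0;
}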
238 static bool reloc_root_is_dead(const struct btrfs_root *root) in reloc_root_is_dead() argument
246 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)) in reloc_root_is_dead()
255 * This is enough for most callers, as they don't distinguish dead reloc root
256 * from no reloc root. But btrfs_should_ignore_reloc_root() below is a
259 static bool have_reloc_root(const struct btrfs_root *root) in have_reloc_root() argument
261 if (reloc_root_is_dead(root)) in have_reloc_root()
263 if (!root->reloc_root) in have_reloc_root()
268 bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root) in btrfs_should_ignore_reloc_root() argument
272 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in btrfs_should_ignore_reloc_root()
275 /* This root has been merged with its reloc tree, we can ignore it */ in btrfs_should_ignore_reloc_root()
276 if (reloc_root_is_dead(root)) in btrfs_should_ignore_reloc_root()
279 reloc_root = root->reloc_root; in btrfs_should_ignore_reloc_root()
283 if (btrfs_header_generation(reloc_root->commit_root) == in btrfs_should_ignore_reloc_root()
284 root->fs_info->running_transaction->transid) in btrfs_should_ignore_reloc_root()
288 * backref lookup can find the reloc tree, so backref node for the fs in btrfs_should_ignore_reloc_root()
289 * tree root is useless for relocation. in btrfs_should_ignore_reloc_root()
295 * find reloc tree by address of tree root
299 struct reloc_control *rc = fs_info->reloc_ctl; in find_reloc_root()
301 struct mapping_node *node; in find_reloc_root() local
302 struct btrfs_root *root = NULL; in find_reloc_root() local
305 spin_lock(&rc->reloc_root_tree.lock); in find_reloc_root()
306 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr); in find_reloc_root()
308 node = rb_entry(rb_node, struct mapping_node, rb_node); in find_reloc_root()
309 root = node->data; in find_reloc_root()
311 spin_unlock(&rc->reloc_root_tree.lock); in find_reloc_root()
312 return btrfs_grab_root(root); in find_reloc_root()
318 * - Cleanup the children edges and nodes
319 * If child node is also orphan (no parent) during cleanup, then the child
320 * node will also be cleaned up.
322 * - Freeing up leaves (level 0), keeps nodes detached
323 * For nodes, the node is still cached as "detached"
325 * Return false if @node is not in the @useless_nodes list.
326 * Return true if @node is in the @useless_nodes list.
329 struct btrfs_backref_node *node) in handle_useless_nodes() argument
331 struct btrfs_backref_cache *cache = &rc->backref_cache; in handle_useless_nodes()
332 struct list_head *useless_node = &cache->useless_node; in handle_useless_nodes()
340 list_del_init(&cur->list); in handle_useless_nodes()
342 /* Only tree root nodes can be added to @useless_nodes */ in handle_useless_nodes()
343 ASSERT(list_empty(&cur->upper)); in handle_useless_nodes()
345 if (cur == node) in handle_useless_nodes()
348 /* The node is the lowest node */ in handle_useless_nodes()
349 if (cur->lowest) { in handle_useless_nodes()
350 list_del_init(&cur->lower); in handle_useless_nodes()
351 cur->lowest = 0; in handle_useless_nodes()
355 while (!list_empty(&cur->lower)) { in handle_useless_nodes()
359 edge = list_entry(cur->lower.next, in handle_useless_nodes()
361 list_del(&edge->list[UPPER]); in handle_useless_nodes()
362 list_del(&edge->list[LOWER]); in handle_useless_nodes()
363 lower = edge->node[LOWER]; in handle_useless_nodes()
366 /* Child node is also orphan, queue for cleanup */ in handle_useless_nodes()
367 if (list_empty(&lower->upper)) in handle_useless_nodes()
368 list_add(&lower->list, useless_node); in handle_useless_nodes()
378 if (cur->level > 0) { in handle_useless_nodes()
379 list_add(&cur->list, &cache->detached); in handle_useless_nodes()
380 cur->detached = 1; in handle_useless_nodes()
382 rb_erase(&cur->rb_node, &cache->rb_root); in handle_useless_nodes()
390 * Build backref tree for a given tree block. Root of the backref tree
392 * b-trees that reference the tree block.
396 * these upper level blocks recursively. The recursion stops when tree root is
409 struct btrfs_backref_cache *cache = &rc->backref_cache; in build_backref_tree()
413 struct btrfs_backref_node *node = NULL; in build_backref_tree() local
418 iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info); in build_backref_tree()
420 return ERR_PTR(-ENOMEM); in build_backref_tree()
423 err = -ENOMEM; in build_backref_tree()
427 node = btrfs_backref_alloc_node(cache, bytenr, level); in build_backref_tree()
428 if (!node) { in build_backref_tree()
429 err = -ENOMEM; in build_backref_tree()
433 node->lowest = 1; in build_backref_tree()
434 cur = node; in build_backref_tree()
436 /* Breadth-first search to build backref cache */ in build_backref_tree()
444 edge = list_first_entry_or_null(&cache->pending_edge, in build_backref_tree()
451 list_del_init(&edge->list[UPPER]); in build_backref_tree()
452 cur = edge->node[UPPER]; in build_backref_tree()
457 ret = btrfs_backref_finish_upper_links(cache, node); in build_backref_tree()
463 if (handle_useless_nodes(rc, node)) in build_backref_tree()
464 node = NULL; in build_backref_tree()
469 btrfs_backref_error_cleanup(cache, node); in build_backref_tree()
472 ASSERT(!node || !node->detached); in build_backref_tree()
473 ASSERT(list_empty(&cache->useless_node) && in build_backref_tree()
474 list_empty(&cache->pending_edge)); in build_backref_tree()
475 return node; in build_backref_tree()
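build_backref_tree() (lines 390-475) is described above as a breadth-first search: start from the block being relocated, look up the blocks that reference it, and keep expanding upward until every path ends at a tree root. Stripped of the caching details, that is a plain work-queue loop. A hypothetical sketch, with a fixed parent table standing in for the extent-tree backref lookup:

#include <stdio.h>

#define MAX_NODES	8
#define MAX_PARENTS	3

/* parents[i] lists the blocks that reference block i; -1 terminates. */
static const int parents[MAX_NODES][MAX_PARENTS] = {
	[0] = { 1, 2, -1 },	/* block 0 is shared by blocks 1 and 2 */
	[1] = { 3, -1 },	/* block 1 is referenced by block 3 */
	[2] = { -1 },		/* block 2 has no parents: a tree root */
	[3] = { -1 },		/* so is block 3 */
};

int main(void)
{
	int queue[MAX_NODES], head = 0, tail = 0;
	int seen[MAX_NODES] = { 0 };

	/* Breadth-first expansion, analogous to the pending_edge loop. */
	queue[tail++] = 0;
	seen[0] = 1;
	while (head < tail) {
		int cur = queue[head++];
		int i;

		if (parents[cur][0] == -1) {
			printf("block %d ends a path at a tree root\n", cur);
			continue;
		}
		for (i = 0; i < MAX_PARENTS && parents[cur][i] != -1; i++) {
			int up = parents[cur][i];

			if (!seen[up]) {
				seen[up] = 1;
				queue[tail++] = up;
			}
		}
	}
	return 0;
}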
479 * helper to add a backref node for the newly created snapshot.
480 * the backref node is created by cloning the backref node that
481 * corresponds to the root of the source tree
488 struct btrfs_root *reloc_root = src->reloc_root; in clone_backref_node()
489 struct btrfs_backref_cache *cache = &rc->backref_cache; in clone_backref_node()
490 struct btrfs_backref_node *node = NULL; in clone_backref_node() local
496 rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start); in clone_backref_node()
498 node = rb_entry(rb_node, struct btrfs_backref_node, rb_node); in clone_backref_node()
499 if (node->detached) in clone_backref_node()
500 node = NULL; in clone_backref_node()
502 BUG_ON(node->new_bytenr != reloc_root->node->start); in clone_backref_node()
505 if (!node) { in clone_backref_node()
506 rb_node = rb_simple_search(&cache->rb_root, in clone_backref_node()
507 reloc_root->commit_root->start); in clone_backref_node()
509 node = rb_entry(rb_node, struct btrfs_backref_node, in clone_backref_node()
511 BUG_ON(node->detached); in clone_backref_node()
515 if (!node) in clone_backref_node()
518 new_node = btrfs_backref_alloc_node(cache, dest->node->start, in clone_backref_node()
519 node->level); in clone_backref_node()
521 return -ENOMEM; in clone_backref_node()
523 new_node->lowest = node->lowest; in clone_backref_node()
524 new_node->checked = 1; in clone_backref_node()
525 new_node->root = btrfs_grab_root(dest); in clone_backref_node()
526 ASSERT(new_node->root); in clone_backref_node()
528 if (!node->lowest) { in clone_backref_node()
529 list_for_each_entry(edge, &node->lower, list[UPPER]) { in clone_backref_node()
534 btrfs_backref_link_edge(new_edge, edge->node[LOWER], in clone_backref_node()
538 list_add_tail(&new_node->lower, &cache->leaves); in clone_backref_node()
541 rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr, in clone_backref_node()
542 &new_node->rb_node); in clone_backref_node()
544 btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST); in clone_backref_node()
546 if (!new_node->lowest) { in clone_backref_node()
547 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { in clone_backref_node()
548 list_add_tail(&new_edge->list[LOWER], in clone_backref_node()
549 &new_edge->node[LOWER]->upper); in clone_backref_node()
554 while (!list_empty(&new_node->lower)) { in clone_backref_node()
555 new_edge = list_entry(new_node->lower.next, in clone_backref_node()
557 list_del(&new_edge->list[UPPER]); in clone_backref_node()
561 return -ENOMEM; in clone_backref_node()
565 * helper to add 'address of tree root -> reloc tree' mapping
567 static int __must_check __add_reloc_root(struct btrfs_root *root) in __add_reloc_root() argument
569 struct btrfs_fs_info *fs_info = root->fs_info; in __add_reloc_root()
571 struct mapping_node *node; in __add_reloc_root() local
572 struct reloc_control *rc = fs_info->reloc_ctl; in __add_reloc_root()
574 node = kmalloc(sizeof(*node), GFP_NOFS); in __add_reloc_root()
575 if (!node) in __add_reloc_root()
576 return -ENOMEM; in __add_reloc_root()
578 node->bytenr = root->commit_root->start; in __add_reloc_root()
579 node->data = root; in __add_reloc_root()
581 spin_lock(&rc->reloc_root_tree.lock); in __add_reloc_root()
582 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, in __add_reloc_root()
583 node->bytenr, &node->rb_node); in __add_reloc_root()
584 spin_unlock(&rc->reloc_root_tree.lock); in __add_reloc_root()
587 "Duplicate root found for start=%llu while inserting into relocation tree", in __add_reloc_root()
588 node->bytenr); in __add_reloc_root()
589 return -EEXIST; in __add_reloc_root()
592 list_add_tail(&root->root_list, &rc->reloc_roots); in __add_reloc_root()
597 * helper to delete the 'address of tree root -> reloc tree'
600 static void __del_reloc_root(struct btrfs_root *root) in __del_reloc_root() argument
602 struct btrfs_fs_info *fs_info = root->fs_info; in __del_reloc_root()
604 struct mapping_node *node = NULL; in __del_reloc_root() local
605 struct reloc_control *rc = fs_info->reloc_ctl; in __del_reloc_root()
608 if (rc && root->node) { in __del_reloc_root()
609 spin_lock(&rc->reloc_root_tree.lock); in __del_reloc_root()
610 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, in __del_reloc_root()
611 root->commit_root->start); in __del_reloc_root()
613 node = rb_entry(rb_node, struct mapping_node, rb_node); in __del_reloc_root()
614 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); in __del_reloc_root()
615 RB_CLEAR_NODE(&node->rb_node); in __del_reloc_root()
617 spin_unlock(&rc->reloc_root_tree.lock); in __del_reloc_root()
618 ASSERT(!node || (struct btrfs_root *)node->data == root); in __del_reloc_root()
622 * We only put the reloc root here if it's on the list. There's a lot in __del_reloc_root()
623 * of places where the pattern is to splice the rc->reloc_roots, process in __del_reloc_root()
624 * the reloc roots, and then add the reloc root back onto in __del_reloc_root()
625 * rc->reloc_roots. If we call __del_reloc_root while it's off of the in __del_reloc_root()
629 spin_lock(&fs_info->trans_lock); in __del_reloc_root()
630 if (!list_empty(&root->root_list)) { in __del_reloc_root()
632 list_del_init(&root->root_list); in __del_reloc_root()
634 spin_unlock(&fs_info->trans_lock); in __del_reloc_root()
636 btrfs_put_root(root); in __del_reloc_root()
637 kfree(node); in __del_reloc_root()
641 * helper to update the 'address of tree root -> reloc tree'
644 static int __update_reloc_root(struct btrfs_root *root) in __update_reloc_root() argument
646 struct btrfs_fs_info *fs_info = root->fs_info; in __update_reloc_root()
648 struct mapping_node *node = NULL; in __update_reloc_root() local
649 struct reloc_control *rc = fs_info->reloc_ctl; in __update_reloc_root()
651 spin_lock(&rc->reloc_root_tree.lock); in __update_reloc_root()
652 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, in __update_reloc_root()
653 root->commit_root->start); in __update_reloc_root()
655 node = rb_entry(rb_node, struct mapping_node, rb_node); in __update_reloc_root()
656 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); in __update_reloc_root()
658 spin_unlock(&rc->reloc_root_tree.lock); in __update_reloc_root()
660 if (!node) in __update_reloc_root()
662 BUG_ON((struct btrfs_root *)node->data != root); in __update_reloc_root()
664 spin_lock(&rc->reloc_root_tree.lock); in __update_reloc_root()
665 node->bytenr = root->node->start; in __update_reloc_root()
666 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, in __update_reloc_root()
667 node->bytenr, &node->rb_node); in __update_reloc_root()
668 spin_unlock(&rc->reloc_root_tree.lock); in __update_reloc_root()
670 btrfs_backref_panic(fs_info, node->bytenr, -EEXIST); in __update_reloc_root()
675 struct btrfs_root *root, u64 objectid) in create_reloc_root() argument
677 struct btrfs_fs_info *fs_info = root->fs_info; in create_reloc_root()
687 return ERR_PTR(-ENOMEM); in create_reloc_root()
693 if (root->root_key.objectid == objectid) { in create_reloc_root()
697 ret = btrfs_copy_root(trans, root, root->commit_root, &eb, in create_reloc_root()
704 * root - like this ctree.c:btrfs_block_can_be_shared() behaves in create_reloc_root()
705 * correctly (returns true) when the relocation root is created in create_reloc_root()
710 commit_root_gen = btrfs_header_generation(root->commit_root); in create_reloc_root()
711 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen); in create_reloc_root()
720 ret = btrfs_copy_root(trans, root, root->node, &eb, in create_reloc_root()
732 memcpy(root_item, &root->root_item, sizeof(*root_item)); in create_reloc_root()
733 btrfs_set_root_bytenr(root_item, eb->start); in create_reloc_root()
735 btrfs_set_root_generation(root_item, trans->transid); in create_reloc_root()
737 if (root->root_key.objectid == objectid) { in create_reloc_root()
739 memset(&root_item->drop_progress, 0, in create_reloc_root()
747 ret = btrfs_insert_root(trans, fs_info->tree_root, in create_reloc_root()
754 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key); in create_reloc_root()
759 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); in create_reloc_root()
760 reloc_root->last_trans = trans->transid; in create_reloc_root()
772 * snapshot of the fs tree with special root objectid.
775 * root->reloc_root, and another for being on the rc->reloc_roots list.
778 struct btrfs_root *root) in btrfs_init_reloc_root() argument
780 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_init_reloc_root()
782 struct reloc_control *rc = fs_info->reloc_ctl; in btrfs_init_reloc_root()
794 if (reloc_root_is_dead(root)) in btrfs_init_reloc_root()
800 * corresponding fs root, and then here we update the last trans for the in btrfs_init_reloc_root()
801 * reloc root. This means that we have to do this for the entire life in btrfs_init_reloc_root()
802 * of the reloc root, regardless of which stage of the relocation we are in btrfs_init_reloc_root()
805 if (root->reloc_root) { in btrfs_init_reloc_root()
806 reloc_root = root->reloc_root; in btrfs_init_reloc_root()
807 reloc_root->last_trans = trans->transid; in btrfs_init_reloc_root()
815 if (!rc->create_reloc_tree || in btrfs_init_reloc_root()
816 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) in btrfs_init_reloc_root()
819 if (!trans->reloc_reserved) { in btrfs_init_reloc_root()
820 rsv = trans->block_rsv; in btrfs_init_reloc_root()
821 trans->block_rsv = rc->block_rsv; in btrfs_init_reloc_root()
824 reloc_root = create_reloc_root(trans, root, root->root_key.objectid); in btrfs_init_reloc_root()
826 trans->block_rsv = rsv; in btrfs_init_reloc_root()
831 ASSERT(ret != -EEXIST); in btrfs_init_reloc_root()
837 root->reloc_root = btrfs_grab_root(reloc_root); in btrfs_init_reloc_root()
842 * update root item of reloc tree
845 struct btrfs_root *root) in btrfs_update_reloc_root() argument
847 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_update_reloc_root()
852 if (!have_reloc_root(root)) in btrfs_update_reloc_root()
855 reloc_root = root->reloc_root; in btrfs_update_reloc_root()
856 root_item = &reloc_root->root_item; in btrfs_update_reloc_root()
860 * the root. We have the ref for root->reloc_root, but just in case in btrfs_update_reloc_root()
861 * hold it while we update the reloc root. in btrfs_update_reloc_root()
865 /* root->reloc_root will stay until current relocation finished */ in btrfs_update_reloc_root()
866 if (fs_info->reloc_ctl && fs_info->reloc_ctl->merge_reloc_tree && in btrfs_update_reloc_root()
868 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); in btrfs_update_reloc_root()
877 if (reloc_root->commit_root != reloc_root->node) { in btrfs_update_reloc_root()
879 btrfs_set_root_node(root_item, reloc_root->node); in btrfs_update_reloc_root()
880 free_extent_buffer(reloc_root->commit_root); in btrfs_update_reloc_root()
881 reloc_root->commit_root = btrfs_root_node(reloc_root); in btrfs_update_reloc_root()
884 ret = btrfs_update_root(trans, fs_info->tree_root, in btrfs_update_reloc_root()
885 &reloc_root->root_key, root_item); in btrfs_update_reloc_root()
894 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid) in find_next_inode() argument
896 struct rb_node *node; in find_next_inode() local
901 spin_lock(&root->inode_lock); in find_next_inode()
903 node = root->inode_tree.rb_node; in find_next_inode()
905 while (node) { in find_next_inode()
906 prev = node; in find_next_inode()
907 entry = rb_entry(node, struct btrfs_inode, rb_node); in find_next_inode()
910 node = node->rb_left; in find_next_inode()
912 node = node->rb_right; in find_next_inode()
916 if (!node) { in find_next_inode()
920 node = prev; in find_next_inode()
926 while (node) { in find_next_inode()
927 entry = rb_entry(node, struct btrfs_inode, rb_node); in find_next_inode()
928 inode = igrab(&entry->vfs_inode); in find_next_inode()
930 spin_unlock(&root->inode_lock); in find_next_inode()
935 if (cond_resched_lock(&root->inode_lock)) in find_next_inode()
938 node = rb_next(node); in find_next_inode()
940 spin_unlock(&root->inode_lock); in find_next_inode()
950 struct btrfs_root *root = BTRFS_I(reloc_inode)->root; in get_new_location() local
958 return -ENOMEM; in get_new_location()
960 bytenr -= BTRFS_I(reloc_inode)->index_cnt; in get_new_location()
961 ret = btrfs_lookup_file_extent(NULL, root, path, in get_new_location()
966 ret = -ENOENT; in get_new_location()
970 leaf = path->nodes[0]; in get_new_location()
971 fi = btrfs_item_ptr(leaf, path->slots[0], in get_new_location()
980 ret = -EINVAL; in get_new_location()
998 struct btrfs_root *root, in replace_file_extents() argument
1001 struct btrfs_fs_info *fs_info = root->fs_info; in replace_file_extents()
1016 if (rc->stage != UPDATE_DATA_PTRS) in replace_file_extents()
1020 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) in replace_file_extents()
1021 parent = leaf->start; in replace_file_extents()
1041 if (!in_range(bytenr, rc->block_group->start, in replace_file_extents()
1042 rc->block_group->length)) in replace_file_extents()
1049 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { in replace_file_extents()
1051 inode = find_next_inode(root, key.objectid); in replace_file_extents()
1055 inode = find_next_inode(root, key.objectid); in replace_file_extents()
1063 fs_info->sectorsize)); in replace_file_extents()
1064 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); in replace_file_extents()
1065 end--; in replace_file_extents()
1066 ret = try_lock_extent(&BTRFS_I(inode)->io_tree, in replace_file_extents()
1074 unlock_extent(&BTRFS_I(inode)->io_tree, in replace_file_extents()
1079 ret = get_new_location(rc->data_inode, &new_bytenr, in replace_file_extents()
1092 key.offset -= btrfs_file_extent_offset(leaf, fi); in replace_file_extents()
1097 root->root_key.objectid, false); in replace_file_extents()
1108 root->root_key.objectid, false); in replace_file_extents()
1129 btrfs_node_key(path->nodes[level], &key2, path->slots[level]); in memcmp_node_keys()
1148 struct btrfs_fs_info *fs_info = dest->fs_info; in replace_path()
1164 ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); in replace_path()
1165 ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); in replace_path()
1167 last_snapshot = btrfs_root_last_snapshot(&src->root_item); in replace_path()
1169 slot = path->slots[lowest_level]; in replace_path()
1170 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot); in replace_path()
1192 next_key->objectid = (u64)-1; in replace_path()
1193 next_key->type = (u8)-1; in replace_path()
1194 next_key->offset = (u64)-1; in replace_path()
1206 slot--; in replace_path()
1212 blocksize = fs_info->nodesize; in replace_path()
1216 eb = path->nodes[level]; in replace_path()
1218 path->slots[level]); in replace_path()
1220 path->slots[level]); in replace_path()
1269 btrfs_node_key_to_cpu(path->nodes[level], &key, in replace_path()
1270 path->slots[level]); in replace_path()
1273 path->lowest_level = level; in replace_path()
1274 set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state); in replace_path()
1276 clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state); in replace_path()
1277 path->lowest_level = 0; in replace_path()
1280 ret = -ENOENT; in replace_path()
1296 * CoW on the subtree root node before transaction commit. in replace_path()
1299 rc->block_group, parent, slot, in replace_path()
1300 path->nodes[level], path->slots[level], in replace_path()
1311 btrfs_set_node_blockptr(path->nodes[level], in replace_path()
1312 path->slots[level], old_bytenr); in replace_path()
1313 btrfs_set_node_ptr_generation(path->nodes[level], in replace_path()
1314 path->slots[level], old_ptr_gen); in replace_path()
1315 btrfs_mark_buffer_dirty(trans, path->nodes[level]); in replace_path()
1318 blocksize, path->nodes[level]->start); in replace_path()
1319 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid, in replace_path()
1328 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0, in replace_path()
1337 blocksize, path->nodes[level]->start); in replace_path()
1338 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid, in replace_path()
1348 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, in replace_path()
1370 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, in walk_up_reloc_tree() argument
1378 last_snapshot = btrfs_root_last_snapshot(&root->root_item); in walk_up_reloc_tree()
1381 free_extent_buffer(path->nodes[i]); in walk_up_reloc_tree()
1382 path->nodes[i] = NULL; in walk_up_reloc_tree()
1385 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { in walk_up_reloc_tree()
1386 eb = path->nodes[i]; in walk_up_reloc_tree()
1388 while (path->slots[i] + 1 < nritems) { in walk_up_reloc_tree()
1389 path->slots[i]++; in walk_up_reloc_tree()
1390 if (btrfs_node_ptr_generation(eb, path->slots[i]) <= in walk_up_reloc_tree()
1397 free_extent_buffer(path->nodes[i]); in walk_up_reloc_tree()
1398 path->nodes[i] = NULL; in walk_up_reloc_tree()
1407 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, in walk_down_reloc_tree() argument
1416 last_snapshot = btrfs_root_last_snapshot(&root->root_item); in walk_down_reloc_tree()
1418 for (i = *level; i > 0; i--) { in walk_down_reloc_tree()
1419 eb = path->nodes[i]; in walk_down_reloc_tree()
1421 while (path->slots[i] < nritems) { in walk_down_reloc_tree()
1422 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); in walk_down_reloc_tree()
1425 path->slots[i]++; in walk_down_reloc_tree()
1427 if (path->slots[i] >= nritems) { in walk_down_reloc_tree()
1438 eb = btrfs_read_node_slot(eb, path->slots[i]); in walk_down_reloc_tree()
1441 BUG_ON(btrfs_header_level(eb) != i - 1); in walk_down_reloc_tree()
1442 path->nodes[i - 1] = eb; in walk_down_reloc_tree()
1443 path->slots[i - 1] = 0; in walk_down_reloc_tree()
1452 static int invalidate_extent_cache(struct btrfs_root *root, in invalidate_extent_cache() argument
1456 struct btrfs_fs_info *fs_info = root->fs_info; in invalidate_extent_cache()
1462 objectid = min_key->objectid; in invalidate_extent_cache()
1469 if (objectid > max_key->objectid) in invalidate_extent_cache()
1472 inode = find_next_inode(root, objectid); in invalidate_extent_cache()
1477 if (ino > max_key->objectid) { in invalidate_extent_cache()
1483 if (!S_ISREG(inode->i_mode)) in invalidate_extent_cache()
1486 if (unlikely(min_key->objectid == ino)) { in invalidate_extent_cache()
1487 if (min_key->type > BTRFS_EXTENT_DATA_KEY) in invalidate_extent_cache()
1489 if (min_key->type < BTRFS_EXTENT_DATA_KEY) in invalidate_extent_cache()
1492 start = min_key->offset; in invalidate_extent_cache()
1493 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize)); in invalidate_extent_cache()
1499 if (unlikely(max_key->objectid == ino)) { in invalidate_extent_cache()
1500 if (max_key->type < BTRFS_EXTENT_DATA_KEY) in invalidate_extent_cache()
1502 if (max_key->type > BTRFS_EXTENT_DATA_KEY) { in invalidate_extent_cache()
1503 end = (u64)-1; in invalidate_extent_cache()
1505 if (max_key->offset == 0) in invalidate_extent_cache()
1507 end = max_key->offset; in invalidate_extent_cache()
1508 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); in invalidate_extent_cache()
1509 end--; in invalidate_extent_cache()
1512 end = (u64)-1; in invalidate_extent_cache()
1516 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); in invalidate_extent_cache()
1518 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); in invalidate_extent_cache()
1528 if (!path->nodes[level]) in find_next_key()
1530 if (path->slots[level] + 1 < in find_next_key()
1531 btrfs_header_nritems(path->nodes[level])) { in find_next_key()
1532 btrfs_node_key_to_cpu(path->nodes[level], key, in find_next_key()
1533 path->slots[level] + 1); in find_next_key()
1546 struct btrfs_root *root) in insert_dirty_subvol() argument
1548 struct btrfs_root *reloc_root = root->reloc_root; in insert_dirty_subvol()
1552 /* @root must be a subvolume tree root with a valid reloc tree */ in insert_dirty_subvol()
1553 ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); in insert_dirty_subvol()
1556 reloc_root_item = &reloc_root->root_item; in insert_dirty_subvol()
1557 memset(&reloc_root_item->drop_progress, 0, in insert_dirty_subvol()
1558 sizeof(reloc_root_item->drop_progress)); in insert_dirty_subvol()
1561 ret = btrfs_update_reloc_root(trans, root); in insert_dirty_subvol()
1565 if (list_empty(&root->reloc_dirty_list)) { in insert_dirty_subvol()
1566 btrfs_grab_root(root); in insert_dirty_subvol()
1567 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots); in insert_dirty_subvol()
1575 struct btrfs_root *root; in clean_dirty_subvols() local
1580 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots, in clean_dirty_subvols()
1582 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { in clean_dirty_subvols()
1583 /* Merged subvolume, cleanup its reloc root */ in clean_dirty_subvols()
1584 struct btrfs_root *reloc_root = root->reloc_root; in clean_dirty_subvols()
1586 list_del_init(&root->reloc_dirty_list); in clean_dirty_subvols()
1587 root->reloc_root = NULL; in clean_dirty_subvols()
1590 * root->reloc_root = NULL. Pairs with have_reloc_root. in clean_dirty_subvols()
1593 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); in clean_dirty_subvols()
1597 * ->reloc_root. If it fails however we must in clean_dirty_subvols()
1607 btrfs_put_root(root); in clean_dirty_subvols()
1610 ret2 = btrfs_drop_snapshot(root, 0, 1); in clean_dirty_subvols()
1612 btrfs_put_root(root); in clean_dirty_subvols()
1626 struct btrfs_root *root) in merge_reloc_root() argument
1628 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in merge_reloc_root()
1645 return -ENOMEM; in merge_reloc_root()
1646 path->reada = READA_FORWARD; in merge_reloc_root()
1648 reloc_root = root->reloc_root; in merge_reloc_root()
1649 root_item = &reloc_root->root_item; in merge_reloc_root()
1651 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { in merge_reloc_root()
1653 atomic_inc(&reloc_root->node->refs); in merge_reloc_root()
1654 path->nodes[level] = reloc_root->node; in merge_reloc_root()
1655 path->slots[level] = 0; in merge_reloc_root()
1657 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); in merge_reloc_root()
1661 path->lowest_level = level; in merge_reloc_root()
1663 path->lowest_level = 0; in merge_reloc_root()
1669 btrfs_node_key_to_cpu(path->nodes[level], &next_key, in merge_reloc_root()
1670 path->slots[level]); in merge_reloc_root()
1679 * block COW, we COW at most from level 1 to root level for each tree. in merge_reloc_root()
1685 min_reserved = fs_info->nodesize * reserve_level * 2; in merge_reloc_root()
1689 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, in merge_reloc_root()
1694 trans = btrfs_start_transaction(root, 0); in merge_reloc_root()
1708 * btrfs_update_reloc_root() and update our root item in merge_reloc_root()
1711 reloc_root->last_trans = trans->transid; in merge_reloc_root()
1712 trans->block_rsv = rc->block_rsv; in merge_reloc_root()
1727 ret = replace_path(trans, rc, root, reloc_root, path, in merge_reloc_root()
1734 btrfs_node_key_to_cpu(path->nodes[level], &key, in merge_reloc_root()
1735 path->slots[level]); in merge_reloc_root()
1746 * this is OK since root refs == 1 in this case. in merge_reloc_root()
1748 btrfs_node_key(path->nodes[level], &root_item->drop_progress, in merge_reloc_root()
1749 path->slots[level]); in merge_reloc_root()
1757 if (replaced && rc->stage == UPDATE_DATA_PTRS) in merge_reloc_root()
1758 invalidate_extent_cache(root, &key, &next_key); in merge_reloc_root()
1763 * relocated and the block is tree root. in merge_reloc_root()
1765 leaf = btrfs_lock_root_node(root); in merge_reloc_root()
1766 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf, in merge_reloc_root()
1774 ret = insert_dirty_subvol(trans, rc, root); in merge_reloc_root()
1784 if (replaced && rc->stage == UPDATE_DATA_PTRS) in merge_reloc_root()
1785 invalidate_extent_cache(root, &key, &next_key); in merge_reloc_root()
1793 struct btrfs_root *root = rc->extent_root; in prepare_to_merge() local
1794 struct btrfs_fs_info *fs_info = root->fs_info; in prepare_to_merge()
1801 mutex_lock(&fs_info->reloc_mutex); in prepare_to_merge()
1802 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; in prepare_to_merge()
1803 rc->merging_rsv_size += rc->nodes_relocated * 2; in prepare_to_merge()
1804 mutex_unlock(&fs_info->reloc_mutex); in prepare_to_merge()
1808 num_bytes = rc->merging_rsv_size; in prepare_to_merge()
1809 ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes, in prepare_to_merge()
1815 trans = btrfs_join_transaction(rc->extent_root); in prepare_to_merge()
1818 btrfs_block_rsv_release(fs_info, rc->block_rsv, in prepare_to_merge()
1824 if (num_bytes != rc->merging_rsv_size) { in prepare_to_merge()
1826 btrfs_block_rsv_release(fs_info, rc->block_rsv, in prepare_to_merge()
1832 rc->merge_reloc_tree = 1; in prepare_to_merge()
1834 while (!list_empty(&rc->reloc_roots)) { in prepare_to_merge()
1835 reloc_root = list_entry(rc->reloc_roots.next, in prepare_to_merge()
1837 list_del_init(&reloc_root->root_list); in prepare_to_merge()
1839 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, in prepare_to_merge()
1841 if (IS_ERR(root)) { in prepare_to_merge()
1843 * Even if we have an error we need this reloc root in prepare_to_merge()
1846 list_add(&reloc_root->root_list, &reloc_roots); in prepare_to_merge()
1847 btrfs_abort_transaction(trans, (int)PTR_ERR(root)); in prepare_to_merge()
1849 err = PTR_ERR(root); in prepare_to_merge()
1853 if (unlikely(root->reloc_root != reloc_root)) { in prepare_to_merge()
1854 if (root->reloc_root) { in prepare_to_merge()
1856 "reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (… in prepare_to_merge()
1857 root->root_key.objectid, in prepare_to_merge()
1858 root->reloc_root->root_key.objectid, in prepare_to_merge()
1859 root->reloc_root->root_key.type, in prepare_to_merge()
1860 root->reloc_root->root_key.offset, in prepare_to_merge()
1862 &root->reloc_root->root_item), in prepare_to_merge()
1863 reloc_root->root_key.objectid, in prepare_to_merge()
1864 reloc_root->root_key.type, in prepare_to_merge()
1865 reloc_root->root_key.offset, in prepare_to_merge()
1867 &reloc_root->root_item)); in prepare_to_merge()
1870 "reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu", in prepare_to_merge()
1871 root->root_key.objectid, in prepare_to_merge()
1872 reloc_root->root_key.objectid, in prepare_to_merge()
1873 reloc_root->root_key.type, in prepare_to_merge()
1874 reloc_root->root_key.offset, in prepare_to_merge()
1876 &reloc_root->root_item)); in prepare_to_merge()
1878 list_add(&reloc_root->root_list, &reloc_roots); in prepare_to_merge()
1879 btrfs_put_root(root); in prepare_to_merge()
1880 btrfs_abort_transaction(trans, -EUCLEAN); in prepare_to_merge()
1882 err = -EUCLEAN; in prepare_to_merge()
1891 btrfs_set_root_refs(&reloc_root->root_item, 1); in prepare_to_merge()
1892 ret = btrfs_update_reloc_root(trans, root); in prepare_to_merge()
1895 * Even if we have an error we need this reloc root back on our in prepare_to_merge()
1898 list_add(&reloc_root->root_list, &reloc_roots); in prepare_to_merge()
1899 btrfs_put_root(root); in prepare_to_merge()
1909 list_splice(&reloc_roots, &rc->reloc_roots); in prepare_to_merge()
1930 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in merge_reloc_roots()
1931 struct btrfs_root *root; in merge_reloc_roots() local
1937 root = rc->extent_root; in merge_reloc_roots()
1945 mutex_lock(&fs_info->reloc_mutex); in merge_reloc_roots()
1946 list_splice_init(&rc->reloc_roots, &reloc_roots); in merge_reloc_roots()
1947 mutex_unlock(&fs_info->reloc_mutex); in merge_reloc_roots()
1954 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, in merge_reloc_roots()
1956 if (btrfs_root_refs(&reloc_root->root_item) > 0) { in merge_reloc_roots()
1957 if (WARN_ON(IS_ERR(root))) { in merge_reloc_roots()
1960 * and if we didn't find the root then we marked in merge_reloc_roots()
1961 * the reloc root as a garbage root. For normal in merge_reloc_roots()
1962 * relocation obviously the root should exist in in merge_reloc_roots()
1966 ret = PTR_ERR(root); in merge_reloc_roots()
1969 if (WARN_ON(root->reloc_root != reloc_root)) { in merge_reloc_roots()
1971 * This can happen if on-disk metadata has some in merge_reloc_roots()
1974 ret = -EINVAL; in merge_reloc_roots()
1977 ret = merge_reloc_root(rc, root); in merge_reloc_roots()
1978 btrfs_put_root(root); in merge_reloc_roots()
1980 if (list_empty(&reloc_root->root_list)) in merge_reloc_roots()
1981 list_add_tail(&reloc_root->root_list, in merge_reloc_roots()
1986 if (!IS_ERR(root)) { in merge_reloc_roots()
1987 if (root->reloc_root == reloc_root) { in merge_reloc_roots()
1988 root->reloc_root = NULL; in merge_reloc_roots()
1992 &root->state); in merge_reloc_roots()
1993 btrfs_put_root(root); in merge_reloc_roots()
1996 list_del_init(&reloc_root->root_list); in merge_reloc_roots()
1997 /* Don't forget to queue this reloc root for cleanup */ in merge_reloc_roots()
1998 list_add_tail(&reloc_root->reloc_dirty_list, in merge_reloc_roots()
1999 &rc->dirty_subvol_roots); in merge_reloc_roots()
2012 /* new reloc root may be added */ in merge_reloc_roots()
2013 mutex_lock(&fs_info->reloc_mutex); in merge_reloc_roots()
2014 list_splice_init(&rc->reloc_roots, &reloc_roots); in merge_reloc_roots()
2015 mutex_unlock(&fs_info->reloc_mutex); in merge_reloc_roots()
2022 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); in merge_reloc_roots()
2027 * fine because we're bailing here, and we hold a reference on the root in merge_reloc_roots()
2029 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root in merge_reloc_roots()
2050 struct btrfs_fs_info *fs_info = reloc_root->fs_info; in record_reloc_root_in_trans()
2051 struct btrfs_root *root; in record_reloc_root_in_trans() local
2054 if (reloc_root->last_trans == trans->transid) in record_reloc_root_in_trans()
2057 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); in record_reloc_root_in_trans()
2060 * This should succeed, since we can't have a reloc root without having in record_reloc_root_in_trans()
2061 * already looked up the actual root and created the reloc root for this in record_reloc_root_in_trans()
2062 * root. in record_reloc_root_in_trans()
2065 * reloc root without a corresponding root this could return ENOENT. in record_reloc_root_in_trans()
2067 if (IS_ERR(root)) { in record_reloc_root_in_trans()
2069 return PTR_ERR(root); in record_reloc_root_in_trans()
2071 if (root->reloc_root != reloc_root) { in record_reloc_root_in_trans()
2074 "root %llu has two reloc roots associated with it", in record_reloc_root_in_trans()
2075 reloc_root->root_key.offset); in record_reloc_root_in_trans()
2076 btrfs_put_root(root); in record_reloc_root_in_trans()
2077 return -EUCLEAN; in record_reloc_root_in_trans()
2079 ret = btrfs_record_root_in_trans(trans, root); in record_reloc_root_in_trans()
2080 btrfs_put_root(root); in record_reloc_root_in_trans()
2088 struct btrfs_backref_node *node, in select_reloc_root() argument
2092 struct btrfs_root *root; in select_reloc_root() local
2096 next = node; in select_reloc_root()
2100 root = next->root; in select_reloc_root()
2103 * If there is no root, then our references for this block are in select_reloc_root()
2105 * block that is owned by a root. in select_reloc_root()
2108 * non-SHAREABLE root then we have backrefs that resolve in select_reloc_root()
2114 if (!root) { in select_reloc_root()
2116 btrfs_err(trans->fs_info, in select_reloc_root()
2117 "bytenr %llu doesn't have a backref path ending in a root", in select_reloc_root()
2118 node->bytenr); in select_reloc_root()
2119 return ERR_PTR(-EUCLEAN); in select_reloc_root()
2121 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { in select_reloc_root()
2123 btrfs_err(trans->fs_info, in select_reloc_root()
2124 "bytenr %llu has multiple refs with one ending in a non-shareable root", in select_reloc_root()
2125 node->bytenr); in select_reloc_root()
2126 return ERR_PTR(-EUCLEAN); in select_reloc_root()
2129 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { in select_reloc_root()
2130 ret = record_reloc_root_in_trans(trans, root); in select_reloc_root()
2136 ret = btrfs_record_root_in_trans(trans, root); in select_reloc_root()
2139 root = root->reloc_root; in select_reloc_root()
2143 * root->reloc_root may not be set, return ENOENT in this case. in select_reloc_root()
2145 if (!root) in select_reloc_root()
2146 return ERR_PTR(-ENOENT); in select_reloc_root()
2148 if (next->new_bytenr != root->node->start) { in select_reloc_root()
2150 * We just created the reloc root, so we shouldn't have in select_reloc_root()
2151 * ->new_bytenr set and this shouldn't be in the changed in select_reloc_root()
2156 ASSERT(next->new_bytenr == 0); in select_reloc_root()
2157 ASSERT(list_empty(&next->list)); in select_reloc_root()
2158 if (next->new_bytenr || !list_empty(&next->list)) { in select_reloc_root()
2159 btrfs_err(trans->fs_info, in select_reloc_root()
2161 node->bytenr, next->bytenr); in select_reloc_root()
2162 return ERR_PTR(-EUCLEAN); in select_reloc_root()
2165 next->new_bytenr = root->node->start; in select_reloc_root()
2166 btrfs_put_root(next->root); in select_reloc_root()
2167 next->root = btrfs_grab_root(root); in select_reloc_root()
2168 ASSERT(next->root); in select_reloc_root()
2169 list_add_tail(&next->list, in select_reloc_root()
2170 &rc->backref_cache.changed); in select_reloc_root()
2176 root = NULL; in select_reloc_root()
2178 if (!next || next->level <= node->level) in select_reloc_root()
2181 if (!root) { in select_reloc_root()
2187 return ERR_PTR(-ENOENT); in select_reloc_root()
2190 next = node; in select_reloc_root()
2191 /* setup backref node path for btrfs_reloc_cow_block */ in select_reloc_root()
2193 rc->backref_cache.path[next->level] = next; in select_reloc_root()
2194 if (--index < 0) in select_reloc_root()
2196 next = edges[index]->node[UPPER]; in select_reloc_root()
2198 return root; in select_reloc_root()
2202 * Select a tree root for relocation.
2207 * Return a tree root pointer if the block is shareable.
2208 * Return -ENOENT if the block is root of reloc tree.
2211 struct btrfs_root *select_one_root(struct btrfs_backref_node *node) in select_one_root() argument
2214 struct btrfs_root *root; in select_one_root() local
2216 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; in select_one_root()
2219 next = node; in select_one_root()
2223 root = next->root; in select_one_root()
2227 * the way up a particular path, in this case return -EUCLEAN. in select_one_root()
2229 if (!root) in select_one_root()
2230 return ERR_PTR(-EUCLEAN); in select_one_root()
2232 /* No other choice for non-shareable tree */ in select_one_root()
2233 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in select_one_root()
2234 return root; in select_one_root()
2236 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) in select_one_root()
2237 fs_root = root; in select_one_root()
2239 if (next != node) in select_one_root()
2243 if (!next || next->level <= node->level) in select_one_root()
2248 return ERR_PTR(-ENOENT); in select_one_root()
2254 struct btrfs_backref_node *node, int reserve) in calcu_metadata_size() argument
2256 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in calcu_metadata_size()
2257 struct btrfs_backref_node *next = node; in calcu_metadata_size()
2259 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; in calcu_metadata_size()
2263 BUG_ON(reserve && node->processed); in calcu_metadata_size()
2268 if (next->processed && (reserve || next != node)) in calcu_metadata_size()
2271 num_bytes += fs_info->nodesize; in calcu_metadata_size()
2273 if (list_empty(&next->upper)) in calcu_metadata_size()
2276 edge = list_entry(next->upper.next, in calcu_metadata_size()
2279 next = edge->node[UPPER]; in calcu_metadata_size()
2288 struct btrfs_backref_node *node) in reserve_metadata_space() argument
2290 struct btrfs_root *root = rc->extent_root; in reserve_metadata_space() local
2291 struct btrfs_fs_info *fs_info = root->fs_info; in reserve_metadata_space()
2296 num_bytes = calcu_metadata_size(rc, node, 1) * 2; in reserve_metadata_space()
2298 trans->block_rsv = rc->block_rsv; in reserve_metadata_space()
2299 rc->reserved_bytes += num_bytes; in reserve_metadata_space()
2303 * If we get an enospc just kick back -EAGAIN so we know to drop the in reserve_metadata_space()
2306 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes, in reserve_metadata_space()
2309 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES; in reserve_metadata_space()
2310 while (tmp <= rc->reserved_bytes) in reserve_metadata_space()
2319 rc->block_rsv->size = tmp + fs_info->nodesize * in reserve_metadata_space()
2321 return -EAGAIN; in reserve_metadata_space()
2336 struct btrfs_backref_node *node, in do_relocation() argument
2342 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; in do_relocation()
2343 struct btrfs_root *root; in do_relocation() local
2354 ASSERT(!lowest || !node->eb); in do_relocation()
2356 path->lowest_level = node->level + 1; in do_relocation()
2357 rc->backref_cache.path[node->level] = node; in do_relocation()
2358 list_for_each_entry(edge, &node->upper, list[LOWER]) { in do_relocation()
2363 upper = edge->node[UPPER]; in do_relocation()
2364 root = select_reloc_root(trans, rc, upper, edges); in do_relocation()
2365 if (IS_ERR(root)) { in do_relocation()
2366 ret = PTR_ERR(root); in do_relocation()
2370 if (upper->eb && !upper->locked) { in do_relocation()
2372 ret = btrfs_bin_search(upper->eb, 0, key, &slot); in do_relocation()
2376 bytenr = btrfs_node_blockptr(upper->eb, slot); in do_relocation()
2377 if (node->eb->start == bytenr) in do_relocation()
2383 if (!upper->eb) { in do_relocation()
2384 ret = btrfs_search_slot(trans, root, key, path, 0, 1); in do_relocation()
2387 ret = -ENOENT; in do_relocation()
2393 if (!upper->eb) { in do_relocation()
2394 upper->eb = path->nodes[upper->level]; in do_relocation()
2395 path->nodes[upper->level] = NULL; in do_relocation()
2397 BUG_ON(upper->eb != path->nodes[upper->level]); in do_relocation()
2400 upper->locked = 1; in do_relocation()
2401 path->locks[upper->level] = 0; in do_relocation()
2403 slot = path->slots[upper->level]; in do_relocation()
2406 ret = btrfs_bin_search(upper->eb, 0, key, &slot); in do_relocation()
2412 bytenr = btrfs_node_blockptr(upper->eb, slot); in do_relocation()
2414 if (bytenr != node->bytenr) { in do_relocation()
2415 btrfs_err(root->fs_info, in do_relocation()
2416 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", in do_relocation()
2417 bytenr, node->bytenr, slot, in do_relocation()
2418 upper->eb->start); in do_relocation()
2419 ret = -EIO; in do_relocation()
2423 if (node->eb->start == bytenr) in do_relocation()
2427 blocksize = root->fs_info->nodesize; in do_relocation()
2428 eb = btrfs_read_node_slot(upper->eb, slot); in do_relocation()
2435 if (!node->eb) { in do_relocation()
2436 ret = btrfs_cow_block(trans, root, eb, upper->eb, in do_relocation()
2444 * the correct backref node entry. in do_relocation()
2446 ASSERT(node->eb == eb); in do_relocation()
2448 btrfs_set_node_blockptr(upper->eb, slot, in do_relocation()
2449 node->eb->start); in do_relocation()
2450 btrfs_set_node_ptr_generation(upper->eb, slot, in do_relocation()
2451 trans->transid); in do_relocation()
2452 btrfs_mark_buffer_dirty(trans, upper->eb); in do_relocation()
2455 node->eb->start, blocksize, in do_relocation()
2456 upper->eb->start); in do_relocation()
2457 btrfs_init_tree_ref(&ref, node->level, in do_relocation()
2458 btrfs_header_owner(upper->eb), in do_relocation()
2459 root->root_key.objectid, false); in do_relocation()
2462 ret = btrfs_drop_subtree(trans, root, eb, in do_relocation()
2463 upper->eb); in do_relocation()
2468 if (!upper->pending) in do_relocation()
2476 if (!ret && node->pending) { in do_relocation()
2477 btrfs_backref_drop_node_buffer(node); in do_relocation()
2478 list_move_tail(&node->list, &rc->backref_cache.changed); in do_relocation()
2479 node->pending = 0; in do_relocation()
2482 path->lowest_level = 0; in do_relocation()
2488 ASSERT(ret != -ENOSPC); in do_relocation()
2494 struct btrfs_backref_node *node, in link_to_upper() argument
2499 btrfs_node_key_to_cpu(node->eb, &key, 0); in link_to_upper()
2500 return do_relocation(trans, rc, node, &key, path, 0); in link_to_upper()
2508 struct btrfs_backref_cache *cache = &rc->backref_cache; in finish_pending_nodes()
2509 struct btrfs_backref_node *node; in finish_pending_nodes() local
2514 while (!list_empty(&cache->pending[level])) { in finish_pending_nodes()
2515 node = list_entry(cache->pending[level].next, in finish_pending_nodes()
2517 list_move_tail(&node->list, &list); in finish_pending_nodes()
2518 BUG_ON(!node->pending); in finish_pending_nodes()
2521 ret = link_to_upper(trans, rc, node, path); in finish_pending_nodes()
2526 list_splice_init(&list, &cache->pending[level]); in finish_pending_nodes()
2536 struct btrfs_backref_node *node) in update_processed_blocks() argument
2538 struct btrfs_backref_node *next = node; in update_processed_blocks()
2540 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; in update_processed_blocks()
2546 if (next->processed) in update_processed_blocks()
2551 if (list_empty(&next->upper)) in update_processed_blocks()
2554 edge = list_entry(next->upper.next, in update_processed_blocks()
2557 next = edge->node[UPPER]; in update_processed_blocks()
2565 u32 blocksize = rc->extent_root->fs_info->nodesize; in tree_block_processed()
2567 if (test_range_bit(&rc->processed_blocks, bytenr, in tree_block_processed()
2568 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) in tree_block_processed()
2577 .level = block->level, in get_tree_block_key()
2578 .owner_root = block->owner, in get_tree_block_key()
2579 .transid = block->key.offset in get_tree_block_key()
2583 eb = read_tree_block(fs_info, block->bytenr, &check); in get_tree_block_key()
2588 return -EIO; in get_tree_block_key()
2590 if (block->level == 0) in get_tree_block_key()
2591 btrfs_item_key_to_cpu(eb, &block->key, 0); in get_tree_block_key()
2593 btrfs_node_key_to_cpu(eb, &block->key, 0); in get_tree_block_key()
2595 block->key_ready = 1; in get_tree_block_key()
2604 struct btrfs_backref_node *node, in relocate_tree_block() argument
2608 struct btrfs_root *root; in relocate_tree_block() local
2611 if (!node) in relocate_tree_block()
2618 ret = reserve_metadata_space(trans, rc, node); in relocate_tree_block()
2622 BUG_ON(node->processed); in relocate_tree_block()
2623 root = select_one_root(node); in relocate_tree_block()
2624 if (IS_ERR(root)) { in relocate_tree_block()
2625 ret = PTR_ERR(root); in relocate_tree_block()
2627 /* See explanation in select_one_root for the -EUCLEAN case. */ in relocate_tree_block()
2628 ASSERT(ret == -ENOENT); in relocate_tree_block()
2629 if (ret == -ENOENT) { in relocate_tree_block()
2631 update_processed_blocks(rc, node); in relocate_tree_block()
2636 if (root) { in relocate_tree_block()
2637 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { in relocate_tree_block()
2639 * This block was the root block of a root, and this is in relocate_tree_block()
2641 * should not have had the ->new_bytenr modified and in relocate_tree_block()
2651 ASSERT(node->new_bytenr == 0); in relocate_tree_block()
2652 ASSERT(list_empty(&node->list)); in relocate_tree_block()
2653 if (node->new_bytenr || !list_empty(&node->list)) { in relocate_tree_block()
2654 btrfs_err(root->fs_info, in relocate_tree_block()
2656 node->bytenr); in relocate_tree_block()
2657 ret = -EUCLEAN; in relocate_tree_block()
2660 ret = btrfs_record_root_in_trans(trans, root); in relocate_tree_block()
2667 if (!root->reloc_root) { in relocate_tree_block()
2668 ret = -ENOENT; in relocate_tree_block()
2671 root = root->reloc_root; in relocate_tree_block()
2672 node->new_bytenr = root->node->start; in relocate_tree_block()
2673 btrfs_put_root(node->root); in relocate_tree_block()
2674 node->root = btrfs_grab_root(root); in relocate_tree_block()
2675 ASSERT(node->root); in relocate_tree_block()
2676 list_add_tail(&node->list, &rc->backref_cache.changed); in relocate_tree_block()
2678 path->lowest_level = node->level; in relocate_tree_block()
2679 if (root == root->fs_info->chunk_root) in relocate_tree_block()
2681 ret = btrfs_search_slot(trans, root, key, path, 0, 1); in relocate_tree_block()
2683 if (root == root->fs_info->chunk_root) in relocate_tree_block()
2689 update_processed_blocks(rc, node); in relocate_tree_block()
2691 ret = do_relocation(trans, rc, node, key, path, 1); in relocate_tree_block()
2694 if (ret || node->level == 0 || node->cowonly) in relocate_tree_block()
2695 btrfs_backref_cleanup_node(&rc->backref_cache, node); in relocate_tree_block()
2706 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in relocate_tree_blocks()
2707 struct btrfs_backref_node *node; in relocate_tree_blocks() local
2716 err = -ENOMEM; in relocate_tree_blocks()
2722 if (!block->key_ready) in relocate_tree_blocks()
2723 btrfs_readahead_tree_block(fs_info, block->bytenr, in relocate_tree_blocks()
2724 block->owner, 0, in relocate_tree_blocks()
2725 block->level); in relocate_tree_blocks()
2730 if (!block->key_ready) { in relocate_tree_blocks()
2739 node = build_backref_tree(trans, rc, &block->key, in relocate_tree_blocks()
2740 block->level, block->bytenr); in relocate_tree_blocks()
2741 if (IS_ERR(node)) { in relocate_tree_blocks()
2742 err = PTR_ERR(node); in relocate_tree_blocks()
2746 ret = relocate_tree_block(trans, rc, node, &block->key, in relocate_tree_blocks()
2770 u64 offset = inode->index_cnt; in prealloc_file_extent_cluster()
2774 u64 i_size = i_size_read(&inode->vfs_inode); in prealloc_file_extent_cluster()
2775 u64 prealloc_start = cluster->start - offset; in prealloc_file_extent_cluster()
2776 u64 prealloc_end = cluster->end - offset; in prealloc_file_extent_cluster()
2791 struct address_space *mapping = inode->vfs_inode.i_mapping; in prealloc_file_extent_cluster()
2792 struct btrfs_fs_info *fs_info = inode->root->fs_info; in prealloc_file_extent_cluster()
2793 const u32 sectorsize = fs_info->sectorsize; in prealloc_file_extent_cluster()
2805 * |- btrfs_lock_and_flush_ordered_range() in prealloc_file_extent_cluster()
2806 * |- btrfs_start_ordered_extent() in prealloc_file_extent_cluster()
2807 * |- extent_write_cache_pages() in prealloc_file_extent_cluster()
2808 * |- lock_page() in prealloc_file_extent_cluster()
2822 clear_extent_bits(&inode->io_tree, i_size, in prealloc_file_extent_cluster()
2823 round_up(i_size, PAGE_SIZE) - 1, in prealloc_file_extent_cluster()
2828 * will re-read the whole page anyway. in prealloc_file_extent_cluster()
2832 round_up(i_size, PAGE_SIZE) - i_size); in prealloc_file_extent_cluster()
2838 BUG_ON(cluster->start != cluster->boundary[0]); in prealloc_file_extent_cluster()
2840 prealloc_end + 1 - prealloc_start); in prealloc_file_extent_cluster()
2845 for (nr = 0; nr < cluster->nr; nr++) { in prealloc_file_extent_cluster()
2848 start = cluster->boundary[nr] - offset; in prealloc_file_extent_cluster()
2849 if (nr + 1 < cluster->nr) in prealloc_file_extent_cluster()
2850 end = cluster->boundary[nr + 1] - 1 - offset; in prealloc_file_extent_cluster()
2852 end = cluster->end - offset; in prealloc_file_extent_cluster()
2854 lock_extent(&inode->io_tree, start, end, &cached_state); in prealloc_file_extent_cluster()
2855 num_bytes = end + 1 - start; in prealloc_file_extent_cluster()
2856 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start, in prealloc_file_extent_cluster()
2860 unlock_extent(&inode->io_tree, start, end, &cached_state); in prealloc_file_extent_cluster()
2867 btrfs_free_reserved_data_space_noquota(inode->root->fs_info, in prealloc_file_extent_cluster()
2868 prealloc_end + 1 - cur_offset); in prealloc_file_extent_cluster()
2881 return -ENOMEM; in setup_relocation_extent_mapping()
2883 em->start = start; in setup_relocation_extent_mapping()
2884 em->len = end + 1 - start; in setup_relocation_extent_mapping()
2885 em->block_len = em->len; in setup_relocation_extent_mapping()
2886 em->block_start = block_start; in setup_relocation_extent_mapping()
2887 set_bit(EXTENT_FLAG_PINNED, &em->flags); in setup_relocation_extent_mapping()
2889 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); in setup_relocation_extent_mapping()
2891 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state); in setup_relocation_extent_mapping()
2902 return atomic_read(&fs_info->balance_cancel_req) || in btrfs_should_cancel_balance()
2903 atomic_read(&fs_info->reloc_cancel_req) || in btrfs_should_cancel_balance()
2912 if (cluster_nr >= cluster->nr - 1) in get_cluster_boundary_end()
2913 return cluster->end; in get_cluster_boundary_end()
2916 return cluster->boundary[cluster_nr + 1] - 1; in get_cluster_boundary_end()
2923 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in relocate_one_page()
2924 u64 offset = BTRFS_I(inode)->index_cnt; in relocate_one_page()
2925 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT; in relocate_one_page()
2926 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); in relocate_one_page()
2934 page = find_lock_page(inode->i_mapping, page_index); in relocate_one_page()
2936 page_cache_sync_readahead(inode->i_mapping, ra, NULL, in relocate_one_page()
2937 page_index, last_index + 1 - page_index); in relocate_one_page()
2938 page = find_or_create_page(inode->i_mapping, page_index, mask); in relocate_one_page()
2940 return -ENOMEM; in relocate_one_page()
2944 page_cache_async_readahead(inode->i_mapping, ra, NULL, in relocate_one_page()
2946 last_index + 1 - page_index); in relocate_one_page()
2952 ret = -EIO; in relocate_one_page()
2967 page_end = page_start + PAGE_SIZE - 1; in relocate_one_page()
2973 cur = max(page_start, cluster->boundary[*cluster_nr] - offset); in relocate_one_page()
2976 u64 extent_start = cluster->boundary[*cluster_nr] - offset; in relocate_one_page()
2978 *cluster_nr) - offset; in relocate_one_page()
2981 u32 clamped_len = clamped_end + 1 - clamped_start; in relocate_one_page()
2991 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, in relocate_one_page()
2996 clear_extent_bit(&BTRFS_I(inode)->io_tree, in relocate_one_page()
3015 if (in_range(cluster->boundary[*cluster_nr] - offset, in relocate_one_page()
3017 u64 boundary_start = cluster->boundary[*cluster_nr] - in relocate_one_page()
3020 fs_info->sectorsize - 1; in relocate_one_page()
3022 set_extent_bit(&BTRFS_I(inode)->io_tree, in relocate_one_page()
3026 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, in relocate_one_page()
3035 if (*cluster_nr >= cluster->nr) in relocate_one_page()
3042 balance_dirty_pages_ratelimited(inode->i_mapping); in relocate_one_page()
3045 ret = -ECANCELED; in relocate_one_page()
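/*
 * Note: the function above appears to process one page of the data reloc
 * inode at a time: look the page up (with sync/async readahead), read it in
 * if needed, then for every cluster extent overlapping the page lock the
 * clamped extent range, dirty it, and set EXTENT_BOUNDARY on the first
 * sector of each extent so later writeback keeps the original extent
 * boundaries.  btrfs_should_cancel_balance() lets a pending balance or
 * relocation cancel abort the walk with -ECANCELED.
 */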
3057 u64 offset = BTRFS_I(inode)->index_cnt; in relocate_file_extent_cluster()
3064 if (!cluster->nr) in relocate_file_extent_cluster()
3069 return -ENOMEM; in relocate_file_extent_cluster()
3075 file_ra_state_init(ra, inode->i_mapping); in relocate_file_extent_cluster()
3077 ret = setup_relocation_extent_mapping(inode, cluster->start - offset, in relocate_file_extent_cluster()
3078 cluster->end - offset, cluster->start); in relocate_file_extent_cluster()
3082 last_index = (cluster->end - offset) >> PAGE_SHIFT; in relocate_file_extent_cluster()
3083 for (index = (cluster->start - offset) >> PAGE_SHIFT; in relocate_file_extent_cluster()
3087 WARN_ON(cluster_nr != cluster->nr); in relocate_file_extent_cluster()
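/*
 * Note: the loop above visits every page backing the cluster.  The file
 * range is the cluster's logical range shifted down by the block group
 * start, and page indexes follow from it via PAGE_SHIFT.  A standalone
 * sketch of the index math, with invented names, not kernel code:
 */
#include <stdint.h>

#define DEMO_PAGE_SHIFT 12	/* assumes 4 KiB pages for the example */

/* First and last page index of a cluster inside the data reloc inode. */
static inline uint64_t demo_first_page_index(uint64_t cluster_start, uint64_t bg_start)
{
	return (cluster_start - bg_start) >> DEMO_PAGE_SHIFT;
}

static inline uint64_t demo_last_page_index(uint64_t cluster_end, uint64_t bg_start)
{
	return (cluster_end - bg_start) >> DEMO_PAGE_SHIFT;
}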
3099 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { in relocate_data_extent()
3103 cluster->nr = 0; in relocate_data_extent()
3106 if (!cluster->nr) in relocate_data_extent()
3107 cluster->start = extent_key->objectid; in relocate_data_extent()
3109 BUG_ON(cluster->nr >= MAX_EXTENTS); in relocate_data_extent()
3110 cluster->end = extent_key->objectid + extent_key->offset - 1; in relocate_data_extent()
3111 cluster->boundary[cluster->nr] = extent_key->objectid; in relocate_data_extent()
3112 cluster->nr++; in relocate_data_extent()
3114 if (cluster->nr >= MAX_EXTENTS) { in relocate_data_extent()
3118 cluster->nr = 0; in relocate_data_extent()
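/*
 * Note: data extents are not relocated one at a time.  An extent joins the
 * current cluster only if it starts exactly at cluster->end + 1; a gap, or
 * filling all MAX_EXTENTS boundary slots, flushes the cluster through
 * relocate_file_extent_cluster() and starts a new one.  boundary[] remembers
 * where each member extent began so the original extent layout survives the
 * move.
 */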
3138 int level = -1; in add_tree_block()
3142 eb = path->nodes[0]; in add_tree_block()
3143 item_size = btrfs_item_size(eb, path->slots[0]); in add_tree_block()
3145 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || in add_tree_block()
3149 ei = btrfs_item_ptr(eb, path->slots[0], in add_tree_block()
3152 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { in add_tree_block()
3157 level = (int)extent_key->offset; in add_tree_block()
3167 * inline ref offset. We know it's an fs root if in add_tree_block()
3188 return -EINVAL; in add_tree_block()
3194 btrfs_err(rc->block_group->fs_info, in add_tree_block()
3196 eb->start, path->slots[0]); in add_tree_block()
3198 return -EUCLEAN; in add_tree_block()
3203 BUG_ON(level == -1); in add_tree_block()
3207 return -ENOMEM; in add_tree_block()
3209 block->bytenr = extent_key->objectid; in add_tree_block()
3210 block->key.objectid = rc->extent_root->fs_info->nodesize; in add_tree_block()
3211 block->key.offset = generation; in add_tree_block()
3212 block->level = level; in add_tree_block()
3213 block->key_ready = 0; in add_tree_block()
3214 block->owner = owner; in add_tree_block()
3216 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node); in add_tree_block()
3218 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr, in add_tree_block()
3219 -EEXIST); in add_tree_block()
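/*
 * Note: for a BTRFS_METADATA_ITEM_KEY the tree level sits directly in the
 * key offset; for an old-style EXTENT_ITEM it is read from the inline
 * btrfs_tree_block_info, and the owning root from the inline ref.  The
 * resulting tree_block is keyed by bytenr and inserted into the rbtree of
 * blocks to relocate.  key_ready = 0 means the block's real first key still
 * has to be read later; until then the key field appears to be reused to
 * stash the nodesize and generation.  A duplicate bytenr in the rbtree is
 * treated as a fatal inconsistency (btrfs_backref_panic()).
 */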
3231 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in __add_tree_block()
3245 return -ENOMEM; in __add_tree_block()
3250 key.offset = (u64)-1; in __add_tree_block()
3256 path->search_commit_root = 1; in __add_tree_block()
3257 path->skip_locking = 1; in __add_tree_block()
3258 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); in __add_tree_block()
3263 if (path->slots[0]) { in __add_tree_block()
3264 path->slots[0]--; in __add_tree_block()
3265 btrfs_item_key_to_cpu(path->nodes[0], &key, in __add_tree_block()
3266 path->slots[0]); in __add_tree_block()
3282 btrfs_print_leaf(path->nodes[0]); in __add_tree_block()
3287 ret = -EINVAL; in __add_tree_block()
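/*
 * Note: given only a tree block's bytenr, the extent tree commit root is
 * searched without locking (search_commit_root + skip_locking, relocation
 * works against a frozen view) for the METADATA_ITEM or EXTENT_ITEM covering
 * it; the search starts at offset (u64)-1 and steps back one slot because
 * the item's offset (level or byte size) is not known up front.  The match
 * is handed to add_tree_block() above; failing to find an item for a block
 * that should exist dumps the leaf and returns -EINVAL.
 */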
3302 struct btrfs_root *root = fs_info->tree_root; in delete_block_group_cache() local
3309 inode = btrfs_iget(fs_info->sb, ino, root); in delete_block_group_cache()
3311 return -ENOENT; in delete_block_group_cache()
3315 &fs_info->global_block_rsv); in delete_block_group_cache()
3319 trans = btrfs_join_transaction(root); in delete_block_group_cache()
3335 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the
3370 return -ENOENT; in delete_v1_space_cache()
3371 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL, in delete_v1_space_cache()
3387 const u32 blocksize = rc->extent_root->fs_info->nodesize; in add_data_references()
3392 ctx.bytenr = extent_key->objectid; in add_data_references()
3394 ctx.fs_info = rc->extent_root->fs_info; in add_data_references()
3405 eb = read_tree_block(ctx.fs_info, ref_node->val, &check); in add_data_references()
3410 ret = delete_v1_space_cache(eb, rc->block_group, in add_data_references()
3411 extent_key->objectid); in add_data_references()
3415 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks); in add_data_references()
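/*
 * Note: this walks every leaf referencing the data extent, via the backref
 * context seeded with the extent's bytenr.  A referencing leaf owned by the
 * root tree can only mean the extent backs a v1 free space cache file, in
 * which case the cache inode is deleted (delete_v1_space_cache()) rather
 * than dragging stale cache data along.  Each referencing leaf is also
 * queued with __add_tree_block() so its file extent items can be rewritten
 * to the new location during the UPDATE_DATA_PTRS stage.
 */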
3432 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in find_next_extent()
3438 last = rc->block_group->start + rc->block_group->length; in find_next_extent()
3443 if (rc->search_start >= last) { in find_next_extent()
3448 key.objectid = rc->search_start; in find_next_extent()
3452 path->search_commit_root = 1; in find_next_extent()
3453 path->skip_locking = 1; in find_next_extent()
3454 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, in find_next_extent()
3459 leaf = path->nodes[0]; in find_next_extent()
3460 if (path->slots[0] >= btrfs_header_nritems(leaf)) { in find_next_extent()
3461 ret = btrfs_next_leaf(rc->extent_root, path); in find_next_extent()
3464 leaf = path->nodes[0]; in find_next_extent()
3467 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in find_next_extent()
3475 path->slots[0]++; in find_next_extent()
3480 key.objectid + key.offset <= rc->search_start) { in find_next_extent()
3481 path->slots[0]++; in find_next_extent()
3486 key.objectid + fs_info->nodesize <= in find_next_extent()
3487 rc->search_start) { in find_next_extent()
3488 path->slots[0]++; in find_next_extent()
3492 block_found = find_first_extent_bit(&rc->processed_blocks, in find_next_extent()
3498 rc->search_start = end + 1; in find_next_extent()
3501 rc->search_start = key.objectid + key.offset; in find_next_extent()
3503 rc->search_start = key.objectid + in find_next_extent()
3504 fs_info->nodesize; in find_next_extent()
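/*
 * Note: the scan above walks extent items in the extent tree commit root
 * starting at rc->search_start and stops once it passes the end of the block
 * group.  Items that end at or before search_start are skipped, as are tree
 * blocks already recorded as EXTENT_DIRTY in rc->processed_blocks, so
 * restarting the loop after a transaction change never reprocesses finished
 * blocks.  search_start is advanced past whatever gets returned.
 */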
3515 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in set_reloc_control()
3517 mutex_lock(&fs_info->reloc_mutex); in set_reloc_control()
3518 fs_info->reloc_ctl = rc; in set_reloc_control()
3519 mutex_unlock(&fs_info->reloc_mutex); in set_reloc_control()
3524 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in unset_reloc_control()
3526 mutex_lock(&fs_info->reloc_mutex); in unset_reloc_control()
3527 fs_info->reloc_ctl = NULL; in unset_reloc_control()
3528 mutex_unlock(&fs_info->reloc_mutex); in unset_reloc_control()
3537 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info, in prepare_to_relocate()
3539 if (!rc->block_rsv) in prepare_to_relocate()
3540 return -ENOMEM; in prepare_to_relocate()
3542 memset(&rc->cluster, 0, sizeof(rc->cluster)); in prepare_to_relocate()
3543 rc->search_start = rc->block_group->start; in prepare_to_relocate()
3544 rc->extents_found = 0; in prepare_to_relocate()
3545 rc->nodes_relocated = 0; in prepare_to_relocate()
3546 rc->merging_rsv_size = 0; in prepare_to_relocate()
3547 rc->reserved_bytes = 0; in prepare_to_relocate()
3548 rc->block_rsv->size = rc->extent_root->fs_info->nodesize * in prepare_to_relocate()
3550 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info, in prepare_to_relocate()
3551 rc->block_rsv, rc->block_rsv->size, in prepare_to_relocate()
3556 rc->create_reloc_tree = 1; in prepare_to_relocate()
3559 trans = btrfs_join_transaction(rc->extent_root); in prepare_to_relocate()
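/*
 * Note: before the main loop a dedicated metadata reservation is set up and
 * the per-pass state (cluster, counters, search_start) is reset.  The rsv is
 * sized as a multiple of nodesize (the multiplier itself is not among the
 * matched lines) and refilled; create_reloc_tree is then switched on so that
 * COWs of shareable roots from this point on are mirrored into reloc trees,
 * and the joined transaction pins that to a consistent starting point.
 */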
3579 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in relocate_block_group()
3592 return -ENOMEM; in relocate_block_group()
3593 path->reada = READA_FORWARD; in relocate_block_group()
3602 rc->reserved_bytes = 0; in relocate_block_group()
3603 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, in relocate_block_group()
3604 rc->block_rsv->size, in relocate_block_group()
3611 trans = btrfs_start_transaction(rc->extent_root, 0); in relocate_block_group()
3618 if (rc->backref_cache.last_trans != trans->transid) in relocate_block_group()
3619 btrfs_backref_release_cache(&rc->backref_cache); in relocate_block_group()
3620 rc->backref_cache.last_trans = trans->transid; in relocate_block_group()
3628 rc->extents_found++; in relocate_block_group()
3630 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], in relocate_block_group()
3632 flags = btrfs_extent_flags(path->nodes[0], ei); in relocate_block_group()
3636 } else if (rc->stage == UPDATE_DATA_PTRS && in relocate_block_group()
3651 if (ret != -EAGAIN) { in relocate_block_group()
3655 rc->extents_found--; in relocate_block_group()
3656 rc->search_start = key.objectid; in relocate_block_group()
3664 if (rc->stage == MOVE_DATA_EXTENTS && in relocate_block_group()
3666 rc->found_file_extent = 1; in relocate_block_group()
3667 ret = relocate_data_extent(rc->data_inode, in relocate_block_group()
3668 &key, &rc->cluster); in relocate_block_group()
3675 err = -ECANCELED; in relocate_block_group()
3679 if (trans && progress && err == -ENOSPC) { in relocate_block_group()
3680 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags); in relocate_block_group()
3689 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); in relocate_block_group()
3697 ret = relocate_file_extent_cluster(rc->data_inode, in relocate_block_group()
3698 &rc->cluster); in relocate_block_group()
3703 rc->create_reloc_tree = 0; in relocate_block_group()
3706 btrfs_backref_release_cache(&rc->backref_cache); in relocate_block_group()
3707 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); in relocate_block_group()
3721 rc->merge_reloc_tree = 0; in relocate_block_group()
3723 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); in relocate_block_group()
3726 trans = btrfs_join_transaction(rc->extent_root); in relocate_block_group()
3738 btrfs_free_block_rsv(fs_info, rc->block_rsv); in relocate_block_group()
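/*
 * Note: the main relocation loop.  Each pass refills the block rsv, starts a
 * transaction, drops backref cache entries left over from an older
 * transaction, and asks find_next_extent() for the next extent in the block
 * group.  Tree blocks are queued and relocated through the tree block path;
 * data extents, in the data stages, get their referencing leaves queued and
 * the data itself batched through relocate_data_extent().  -ENOSPC is
 * retried after forcing allocation of a new chunk of the block group's type.
 * Once the loop ends, the pending cluster is flushed, create_reloc_tree is
 * cleared and the reloc trees are merged back before the rsv and backref
 * cache are torn down (the merge step itself is outside the matched lines).
 */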
3744 struct btrfs_root *root, u64 objectid) in __insert_orphan_inode() argument
3753 return -ENOMEM; in __insert_orphan_inode()
3755 ret = btrfs_insert_empty_inode(trans, root, path, objectid); in __insert_orphan_inode()
3759 leaf = path->nodes[0]; in __insert_orphan_inode()
3760 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); in __insert_orphan_inode()
3774 struct btrfs_root *root, u64 objectid) in delete_orphan_inode() argument
3782 ret = -ENOMEM; in delete_orphan_inode()
3789 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in delete_orphan_inode()
3792 ret = -ENOENT; in delete_orphan_inode()
3795 ret = btrfs_del_item(trans, root, path); in delete_orphan_inode()
3812 struct btrfs_root *root; in create_reloc_inode() local
3816 root = btrfs_grab_root(fs_info->data_reloc_root); in create_reloc_inode()
3817 trans = btrfs_start_transaction(root, 6); in create_reloc_inode()
3819 btrfs_put_root(root); in create_reloc_inode()
3823 err = btrfs_get_free_objectid(root, &objectid); in create_reloc_inode()
3827 err = __insert_orphan_inode(trans, root, objectid); in create_reloc_inode()
3831 inode = btrfs_iget(fs_info->sb, objectid, root); in create_reloc_inode()
3833 delete_orphan_inode(trans, root, objectid); in create_reloc_inode()
3838 BTRFS_I(inode)->index_cnt = group->start; in create_reloc_inode()
3842 btrfs_put_root(root); in create_reloc_inode()
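/*
 * Note: the data reloc inode is created in the dedicated data reloc root
 * with no directory entry (__insert_orphan_inode()), so it never becomes
 * visible to userspace and can be reclaimed if relocation is interrupted.
 * Its index_cnt is set to the block group's start, which is exactly the
 * offset subtracted throughout the cluster and page code above to turn
 * bytenrs into file offsets.
 */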
3858 * -EINPROGRESS operation is already in progress, that's probably a bug
3859 * -ECANCELED cancellation request was set before the operation started
3863 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) { in reloc_chunk_start()
3866 return -EINPROGRESS; in reloc_chunk_start()
3869 if (atomic_read(&fs_info->reloc_cancel_req) > 0) { in reloc_chunk_start()
3875 atomic_set(&fs_info->reloc_cancel_req, 0); in reloc_chunk_start()
3876 return -ECANCELED; in reloc_chunk_start()
3887 if (atomic_read(&fs_info->reloc_cancel_req) > 0) in reloc_chunk_end()
3889 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags); in reloc_chunk_end()
3890 atomic_set(&fs_info->reloc_cancel_req, 0); in reloc_chunk_end()
3901 INIT_LIST_HEAD(&rc->reloc_roots); in alloc_reloc_control()
3902 INIT_LIST_HEAD(&rc->dirty_subvol_roots); in alloc_reloc_control()
3903 btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1); in alloc_reloc_control()
3904 mapping_tree_init(&rc->reloc_root_tree); in alloc_reloc_control()
3905 extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS); in alloc_reloc_control()
3911 struct mapping_node *node, *tmp; in free_reloc_control() local
3913 free_reloc_roots(&rc->reloc_roots); in free_reloc_control()
3914 rbtree_postorder_for_each_entry_safe(node, tmp, in free_reloc_control()
3915 &rc->reloc_root_tree.rb_root, rb_node) in free_reloc_control()
3916 kfree(node); in free_reloc_control()
3929 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf)); in describe_relocation()
3933 block_group->start, buf); in describe_relocation()
3960 * This only gets set if we had a half-deleted snapshot on mount. We in btrfs_relocate_block_group()
3964 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE); in btrfs_relocate_block_group()
3970 return -EINTR; in btrfs_relocate_block_group()
3974 return -ENOENT; in btrfs_relocate_block_group()
3984 if (bg->flags & BTRFS_BLOCK_GROUP_DATA) in btrfs_relocate_block_group()
3985 ASSERT(sb_write_started(fs_info->sb)); in btrfs_relocate_block_group()
3989 return -ETXTBSY; in btrfs_relocate_block_group()
3995 return -ENOMEM; in btrfs_relocate_block_group()
4004 rc->extent_root = extent_root; in btrfs_relocate_block_group()
4005 rc->block_group = bg; in btrfs_relocate_block_group()
4007 ret = btrfs_inc_block_group_ro(rc->block_group, true); in btrfs_relocate_block_group()
4016 err = -ENOMEM; in btrfs_relocate_block_group()
4020 inode = lookup_free_space_inode(rc->block_group, path); in btrfs_relocate_block_group()
4024 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0); in btrfs_relocate_block_group()
4028 if (ret && ret != -ENOENT) { in btrfs_relocate_block_group()
4033 rc->data_inode = create_reloc_inode(fs_info, rc->block_group); in btrfs_relocate_block_group()
4034 if (IS_ERR(rc->data_inode)) { in btrfs_relocate_block_group()
4035 err = PTR_ERR(rc->data_inode); in btrfs_relocate_block_group()
4036 rc->data_inode = NULL; in btrfs_relocate_block_group()
4040 describe_relocation(fs_info, rc->block_group); in btrfs_relocate_block_group()
4042 btrfs_wait_block_group_reservations(rc->block_group); in btrfs_relocate_block_group()
4043 btrfs_wait_nocow_writers(rc->block_group); in btrfs_relocate_block_group()
4045 rc->block_group->start, in btrfs_relocate_block_group()
4046 rc->block_group->length); in btrfs_relocate_block_group()
4048 ret = btrfs_zone_finish(rc->block_group); in btrfs_relocate_block_group()
4049 WARN_ON(ret && ret != -EAGAIN); in btrfs_relocate_block_group()
4054 mutex_lock(&fs_info->cleaner_mutex); in btrfs_relocate_block_group()
4056 mutex_unlock(&fs_info->cleaner_mutex); in btrfs_relocate_block_group()
4060 finishes_stage = rc->stage; in btrfs_relocate_block_group()
4065 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in in btrfs_relocate_block_group()
4070 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { in btrfs_relocate_block_group()
4071 ret = btrfs_wait_ordered_range(rc->data_inode, 0, in btrfs_relocate_block_group()
4072 (u64)-1); in btrfs_relocate_block_group()
4075 invalidate_mapping_pages(rc->data_inode->i_mapping, in btrfs_relocate_block_group()
4076 0, -1); in btrfs_relocate_block_group()
4077 rc->stage = UPDATE_DATA_PTRS; in btrfs_relocate_block_group()
4083 if (rc->extents_found == 0) in btrfs_relocate_block_group()
4087 rc->extents_found, stage_to_string(finishes_stage)); in btrfs_relocate_block_group()
4090 WARN_ON(rc->block_group->pinned > 0); in btrfs_relocate_block_group()
4091 WARN_ON(rc->block_group->reserved > 0); in btrfs_relocate_block_group()
4092 WARN_ON(rc->block_group->used > 0); in btrfs_relocate_block_group()
4095 btrfs_dec_block_group_ro(rc->block_group); in btrfs_relocate_block_group()
4096 iput(rc->data_inode); in btrfs_relocate_block_group()
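/*
 * Note: the per-block-group driver.  It waits for unfinished subvolume drops
 * from a previous mount, refuses to touch a group that appears to be pinned
 * by an active swap file (-ETXTBSY), marks the group read-only, drops its v1
 * free space cache inode, creates the data reloc inode and waits for
 * reservations, nocow writers and ordered extents in the group's range
 * (finishing the zone first on zoned filesystems).  relocate_block_group()
 * then runs in stages: after a MOVE_DATA_EXTENTS pass that found file
 * extents, ordered IO is flushed, the data reloc inode's page cache is
 * invalidated and the loop repeats as UPDATE_DATA_PTRS to rewrite the
 * metadata pointers.  The final WARN_ONs check the group really ended up
 * empty before read-only is dropped again.
 */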
4104 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) in mark_garbage_root() argument
4106 struct btrfs_fs_info *fs_info = root->fs_info; in mark_garbage_root()
4110 trans = btrfs_start_transaction(fs_info->tree_root, 0); in mark_garbage_root()
4114 memset(&root->root_item.drop_progress, 0, in mark_garbage_root()
4115 sizeof(root->root_item.drop_progress)); in mark_garbage_root()
4116 btrfs_set_root_drop_level(&root->root_item, 0); in mark_garbage_root()
4117 btrfs_set_root_refs(&root->root_item, 0); in mark_garbage_root()
4118 ret = btrfs_update_root(trans, fs_info->tree_root, in mark_garbage_root()
4119 &root->root_key, &root->root_item); in mark_garbage_root()
4148 return -ENOMEM; in btrfs_recover_relocation()
4149 path->reada = READA_BACK; in btrfs_recover_relocation()
4153 key.offset = (u64)-1; in btrfs_recover_relocation()
4156 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, in btrfs_recover_relocation()
4163 if (path->slots[0] == 0) in btrfs_recover_relocation()
4165 path->slots[0]--; in btrfs_recover_relocation()
4167 leaf = path->nodes[0]; in btrfs_recover_relocation()
4168 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in btrfs_recover_relocation()
4175 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key); in btrfs_recover_relocation()
4181 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); in btrfs_recover_relocation()
4182 list_add(&reloc_root->root_list, &reloc_roots); in btrfs_recover_relocation()
4184 if (btrfs_root_refs(&reloc_root->root_item) > 0) { in btrfs_recover_relocation()
4186 reloc_root->root_key.offset, false); in btrfs_recover_relocation()
4189 if (ret != -ENOENT) { in btrfs_recover_relocation()
4206 key.offset--; in btrfs_recover_relocation()
4215 err = -ENOMEM; in btrfs_recover_relocation()
4225 rc->extent_root = btrfs_extent_root(fs_info, 0); in btrfs_recover_relocation()
4229 trans = btrfs_join_transaction(rc->extent_root); in btrfs_recover_relocation()
4235 rc->merge_reloc_tree = 1; in btrfs_recover_relocation()
4240 list_del(&reloc_root->root_list); in btrfs_recover_relocation()
4242 if (btrfs_root_refs(&reloc_root->root_item) == 0) { in btrfs_recover_relocation()
4243 list_add_tail(&reloc_root->root_list, in btrfs_recover_relocation()
4244 &rc->reloc_roots); in btrfs_recover_relocation()
4248 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, in btrfs_recover_relocation()
4252 list_add_tail(&reloc_root->root_list, &reloc_roots); in btrfs_recover_relocation()
4258 ASSERT(err != -EEXIST); in btrfs_recover_relocation()
4260 list_add_tail(&reloc_root->root_list, &reloc_roots); in btrfs_recover_relocation()
4265 fs_root->reloc_root = btrfs_grab_root(reloc_root); in btrfs_recover_relocation()
4277 trans = btrfs_join_transaction(rc->extent_root); in btrfs_recover_relocation()
4299 fs_root = btrfs_grab_root(fs_info->data_reloc_root); in btrfs_recover_relocation()
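/*
 * Note: mount-time recovery of an interrupted relocation.  The root tree is
 * scanned backwards for TREE_RELOC root items and each reloc root is read;
 * if its subvolume no longer exists it is marked as garbage (refs and drop
 * progress reset, see mark_garbage_root() above) so the cleaner can delete
 * it.  Surviving reloc roots are re-attached to their fs roots
 * (fs_root->reloc_root) and a reloc_control with merge_reloc_tree set is
 * built so the interrupted merge can be finished; on success the data reloc
 * root's orphan cleanup is run as well.
 */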
4311 * it also saves CPU time to re-calculate the checksum.
4315 struct btrfs_inode *inode = BTRFS_I(ordered->inode); in btrfs_reloc_clone_csums()
4316 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_reloc_clone_csums()
4317 u64 disk_bytenr = ordered->file_offset + inode->index_cnt; in btrfs_reloc_clone_csums()
4323 disk_bytenr + ordered->num_bytes - 1, in btrfs_reloc_clone_csums()
4332 list_del_init(&sums->list); in btrfs_reloc_clone_csums()
4346 sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr; in btrfs_reloc_clone_csums()
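/*
 * Note: checksums of relocated data are not recomputed.  Because file
 * offsets in the data reloc inode mirror the old logical addresses
 * (file_offset + index_cnt == old bytenr), the existing csum items can be
 * looked up at the old location and rebased onto the new ordered extent.
 * A standalone restatement of that rebasing, with invented names, not
 * kernel code:
 */
#include <stdint.h>

/* Move a csum's logical address from the old extent to the new one. */
static inline uint64_t demo_rebase_csum_logical(uint64_t old_csum_logical,
						uint64_t old_extent_start,
						uint64_t new_extent_start)
{
	/* keep the offset within the extent, swap the base */
	return new_extent_start + (old_csum_logical - old_extent_start);
}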
4354 struct btrfs_root *root, in btrfs_reloc_cow_block() argument
4358 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_reloc_cow_block()
4360 struct btrfs_backref_node *node; in btrfs_reloc_cow_block() local
4365 rc = fs_info->reloc_ctl; in btrfs_reloc_cow_block()
4369 BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root)); in btrfs_reloc_cow_block()
4373 btrfs_root_last_snapshot(&root->root_item)) in btrfs_reloc_cow_block()
4376 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID && in btrfs_reloc_cow_block()
4377 rc->create_reloc_tree) { in btrfs_reloc_cow_block()
4380 node = rc->backref_cache.path[level]; in btrfs_reloc_cow_block()
4383 * If node->bytenr != buf->start and node->new_bytenr != in btrfs_reloc_cow_block()
4384 * buf->start then we've got the wrong backref node for what we in btrfs_reloc_cow_block()
4387 if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) { in btrfs_reloc_cow_block()
4390 buf->start, node->bytenr, node->new_bytenr); in btrfs_reloc_cow_block()
4391 return -EUCLEAN; in btrfs_reloc_cow_block()
4394 btrfs_backref_drop_node_buffer(node); in btrfs_reloc_cow_block()
4395 atomic_inc(&cow->refs); in btrfs_reloc_cow_block()
4396 node->eb = cow; in btrfs_reloc_cow_block()
4397 node->new_bytenr = cow->start; in btrfs_reloc_cow_block()
4399 if (!node->pending) { in btrfs_reloc_cow_block()
4400 list_move_tail(&node->list, in btrfs_reloc_cow_block()
4401 &rc->backref_cache.pending[level]); in btrfs_reloc_cow_block()
4402 node->pending = 1; in btrfs_reloc_cow_block()
4406 mark_block_processed(rc, node); in btrfs_reloc_cow_block()
4409 rc->nodes_relocated += buf->len; in btrfs_reloc_cow_block()
4412 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) in btrfs_reloc_cow_block()
4413 ret = replace_file_extents(trans, rc, root, cow); in btrfs_reloc_cow_block()
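/*
 * Note: hook run whenever a tree block is COWed while relocation is active.
 * For reloc tree blocks the cached backref node at that level must match the
 * old buffer; it is then repointed at the freshly COWed eb (new_bytenr =
 * cow->start) and moved to the per-level pending list so the new location
 * gets committed into the reloc tree later.  A mismatch is treated as a
 * fatal inconsistency (-EUCLEAN).  Leaves COWed for the first time during
 * the UPDATE_DATA_PTRS stage additionally get their file extent disk
 * bytenrs rewritten on the spot by replace_file_extents().
 */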
4424 struct btrfs_root *root = pending->root; in btrfs_reloc_pre_snapshot() local
4425 struct reloc_control *rc = root->fs_info->reloc_ctl; in btrfs_reloc_pre_snapshot()
4427 if (!rc || !have_reloc_root(root)) in btrfs_reloc_pre_snapshot()
4430 if (!rc->merge_reloc_tree) in btrfs_reloc_pre_snapshot()
4433 root = root->reloc_root; in btrfs_reloc_pre_snapshot()
4434 BUG_ON(btrfs_root_refs(&root->root_item) == 0); in btrfs_reloc_pre_snapshot()
4445 *bytes_to_reserve += rc->nodes_relocated; in btrfs_reloc_pre_snapshot()
4450 * and create reloc root for the newly created snapshot
4453 * references held on the reloc_root, one for root->reloc_root and one for
4454 * rc->reloc_roots.
4459 struct btrfs_root *root = pending->root; in btrfs_reloc_post_snapshot() local
4462 struct reloc_control *rc = root->fs_info->reloc_ctl; in btrfs_reloc_post_snapshot()
4465 if (!rc || !have_reloc_root(root)) in btrfs_reloc_post_snapshot()
4468 rc = root->fs_info->reloc_ctl; in btrfs_reloc_post_snapshot()
4469 rc->merging_rsv_size += rc->nodes_relocated; in btrfs_reloc_post_snapshot()
4471 if (rc->merge_reloc_tree) { in btrfs_reloc_post_snapshot()
4472 ret = btrfs_block_rsv_migrate(&pending->block_rsv, in btrfs_reloc_post_snapshot()
4473 rc->block_rsv, in btrfs_reloc_post_snapshot()
4474 rc->nodes_relocated, true); in btrfs_reloc_post_snapshot()
4479 new_root = pending->snap; in btrfs_reloc_post_snapshot()
4480 reloc_root = create_reloc_root(trans, root->reloc_root, in btrfs_reloc_post_snapshot()
4481 new_root->root_key.objectid); in btrfs_reloc_post_snapshot()
4486 ASSERT(ret != -EEXIST); in btrfs_reloc_post_snapshot()
4492 new_root->reloc_root = btrfs_grab_root(reloc_root); in btrfs_reloc_post_snapshot()
4494 if (rc->create_reloc_tree) in btrfs_reloc_post_snapshot()
4495 ret = clone_backref_node(trans, rc, root, reloc_root); in btrfs_reloc_post_snapshot()
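/*
 * Note: snapshot creation while relocation is in flight.  The pre-snapshot
 * hook above asks for rc->nodes_relocated worth of extra reservation; here,
 * when a merge is pending, that space is migrated from the snapshot's rsv
 * into the relocation rsv, a reloc root is cloned for the freshly created
 * snapshot root and attached as new_root->reloc_root, and, while reloc trees
 * are still being created, the relevant backref nodes are cloned so the new
 * root takes part in the merge as well.
 */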
4508 lockdep_assert_held(&fs_info->reloc_mutex); in btrfs_get_reloc_bg_bytenr()
4510 if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group) in btrfs_get_reloc_bg_bytenr()
4511 logical = fs_info->reloc_ctl->block_group->start; in btrfs_get_reloc_bg_bytenr()