Lines Matching +full:edge +full:- +full:offset
1 // SPDX-License-Identifier: GPL-2.0
10 #include "disk-io.h"
14 #include "delayed-ref.h"
17 #include "tree-mod-log.h"
20 #include "extent-tree.h"
22 #include "tree-checker.h"
30 u64 offset; member
42 u64 offset = key->offset; in check_extent_in_eb() local
48 if (!ctx->ignore_extent_item_pos && in check_extent_in_eb()
56 if (ctx->extent_item_pos < data_offset || in check_extent_in_eb()
57 ctx->extent_item_pos >= data_offset + data_len) in check_extent_in_eb()
59 offset += ctx->extent_item_pos - data_offset; in check_extent_in_eb()
62 if (!ctx->indirect_ref_iterator || !ctx->cache_lookup) in check_extent_in_eb()
65 cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids, in check_extent_in_eb()
73 ret = ctx->indirect_ref_iterator(key->objectid, offset, in check_extent_in_eb()
75 ctx->user_ctx); in check_extent_in_eb()
83 return -ENOMEM; in check_extent_in_eb()
85 e->next = *eie; in check_extent_in_eb()
86 e->inum = key->objectid; in check_extent_in_eb()
87 e->offset = offset; in check_extent_in_eb()
88 e->num_bytes = data_len; in check_extent_in_eb()
99 eie_next = eie->next; in free_inode_elem_list()
132 if (disk_byte != ctx->bytenr) in find_extent_in_eb()
160 * ref->count >0:
161 * - incremented when a ref->count transitions to >0
162 * - decremented when a ref->count transitions to <1
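A note on the two transition rules above: they are what extent_is_shared() relies on, and they only fire when a ref's count crosses the zero boundary. A minimal userspace sketch of the same accounting (hypothetical helper name, not the kernel's update_share_count()):

    #include <assert.h>

    static int share_count;

    /* Only transitions across the zero boundary move the counter. */
    static void track_transition(int oldcount, int newcount)
    {
            if (oldcount > 0 && newcount < 1)
                    share_count--;          /* this ref stopped existing */
            else if (oldcount < 1 && newcount > 0)
                    share_count++;          /* this ref came into existence */
    }

    int main(void)
    {
            track_transition(0, 2);         /* 0 -> 2: one more live ref */
            track_transition(2, 0);         /* 2 -> 0: and gone again */
            assert(share_count == 0);
            return 0;
    }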
193 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0; in extent_is_shared()
206 return -ENOMEM; in btrfs_prelim_ref_init()
222 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
228 if (ref1->level < ref2->level) in prelim_ref_compare()
229 return -1; in prelim_ref_compare()
230 if (ref1->level > ref2->level) in prelim_ref_compare()
232 if (ref1->root_id < ref2->root_id) in prelim_ref_compare()
233 return -1; in prelim_ref_compare()
234 if (ref1->root_id > ref2->root_id) in prelim_ref_compare()
236 if (ref1->key_for_search.type < ref2->key_for_search.type) in prelim_ref_compare()
237 return -1; in prelim_ref_compare()
238 if (ref1->key_for_search.type > ref2->key_for_search.type) in prelim_ref_compare()
240 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid) in prelim_ref_compare()
241 return -1; in prelim_ref_compare()
242 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid) in prelim_ref_compare()
244 if (ref1->key_for_search.offset < ref2->key_for_search.offset) in prelim_ref_compare()
245 return -1; in prelim_ref_compare()
246 if (ref1->key_for_search.offset > ref2->key_for_search.offset) in prelim_ref_compare()
248 if (ref1->parent < ref2->parent) in prelim_ref_compare()
249 return -1; in prelim_ref_compare()
250 if (ref1->parent > ref2->parent) in prelim_ref_compare()
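The chain of comparisons above is a plain lexicographic order over (level, root_id, key_for_search, parent). A compressed sketch of the same pattern, using a hypothetical three-way CMP() helper and a trimmed-down struct; the kernel spells each step out explicitly as shown:

    #define CMP(a, b) (((a) > (b)) - ((a) < (b)))

    struct pref { int level; unsigned long long root_id, parent; };

    static int pref_cmp(const struct pref *r1, const struct pref *r2)
    {
            int c;

            if ((c = CMP(r1->level, r2->level)))
                    return c;
            if ((c = CMP(r1->root_id, r2->root_id)))
                    return c;
            return CMP(r1->parent, r2->parent);
    }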
263 sc->share_count--; in update_share_count()
265 sc->share_count++; in update_share_count()
267 if (newref->root_id == sc->root->root_key.objectid && in update_share_count()
268 newref->wanted_disk_byte == sc->data_bytenr && in update_share_count()
269 newref->key_for_search.objectid == sc->inum) in update_share_count()
270 sc->self_ref_count += newref->count; in update_share_count()
290 root = &preftree->root; in prelim_ref_insert()
291 p = &root->rb_root.rb_node; in prelim_ref_insert()
298 p = &(*p)->rb_left; in prelim_ref_insert()
300 p = &(*p)->rb_right; in prelim_ref_insert()
304 struct extent_inode_elem *eie = ref->inode_list; in prelim_ref_insert()
306 while (eie && eie->next) in prelim_ref_insert()
307 eie = eie->next; in prelim_ref_insert()
310 ref->inode_list = newref->inode_list; in prelim_ref_insert()
312 eie->next = newref->inode_list; in prelim_ref_insert()
314 preftree->count); in prelim_ref_insert()
316 * A delayed ref can have newref->count < 0. in prelim_ref_insert()
317 * The ref->count is updated to follow any in prelim_ref_insert()
320 update_share_count(sc, ref->count, in prelim_ref_insert()
321 ref->count + newref->count, newref); in prelim_ref_insert()
322 ref->count += newref->count; in prelim_ref_insert()
328 update_share_count(sc, 0, newref->count, newref); in prelim_ref_insert()
329 preftree->count++; in prelim_ref_insert()
330 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count); in prelim_ref_insert()
331 rb_link_node(&newref->rbnode, parent, p); in prelim_ref_insert()
332 rb_insert_color_cached(&newref->rbnode, root, leftmost); in prelim_ref_insert()
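prelim_ref_insert() above uses the kernel's cached rbtree: the descent walks a pointer-to-link until it reaches an empty slot, and the leftmost flag is cleared as soon as the walk ever goes right, so rb_insert_color_cached() can keep a cheap pointer to the minimum. The same descent shape, sketched on a plain (unbalanced, for brevity) binary search tree:

    #include <stdbool.h>
    #include <stddef.h>

    struct node { long key; struct node *left, *right; };

    static void insert(struct node **root, struct node *new, bool *leftmost)
    {
            struct node **p = root;

            *leftmost = true;
            while (*p) {
                    if (new->key < (*p)->key) {
                            p = &(*p)->left;
                    } else {
                            p = &(*p)->right;
                            *leftmost = false;  /* went right: not the minimum */
                    }
            }
            new->left = new->right = NULL;
            *p = new;                           /* the rb_link_node() step */
    }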
344 &preftree->root.rb_root, rbnode) { in prelim_release()
345 free_inode_elem_list(ref->inode_list); in prelim_release()
349 preftree->root = RB_ROOT_CACHED; in prelim_release()
350 preftree->count = 0; in prelim_release()
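When prelim_ref_insert() above merges two equal refs, it splices their inode lists by walking to the tail of the existing one (the eie/eie->next loop). Standalone form of that splice, assuming NULL-terminated singly linked lists:

    struct elem { struct elem *next; };

    static void concat(struct elem **listp, struct elem *newlist)
    {
            struct elem *e = *listp;

            if (!e) {                       /* existing inode list was empty */
                    *listp = newlist;
                    return;
            }
            while (e->next)                 /* walk to the tail ... */
                    e = e->next;
            e->next = newlist;              /* ... and hang the new list off it */
    }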
355 * - obtaining the parent is the goal
356 * - if you add a key, you must know that it is a correct key
357 * - if you cannot add the parent or a correct key, then we will look into the
364 * --------------------+--------+----------+--------+----------
365 * parent logical | y | - | - | -
366 * key to resolve | - | y | y | y
367 * tree block logical | - | - | - | -
370 * - column 1: we have the parent -> done
371 * - column 2, 3, 4: we use the key to find the parent
377 * --------------------+--------+----------+--------+----------
378 * parent logical | y | - | y | -
379 * key to resolve | - | - | - | y
381 * root for resolving | - | y | y | y
383 * - column 1, 3: we have the parent -> done
384 * - column 2: we take the first key from the block to find the parent
386 * - column 4: we use the key to find the parent
404 return -ENOMEM; in add_prelim_ref()
406 ref->root_id = root_id; in add_prelim_ref()
408 ref->key_for_search = *key; in add_prelim_ref()
410 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); in add_prelim_ref()
412 ref->inode_list = NULL; in add_prelim_ref()
413 ref->level = level; in add_prelim_ref()
414 ref->count = count; in add_prelim_ref()
415 ref->parent = parent; in add_prelim_ref()
416 ref->wanted_disk_byte = wanted_disk_byte; in add_prelim_ref()
427 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level, in add_direct_ref()
438 struct preftree *tree = &preftrees->indirect; in add_indirect_ref()
441 tree = &preftrees->indirect_missing_keys; in add_indirect_ref()
448 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; in is_shared_data_backref()
462 p = &(*p)->rb_left; in is_shared_data_backref()
464 p = &(*p)->rb_right; in is_shared_data_backref()
481 struct btrfs_key *key_for_search = &ref->key_for_search; in add_all_parents()
485 u64 wanted_disk_byte = ref->wanted_disk_byte; in add_all_parents()
491 eb = path->nodes[level]; in add_all_parents()
492 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); in add_all_parents()
508 eb = path->nodes[0]; in add_all_parents()
509 if (path->slots[0] >= btrfs_header_nritems(eb) || in add_all_parents()
510 is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
511 ref->root_id != btrfs_header_owner(eb)) { in add_all_parents()
512 if (ctx->time_seq == BTRFS_SEQ_LAST) in add_all_parents()
515 ret = btrfs_next_old_leaf(root, path, ctx->time_seq); in add_all_parents()
518 while (!ret && count < ref->count) { in add_all_parents()
519 eb = path->nodes[0]; in add_all_parents()
520 slot = path->slots[0]; in add_all_parents()
524 if (key.objectid != key_for_search->objectid || in add_all_parents()
534 (is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
535 ref->root_id != btrfs_header_owner(eb))) { in add_all_parents()
536 if (ctx->time_seq == BTRFS_SEQ_LAST) in add_all_parents()
539 ret = btrfs_next_old_leaf(root, path, ctx->time_seq); in add_all_parents()
552 if (ref->key_for_search.offset == key.offset - data_offset) in add_all_parents()
556 if (!ctx->skip_inode_ref_list) { in add_all_parents()
564 ret = ulist_add_merge_ptr(parents, eb->start, in add_all_parents()
568 if (!ret && !ctx->skip_inode_ref_list) { in add_all_parents()
569 while (old->next) in add_all_parents()
570 old = old->next; in add_all_parents()
571 old->next = eie; in add_all_parents()
576 if (ctx->time_seq == BTRFS_SEQ_LAST) in add_all_parents()
579 ret = btrfs_next_old_item(root, path, ctx->time_seq); in add_all_parents()
603 int level = ref->level; in resolve_indirect_ref()
604 struct btrfs_key search_key = ref->key_for_search; in resolve_indirect_ref()
614 if (path->search_commit_root) in resolve_indirect_ref()
615 root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id); in resolve_indirect_ref()
617 root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false); in resolve_indirect_ref()
623 if (!path->search_commit_root && in resolve_indirect_ref()
624 test_bit(BTRFS_ROOT_DELETING, &root->state)) { in resolve_indirect_ref()
625 ret = -ENOENT; in resolve_indirect_ref()
629 if (btrfs_is_testing(ctx->fs_info)) { in resolve_indirect_ref()
630 ret = -ENOENT; in resolve_indirect_ref()
634 if (path->search_commit_root) in resolve_indirect_ref()
635 root_level = btrfs_header_level(root->commit_root); in resolve_indirect_ref()
636 else if (ctx->time_seq == BTRFS_SEQ_LAST) in resolve_indirect_ref()
637 root_level = btrfs_header_level(root->node); in resolve_indirect_ref()
639 root_level = btrfs_old_root_level(root, ctx->time_seq); in resolve_indirect_ref()
645 * We can often find data backrefs with an offset that is too large in resolve_indirect_ref()
646 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when in resolve_indirect_ref()
647 * subtracting a file's offset with the data offset of its in resolve_indirect_ref()
651 * So if we detect such a case we set the search key's offset to zero to in resolve_indirect_ref()
653 * add_all_parents(), otherwise we will miss it because the offset in resolve_indirect_ref()
654 * taken from the backref is much larger than the offset of the file in resolve_indirect_ref()
664 search_key.offset >= LLONG_MAX) in resolve_indirect_ref()
665 search_key.offset = 0; in resolve_indirect_ref()
666 path->lowest_level = level; in resolve_indirect_ref()
667 if (ctx->time_seq == BTRFS_SEQ_LAST) in resolve_indirect_ref()
670 ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq); in resolve_indirect_ref()
672 btrfs_debug(ctx->fs_info, in resolve_indirect_ref()
674 ref->root_id, level, ref->count, ret, in resolve_indirect_ref()
675 ref->key_for_search.objectid, ref->key_for_search.type, in resolve_indirect_ref()
676 ref->key_for_search.offset); in resolve_indirect_ref()
680 eb = path->nodes[level]; in resolve_indirect_ref()
686 level--; in resolve_indirect_ref()
687 eb = path->nodes[level]; in resolve_indirect_ref()
694 path->lowest_level = 0; in resolve_indirect_ref()
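The search_key.offset clamp above guards against a u64 wraparound: the offset stored in a data backref is the file offset minus the extent's data offset, computed in unsigned arithmetic. A short demonstration with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t file_offset = 0;       /* e.g. a clone placed at offset 0 */
            uint64_t data_offset = 4096;    /* bytes skipped inside the extent */
            uint64_t backref_offset = file_offset - data_offset;

            /* Wraps to 18446744073709547520, which is >= LLONG_MAX. */
            printf("%llu\n", (unsigned long long)backref_offset);
            return 0;
    }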
704 return (struct extent_inode_elem *)(uintptr_t)node->aux; in unode_aux_to_inode_list()
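unode_aux_to_inode_list() above recovers a pointer that was stashed in a ulist node's u64 aux field. Going through uintptr_t on both sides keeps the casts well-defined on 32-bit and 64-bit targets; a minimal round trip:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            int x = 42;
            uint64_t aux = (uint64_t)(uintptr_t)&x;   /* store, as ulist callers do */
            int *p = (int *)(uintptr_t)aux;           /* load, as the helper above does */

            assert(p == &x && *p == 42);
            return 0;
    }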
749 return -ENOMEM; in resolve_indirect_refs()
757 while ((rnode = rb_first_cached(&preftrees->indirect.root))) { in resolve_indirect_refs()
761 if (WARN(ref->parent, in resolve_indirect_refs()
763 ret = -EINVAL; in resolve_indirect_refs()
767 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); in resolve_indirect_refs()
768 preftrees->indirect.count--; in resolve_indirect_refs()
770 if (ref->count == 0) { in resolve_indirect_refs()
775 if (sc && ref->root_id != sc->root->root_key.objectid) { in resolve_indirect_refs()
785 if (err == -ENOENT) { in resolve_indirect_refs()
786 prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, in resolve_indirect_refs()
798 ref->parent = node ? node->val : 0; in resolve_indirect_refs()
799 ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
809 ret = -ENOMEM; in resolve_indirect_refs()
813 new_ref->parent = node->val; in resolve_indirect_refs()
814 new_ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
815 prelim_ref_insert(ctx->fs_info, &preftrees->direct, in resolve_indirect_refs()
823 prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL); in resolve_indirect_refs()
845 struct preftree *tree = &preftrees->indirect_missing_keys; in add_missing_keys()
848 while ((node = rb_first_cached(&tree->root))) { in add_missing_keys()
852 rb_erase_cached(node, &tree->root); in add_missing_keys()
854 BUG_ON(ref->parent); /* should not be a direct ref */ in add_missing_keys()
855 BUG_ON(ref->key_for_search.type); in add_missing_keys()
856 BUG_ON(!ref->wanted_disk_byte); in add_missing_keys()
858 check.level = ref->level - 1; in add_missing_keys()
859 check.owner_root = ref->root_id; in add_missing_keys()
861 eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check); in add_missing_keys()
869 return -EIO; in add_missing_keys()
875 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
877 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
881 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); in add_missing_keys()
901 spin_lock(&head->lock); in add_delayed_refs()
902 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { in add_delayed_refs()
905 if (node->seq > seq) in add_delayed_refs()
908 switch (node->action) { in add_delayed_refs()
914 count = node->ref_mod; in add_delayed_refs()
917 count = node->ref_mod * -1; in add_delayed_refs()
922 switch (node->type) { in add_delayed_refs()
928 if (head->extent_op && head->extent_op->update_key) { in add_delayed_refs()
929 btrfs_disk_key_to_cpu(&key, &head->extent_op->key); in add_delayed_refs()
934 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
935 key_ptr, ref->level + 1, in add_delayed_refs()
936 node->bytenr, count, sc, in add_delayed_refs()
946 ret = add_direct_ref(fs_info, preftrees, ref->level + 1, in add_delayed_refs()
947 ref->parent, node->bytenr, count, in add_delayed_refs()
956 key.objectid = ref->objectid; in add_delayed_refs()
958 key.offset = ref->offset; in add_delayed_refs()
976 sc->have_delayed_delete_refs = true; in add_delayed_refs()
978 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
979 &key, 0, node->bytenr, count, sc, in add_delayed_refs()
989 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent, in add_delayed_refs()
990 node->bytenr, count, sc, in add_delayed_refs()
1007 spin_unlock(&head->lock); in add_delayed_refs()
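add_delayed_refs() above turns each delayed ref into a signed count: +ref_mod for an add, ref_mod * -1 for a drop, and anything newer than the sequence cutoff is skipped. The mapping in isolation, with hypothetical types standing in for the kernel's enums:

    #include <stdint.h>

    enum action { ADD_REF, DROP_REF };      /* stands in for BTRFS_*_DELAYED_REF */

    struct delayed_ref { uint64_t seq; enum action action; int ref_mod; };

    static int ref_delta(const struct delayed_ref *r, uint64_t cutoff_seq)
    {
            if (r->seq > cutoff_seq)
                    return 0;               /* too new for this point in time */
            return r->action == ADD_REF ? r->ref_mod : r->ref_mod * -1;
    }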
1035 leaf = path->nodes[0]; in add_inline_refs()
1036 slot = path->slots[0]; in add_inline_refs()
1043 if (ctx->check_extent_item) { in add_inline_refs()
1044 ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx); in add_inline_refs()
1064 *info_level = found_key.offset; in add_inline_refs()
1071 u64 offset; in add_inline_refs() local
1078 return -EUCLEAN; in add_inline_refs()
1080 offset = btrfs_extent_inline_ref_offset(leaf, iref); in add_inline_refs()
1084 ret = add_direct_ref(ctx->fs_info, preftrees, in add_inline_refs()
1085 *info_level + 1, offset, in add_inline_refs()
1086 ctx->bytenr, 1, NULL, GFP_NOFS); in add_inline_refs()
1095 ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset, in add_inline_refs()
1096 ctx->bytenr, count, sc, GFP_NOFS); in add_inline_refs()
1100 ret = add_indirect_ref(ctx->fs_info, preftrees, offset, in add_inline_refs()
1102 ctx->bytenr, 1, NULL, GFP_NOFS); in add_inline_refs()
1109 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in add_inline_refs()
1114 key.offset = btrfs_extent_data_ref_offset(leaf, dref); in add_inline_refs()
1116 if (sc && key.objectid != sc->inum && in add_inline_refs()
1117 !sc->have_delayed_delete_refs) { in add_inline_refs()
1124 if (!ctx->skip_data_ref || in add_inline_refs()
1125 !ctx->skip_data_ref(root, key.objectid, key.offset, in add_inline_refs()
1126 ctx->user_ctx)) in add_inline_refs()
1127 ret = add_indirect_ref(ctx->fs_info, preftrees, in add_inline_refs()
1128 root, &key, 0, ctx->bytenr, in add_inline_refs()
1144 * add all non-inline backrefs for bytenr to the list
1154 struct btrfs_fs_info *fs_info = extent_root->fs_info; in add_keyed_refs()
1169 slot = path->slots[0]; in add_keyed_refs()
1170 leaf = path->nodes[0]; in add_keyed_refs()
1173 if (key.objectid != ctx->bytenr) in add_keyed_refs()
1184 info_level + 1, key.offset, in add_keyed_refs()
1185 ctx->bytenr, 1, NULL, GFP_NOFS); in add_keyed_refs()
1196 key.offset, ctx->bytenr, count, in add_keyed_refs()
1202 ret = add_indirect_ref(fs_info, preftrees, key.offset, in add_keyed_refs()
1203 NULL, info_level + 1, ctx->bytenr, in add_keyed_refs()
1218 key.offset = btrfs_extent_data_ref_offset(leaf, dref); in add_keyed_refs()
1220 if (sc && key.objectid != sc->inum && in add_keyed_refs()
1221 !sc->have_delayed_delete_refs) { in add_keyed_refs()
1228 if (!ctx->skip_data_ref || in add_keyed_refs()
1229 !ctx->skip_data_ref(root, key.objectid, key.offset, in add_keyed_refs()
1230 ctx->user_ctx)) in add_keyed_refs()
1232 &key, 0, ctx->bytenr, in add_keyed_refs()
1249 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1256 const struct btrfs_fs_info *fs_info = root->fs_info; in lookup_backref_shared_cache()
1259 if (!current->journal_info) in lookup_backref_shared_cache()
1260 lockdep_assert_held(&fs_info->commit_root_sem); in lookup_backref_shared_cache()
1262 if (!ctx->use_path_cache) in lookup_backref_shared_cache()
1269 * Level -1 is used for the data extent, which is not reliable to cache in lookup_backref_shared_cache()
1276 entry = &ctx->path_cache_entries[level]; in lookup_backref_shared_cache()
1279 if (entry->bytenr != bytenr) in lookup_backref_shared_cache()
1286 if (!entry->is_shared && in lookup_backref_shared_cache()
1287 entry->gen != btrfs_root_last_snapshot(&root->root_item)) in lookup_backref_shared_cache()
1295 if (entry->is_shared && in lookup_backref_shared_cache()
1296 entry->gen != btrfs_get_last_root_drop_gen(fs_info)) in lookup_backref_shared_cache()
1299 *is_shared = entry->is_shared; in lookup_backref_shared_cache()
1309 ctx->path_cache_entries[i].is_shared = true; in lookup_backref_shared_cache()
1310 ctx->path_cache_entries[i].gen = entry->gen; in lookup_backref_shared_cache()
1319 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1326 const struct btrfs_fs_info *fs_info = root->fs_info; in store_backref_shared_cache()
1330 if (!current->journal_info) in store_backref_shared_cache()
1331 lockdep_assert_held(&fs_info->commit_root_sem); in store_backref_shared_cache()
1333 if (!ctx->use_path_cache) in store_backref_shared_cache()
1340 * Level -1 is used for the data extent, which is not reliable to cache in store_backref_shared_cache()
1350 gen = btrfs_root_last_snapshot(&root->root_item); in store_backref_shared_cache()
1352 entry = &ctx->path_cache_entries[level]; in store_backref_shared_cache()
1353 entry->bytenr = bytenr; in store_backref_shared_cache()
1354 entry->is_shared = is_shared; in store_backref_shared_cache()
1355 entry->gen = gen; in store_backref_shared_cache()
1366 entry = &ctx->path_cache_entries[i]; in store_backref_shared_cache()
1367 entry->is_shared = is_shared; in store_backref_shared_cache()
1368 entry->gen = gen; in store_backref_shared_cache()
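store_backref_shared_cache() above keeps one cache slot per tree level and, when a node turns out to be shared, marks every level above it shared as well, since the rest of the path runs through that node. The shape of that propagation (BTRFS_MAX_LEVEL is 8 in current kernels; the entry layout here is a sketch):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_LEVEL 8     /* mirrors BTRFS_MAX_LEVEL */

    struct cache_entry { uint64_t bytenr; uint64_t gen; bool is_shared; };

    static void store(struct cache_entry *cache, int level, uint64_t bytenr,
                      uint64_t gen, bool is_shared)
    {
            cache[level] = (struct cache_entry){ bytenr, gen, is_shared };
            if (!is_shared)
                    return;
            /* A shared node makes every level above it shared too. */
            for (int i = level + 1; i < MAX_LEVEL; i++) {
                    cache[i].is_shared = true;
                    cache[i].gen = gen;
            }
    }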
1390 struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr); in find_parent_nodes()
1408 ASSERT(ctx->roots == NULL); in find_parent_nodes()
1410 key.objectid = ctx->bytenr; in find_parent_nodes()
1411 key.offset = (u64)-1; in find_parent_nodes()
1412 if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA)) in find_parent_nodes()
1419 return -ENOMEM; in find_parent_nodes()
1420 if (!ctx->trans) { in find_parent_nodes()
1421 path->search_commit_root = 1; in find_parent_nodes()
1422 path->skip_locking = 1; in find_parent_nodes()
1425 if (ctx->time_seq == BTRFS_SEQ_LAST) in find_parent_nodes()
1426 path->skip_locking = 1; in find_parent_nodes()
1437 ret = -EUCLEAN; in find_parent_nodes()
1441 if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) && in find_parent_nodes()
1442 ctx->time_seq != BTRFS_SEQ_LAST) { in find_parent_nodes()
1449 delayed_refs = &ctx->trans->transaction->delayed_refs; in find_parent_nodes()
1450 spin_lock(&delayed_refs->lock); in find_parent_nodes()
1451 head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr); in find_parent_nodes()
1453 if (!mutex_trylock(&head->mutex)) { in find_parent_nodes()
1454 refcount_inc(&head->refs); in find_parent_nodes()
1455 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1463 mutex_lock(&head->mutex); in find_parent_nodes()
1464 mutex_unlock(&head->mutex); in find_parent_nodes()
1468 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1469 ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq, in find_parent_nodes()
1471 mutex_unlock(&head->mutex); in find_parent_nodes()
1475 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1479 if (path->slots[0]) { in find_parent_nodes()
1483 path->slots[0]--; in find_parent_nodes()
1484 leaf = path->nodes[0]; in find_parent_nodes()
1485 slot = path->slots[0]; in find_parent_nodes()
1487 if (key.objectid == ctx->bytenr && in find_parent_nodes()
1523 if (sc && ctx->bytenr == sc->data_bytenr) { in find_parent_nodes()
1533 if (sc->data_extent_gen > in find_parent_nodes()
1534 btrfs_root_last_snapshot(&sc->root->root_item)) { in find_parent_nodes()
1550 if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr && in find_parent_nodes()
1551 sc->self_ref_count == 1) { in find_parent_nodes()
1555 cached = lookup_backref_shared_cache(sc->ctx, sc->root, in find_parent_nodes()
1556 sc->ctx->curr_leaf_bytenr, in find_parent_nodes()
1570 ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0); in find_parent_nodes()
1592 node = rb_next(&ref->rbnode); in find_parent_nodes()
1594 * ref->count < 0 can happen here if there are delayed in find_parent_nodes()
1595 * refs with a node->action of BTRFS_DROP_DELAYED_REF. in find_parent_nodes()
1601 * and would retain their original ref->count < 0. in find_parent_nodes()
1603 if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) { in find_parent_nodes()
1605 ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS); in find_parent_nodes()
1609 if (ref->count && ref->parent) { in find_parent_nodes()
1610 if (!ctx->skip_inode_ref_list && !ref->inode_list && in find_parent_nodes()
1611 ref->level == 0) { in find_parent_nodes()
1615 check.level = ref->level; in find_parent_nodes()
1617 eb = read_tree_block(ctx->fs_info, ref->parent, in find_parent_nodes()
1625 ret = -EIO; in find_parent_nodes()
1629 if (!path->skip_locking) in find_parent_nodes()
1632 if (!path->skip_locking) in find_parent_nodes()
1638 ref->inode_list = eie; in find_parent_nodes()
1646 ret = ulist_add_merge_ptr(ctx->refs, ref->parent, in find_parent_nodes()
1647 ref->inode_list, in find_parent_nodes()
1651 if (!ret && !ctx->skip_inode_ref_list) { in find_parent_nodes()
1662 ret = -EUCLEAN; in find_parent_nodes()
1665 while (eie->next) in find_parent_nodes()
1666 eie = eie->next; in find_parent_nodes()
1667 eie->next = ref->inode_list; in find_parent_nodes()
1674 * use-after-free when our caller uses it or double in find_parent_nodes()
1677 ref->inode_list = NULL; in find_parent_nodes()
1696 * @ctx->bytenr and @ctx->extent_item_pos. The bytenr of the found leaves are
1697 * added to the ulist at @ctx->refs, and that ulist is allocated by this
1699 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is in btrfs_find_all_leafs()
1702 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
1708 ASSERT(ctx->refs == NULL); in btrfs_find_all_leafs()
1710 ctx->refs = ulist_alloc(GFP_NOFS); in btrfs_find_all_leafs()
1711 if (!ctx->refs) in btrfs_find_all_leafs()
1712 return -ENOMEM; in btrfs_find_all_leafs()
1716 (ret < 0 && ret != -ENOENT)) { in btrfs_find_all_leafs()
1717 free_leaf_list(ctx->refs); in btrfs_find_all_leafs()
1718 ctx->refs = NULL; in btrfs_find_all_leafs()
1736 * Found roots are added to @ctx->roots, which is allocated by this function if
1739 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1746 const u64 orig_bytenr = ctx->bytenr; in btrfs_find_all_roots_safe()
1747 const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list; in btrfs_find_all_roots_safe()
1752 ASSERT(ctx->refs == NULL); in btrfs_find_all_roots_safe()
1754 ctx->refs = ulist_alloc(GFP_NOFS); in btrfs_find_all_roots_safe()
1755 if (!ctx->refs) in btrfs_find_all_roots_safe()
1756 return -ENOMEM; in btrfs_find_all_roots_safe()
1758 if (!ctx->roots) { in btrfs_find_all_roots_safe()
1759 ctx->roots = ulist_alloc(GFP_NOFS); in btrfs_find_all_roots_safe()
1760 if (!ctx->roots) { in btrfs_find_all_roots_safe()
1761 ulist_free(ctx->refs); in btrfs_find_all_roots_safe()
1762 ctx->refs = NULL; in btrfs_find_all_roots_safe()
1763 return -ENOMEM; in btrfs_find_all_roots_safe()
1768 ctx->skip_inode_ref_list = true; in btrfs_find_all_roots_safe()
1775 if (ret < 0 && ret != -ENOENT) { in btrfs_find_all_roots_safe()
1777 ulist_free(ctx->roots); in btrfs_find_all_roots_safe()
1778 ctx->roots = NULL; in btrfs_find_all_roots_safe()
1783 node = ulist_next(ctx->refs, &uiter); in btrfs_find_all_roots_safe()
1786 ctx->bytenr = node->val; in btrfs_find_all_roots_safe()
1790 ulist_free(ctx->refs); in btrfs_find_all_roots_safe()
1791 ctx->refs = NULL; in btrfs_find_all_roots_safe()
1792 ctx->bytenr = orig_bytenr; in btrfs_find_all_roots_safe()
1793 ctx->skip_inode_ref_list = orig_skip_inode_ref_list; in btrfs_find_all_roots_safe()
1803 if (!ctx->trans && !skip_commit_root_sem) in btrfs_find_all_roots()
1804 down_read(&ctx->fs_info->commit_root_sem); in btrfs_find_all_roots()
1806 if (!ctx->trans && !skip_commit_root_sem) in btrfs_find_all_roots()
1807 up_read(&ctx->fs_info->commit_root_sem); in btrfs_find_all_roots()
1819 ulist_init(&ctx->refs); in btrfs_alloc_backref_share_check_ctx()
1829 ulist_release(&ctx->refs); in btrfs_free_backref_share_ctx()
1858 struct btrfs_root *root = inode->root; in btrfs_is_data_extent_shared()
1859 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_is_data_extent_shared()
1880 if (ctx->prev_extents_cache[i].bytenr == bytenr) in btrfs_is_data_extent_shared()
1881 return ctx->prev_extents_cache[i].is_shared; in btrfs_is_data_extent_shared()
1884 ulist_init(&ctx->refs); in btrfs_is_data_extent_shared()
1888 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { in btrfs_is_data_extent_shared()
1893 down_read(&fs_info->commit_root_sem); in btrfs_is_data_extent_shared()
1899 ctx->use_path_cache = true; in btrfs_is_data_extent_shared()
1909 ctx->curr_leaf_bytenr, 0, in btrfs_is_data_extent_shared()
1919 walk_ctx.refs = &ctx->refs; in btrfs_is_data_extent_shared()
1921 /* -1 means we are in the bytenr of the data extent. */ in btrfs_is_data_extent_shared()
1922 level = -1; in btrfs_is_data_extent_shared()
1925 const unsigned long prev_ref_count = ctx->refs.nnodes; in btrfs_is_data_extent_shared()
1938 if (ret < 0 && ret != -ENOENT) in btrfs_is_data_extent_shared()
1944 * the ctx->refs ulist, in which case we have to check multiple in btrfs_is_data_extent_shared()
1949 * 1) level -1, the data extent: If our data extent was not in btrfs_is_data_extent_shared()
1952 * the same offset, which means there are 2 (or more) file in btrfs_is_data_extent_shared()
1953 * extent items that point to the data extent - this happens in btrfs_is_data_extent_shared()
1969 * (direct ref) and a non-shared tree block ref (indirect in btrfs_is_data_extent_shared()
1972 if ((ctx->refs.nnodes - prev_ref_count) > 1) in btrfs_is_data_extent_shared()
1973 ctx->use_path_cache = false; in btrfs_is_data_extent_shared()
1978 node = ulist_next(&ctx->refs, &uiter); in btrfs_is_data_extent_shared()
1981 bytenr = node->val; in btrfs_is_data_extent_shared()
1982 if (ctx->use_path_cache) { in btrfs_is_data_extent_shared()
2006 if (!ctx->use_path_cache) { in btrfs_is_data_extent_shared()
2009 level--; in btrfs_is_data_extent_shared()
2011 bytenr = ctx->path_cache_entries[level].bytenr; in btrfs_is_data_extent_shared()
2012 ctx->use_path_cache = true; in btrfs_is_data_extent_shared()
2018 ctx->path_cache_entries[i].bytenr = 0; in btrfs_is_data_extent_shared()
2026 int slot = ctx->prev_extents_cache_slot; in btrfs_is_data_extent_shared()
2028 ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr; in btrfs_is_data_extent_shared()
2029 ctx->prev_extents_cache[slot].is_shared = (ret == 1); in btrfs_is_data_extent_shared()
2032 ctx->prev_extents_cache_slot = slot; in btrfs_is_data_extent_shared()
2040 up_read(&fs_info->commit_root_sem); in btrfs_is_data_extent_shared()
2043 ulist_release(&ctx->refs); in btrfs_is_data_extent_shared()
2044 ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr; in btrfs_is_data_extent_shared()
2063 key.offset = start_off; in btrfs_find_one_extref()
2070 leaf = path->nodes[0]; in btrfs_find_one_extref()
2071 slot = path->slots[0]; in btrfs_find_one_extref()
2074 * If the item at offset is not found, in btrfs_find_one_extref()
2085 ret = -ENOENT; in btrfs_find_one_extref()
2099 ret = -ENOENT; in btrfs_find_one_extref()
2106 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_find_one_extref()
2110 *found_off = found_key.offset; in btrfs_find_one_extref()
2120 * 0-terminated. the path is only given within the current file system.
2139 s64 bytes_left = ((s64)size) - 1; in btrfs_ref_to_path()
2148 bytes_left -= name_len; in btrfs_ref_to_path()
2153 if (!path->skip_locking) in btrfs_ref_to_path()
2160 ret = -ENOENT; in btrfs_ref_to_path()
2164 next_inum = found_key.offset; in btrfs_ref_to_path()
2170 slot = path->slots[0]; in btrfs_ref_to_path()
2171 eb = path->nodes[0]; in btrfs_ref_to_path()
2174 path->nodes[0] = NULL; in btrfs_ref_to_path()
2175 path->locks[0] = 0; in btrfs_ref_to_path()
2184 --bytes_left; in btrfs_ref_to_path()
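btrfs_ref_to_path() above assembles the path backwards: it starts at the end of the caller's buffer and prepends one name plus a '/' per loop iteration, so the finished string begins wherever bytes_left ended up. A standalone sketch of that construction:

    #include <stdio.h>
    #include <string.h>

    /* Returns the start of the finished path, or NULL if it did not fit. */
    static char *build_path(char *buf, size_t size, const char *names[], int depth)
    {
            long bytes_left = (long)size - 1;       /* keep room for the NUL */

            buf[bytes_left] = '\0';
            while (depth--) {
                    size_t len = strlen(names[depth]);

                    bytes_left -= (long)len;
                    if (bytes_left >= 0)
                            memcpy(buf + bytes_left, names[depth], len);
                    if (bytes_left > 0)
                            buf[--bytes_left] = '/';        /* component separator */
            }
            return bytes_left >= 0 ? buf + bytes_left : NULL;
    }

    int main(void)
    {
            const char *names[] = { "dir", "file" };
            char buf[64];                           /* large enough here */

            puts(build_path(buf, sizeof(buf), names, 2));   /* "/dir/file" */
            return 0;
    }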
2220 key.offset = (u64)-1; in extent_from_logical()
2229 ret = -ENOENT; in extent_from_logical()
2232 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]); in extent_from_logical()
2233 if (found_key->type == BTRFS_METADATA_ITEM_KEY) in extent_from_logical()
2234 size = fs_info->nodesize; in extent_from_logical()
2235 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY) in extent_from_logical()
2236 size = found_key->offset; in extent_from_logical()
2238 if (found_key->objectid > logical || in extent_from_logical()
2239 found_key->objectid + size <= logical) { in extent_from_logical()
2242 return -ENOENT; in extent_from_logical()
2245 eb = path->nodes[0]; in extent_from_logical()
2246 item_size = btrfs_item_size(eb, path->slots[0]); in extent_from_logical()
2249 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); in extent_from_logical()
2254 logical, logical - found_key->objectid, found_key->objectid, in extent_from_logical()
2255 found_key->offset, flags, item_size); in extent_from_logical()
2268 return -EIO; in extent_from_logical()
2295 if (key->type == BTRFS_METADATA_ITEM_KEY) { in get_extent_inline_ref()
2300 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY); in get_extent_inline_ref()
2310 return -ENOENT; in get_extent_inline_ref()
2318 return -EUCLEAN; in get_extent_inline_ref()
2343 if (*ptr == (unsigned long)-1) in tree_backref_for_extent()
2363 if (key->type == BTRFS_EXTENT_ITEM_KEY) { in tree_backref_for_extent()
2369 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); in tree_backref_for_extent()
2370 *out_level = (u8)key->offset; in tree_backref_for_extent()
2374 *ptr = (unsigned long)-1; in tree_backref_for_extent()
2387 for (eie = inode_list; eie; eie = eie->next) { in iterate_leaf_refs()
2390 extent_item_objectid, eie->inum, in iterate_leaf_refs()
2391 eie->offset, root); in iterate_leaf_refs()
2392 ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx); in iterate_leaf_refs()
2407 * when the iterator function returns a non-zero value, iteration stops.
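That is the usual kernel iterator contract: the callback returns 0 to continue and non-zero to abort the walk with that value, as iterate_leaf_refs() above does when it forwards each extent_inode_elem. A driver and callback type in isolation, with hypothetical names standing in for the kernel's:

    struct elem {
            unsigned long long inum, offset, num_bytes;
            struct elem *next;
    };

    typedef int (*iterate_t)(unsigned long long inum, unsigned long long offset,
                             unsigned long long num_bytes, void *ctx);

    static int for_each_elem(struct elem *list, iterate_t fn, void *ctx)
    {
            int ret = 0;

            for (struct elem *e = list; e && !ret; e = e->next)
                    ret = fn(e->inum, e->offset, e->num_bytes, ctx);
            return ret;     /* first non-zero return, or 0 */
    }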
2419 btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu", in iterate_extent_inodes()
2420 ctx->bytenr); in iterate_extent_inodes()
2422 ASSERT(ctx->trans == NULL); in iterate_extent_inodes()
2423 ASSERT(ctx->roots == NULL); in iterate_extent_inodes()
2428 trans = btrfs_attach_transaction(ctx->fs_info->tree_root); in iterate_extent_inodes()
2430 if (PTR_ERR(trans) != -ENOENT && in iterate_extent_inodes()
2431 PTR_ERR(trans) != -EROFS) in iterate_extent_inodes()
2435 ctx->trans = trans; in iterate_extent_inodes()
2438 if (ctx->trans) { in iterate_extent_inodes()
2439 btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem); in iterate_extent_inodes()
2440 ctx->time_seq = seq_elem.seq; in iterate_extent_inodes()
2442 down_read(&ctx->fs_info->commit_root_sem); in iterate_extent_inodes()
2448 refs = ctx->refs; in iterate_extent_inodes()
2449 ctx->refs = NULL; in iterate_extent_inodes()
2453 const u64 leaf_bytenr = ref_node->val; in iterate_extent_inodes()
2458 inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux; in iterate_extent_inodes()
2460 if (ctx->cache_lookup) { in iterate_extent_inodes()
2465 cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx, in iterate_extent_inodes()
2469 ret = iterate_leaf_refs(ctx->fs_info, in iterate_extent_inodes()
2482 if (!ctx->roots) { in iterate_extent_inodes()
2483 ctx->roots = ulist_alloc(GFP_NOFS); in iterate_extent_inodes()
2484 if (!ctx->roots) { in iterate_extent_inodes()
2485 ret = -ENOMEM; in iterate_extent_inodes()
2490 ctx->bytenr = leaf_bytenr; in iterate_extent_inodes()
2495 if (ctx->cache_store) in iterate_extent_inodes()
2496 ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx); in iterate_extent_inodes()
2499 while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) { in iterate_extent_inodes()
2500 btrfs_debug(ctx->fs_info, in iterate_extent_inodes()
2502 root_node->val, ref_node->val, in iterate_extent_inodes()
2503 ref_node->aux); in iterate_extent_inodes()
2504 ret = iterate_leaf_refs(ctx->fs_info, inode_list, in iterate_extent_inodes()
2505 root_node->val, ctx->bytenr, in iterate_extent_inodes()
2508 ulist_reinit(ctx->roots); in iterate_extent_inodes()
2513 if (ctx->trans) { in iterate_extent_inodes()
2514 btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem); in iterate_extent_inodes()
2515 btrfs_end_transaction(ctx->trans); in iterate_extent_inodes()
2516 ctx->trans = NULL; in iterate_extent_inodes()
2518 up_read(&ctx->fs_info->commit_root_sem); in iterate_extent_inodes()
2521 ulist_free(ctx->roots); in iterate_extent_inodes()
2522 ctx->roots = NULL; in iterate_extent_inodes()
2530 static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx) in build_ino_list() argument
2535 if (inodes->bytes_left >= c) { in build_ino_list()
2536 inodes->bytes_left -= c; in build_ino_list()
2537 inodes->val[inodes->elem_cnt] = inum; in build_ino_list()
2538 inodes->val[inodes->elem_cnt + 1] = offset; in build_ino_list()
2539 inodes->val[inodes->elem_cnt + 2] = root; in build_ino_list()
2540 inodes->elem_cnt += 3; in build_ino_list()
2542 inodes->bytes_missing += c - inodes->bytes_left; in build_ino_list()
2543 inodes->bytes_left = 0; in build_ino_list()
2544 inodes->elem_missed += 3; in build_ino_list()
2558 int search_commit_root = path->search_commit_root; in iterate_inodes_from_logical()
2565 return -EINVAL; in iterate_inodes_from_logical()
2571 walk_ctx.extent_item_pos = logical - found_key.objectid; in iterate_inodes_from_logical()
2590 struct btrfs_root *fs_root = ipath->fs_root; in iterate_inode_refs()
2591 struct btrfs_path *path = ipath->btrfs_path; in iterate_inode_refs()
2604 ret = found ? 0 : -ENOENT; in iterate_inode_refs()
2609 parent = found_key.offset; in iterate_inode_refs()
2610 slot = path->slots[0]; in iterate_inode_refs()
2611 eb = btrfs_clone_extent_buffer(path->nodes[0]); in iterate_inode_refs()
2613 ret = -ENOMEM; in iterate_inode_refs()
2623 btrfs_debug(fs_root->fs_info, in iterate_inode_refs()
2624 "following ref at offset %u for inode %llu in tree %llu", in iterate_inode_refs()
2626 fs_root->root_key.objectid); in iterate_inode_refs()
2646 u64 offset = 0; in iterate_inode_extrefs() local
2649 struct btrfs_root *fs_root = ipath->fs_root; in iterate_inode_extrefs()
2650 struct btrfs_path *path = ipath->btrfs_path; in iterate_inode_extrefs()
2658 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, in iterate_inode_extrefs()
2659 &offset); in iterate_inode_extrefs()
2663 ret = found ? 0 : -ENOENT; in iterate_inode_extrefs()
2668 slot = path->slots[0]; in iterate_inode_extrefs()
2669 eb = btrfs_clone_extent_buffer(path->nodes[0]); in iterate_inode_extrefs()
2671 ret = -ENOMEM; in iterate_inode_extrefs()
2687 (unsigned long)&extref->name, eb, ipath); in iterate_inode_extrefs()
2696 offset++; in iterate_inode_extrefs()
2713 int i = ipath->fspath->elem_cnt; in inode_to_path()
2717 bytes_left = ipath->fspath->bytes_left > s_ptr ? in inode_to_path()
2718 ipath->fspath->bytes_left - s_ptr : 0; in inode_to_path()
2720 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; in inode_to_path()
2721 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, in inode_to_path()
2727 ipath->fspath->val[i] = (u64)(unsigned long)fspath; in inode_to_path()
2728 ++ipath->fspath->elem_cnt; in inode_to_path()
2729 ipath->fspath->bytes_left = fspath - fspath_min; in inode_to_path()
2731 ++ipath->fspath->elem_missed; in inode_to_path()
2732 ipath->fspath->bytes_missing += fspath_min - fspath; in inode_to_path()
2733 ipath->fspath->bytes_left = 0; in inode_to_path()
2741 * it has been created large enough. each path is zero-terminated and accessed in paths_from_inode()
2742 * from ipath->fspath->val[i].
2743 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2744 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2745 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2746 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2757 else if (ret != -ENOENT) in paths_from_inode()
2761 if (ret == -ENOENT && found_refs) in paths_from_inode()
2775 return ERR_PTR(-ENOMEM); in init_data_container()
2778 data->bytes_left = total_bytes - sizeof(*data); in init_data_container()
2780 data->bytes_missing = sizeof(*data) - total_bytes; in init_data_container()
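init_data_container() above sizes one allocation for a header plus a flexible payload: bytes_left records the payload space when total_bytes covers the header, and bytes_missing records the shortfall when it does not. A userspace equivalent of that accounting, with the same field layout as struct btrfs_data_container:

    #include <stdint.h>
    #include <stdlib.h>

    struct data_container {
            uint32_t bytes_left, bytes_missing;
            uint32_t elem_cnt, elem_missed;
            uint64_t val[];                 /* flexible array member */
    };

    static struct data_container *init_data_container(uint32_t total_bytes)
    {
            size_t alloc = total_bytes > sizeof(struct data_container) ?
                           total_bytes : sizeof(struct data_container);
            struct data_container *data = calloc(1, alloc);

            if (!data)
                    return NULL;            /* the kernel returns ERR_PTR(-ENOMEM) */
            if (total_bytes >= sizeof(*data))
                    data->bytes_left = total_bytes - sizeof(*data);
            else
                    data->bytes_missing = sizeof(*data) - total_bytes;
            return data;
    }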
2788 * information will be total_bytes - sizeof(struct inode_fs_paths).
2804 return ERR_PTR(-ENOMEM); in init_ipath()
2807 ifp->btrfs_path = path; in init_ipath()
2808 ifp->fspath = fspath; in init_ipath()
2809 ifp->fs_root = fs_root; in init_ipath()
2818 kvfree(ipath->fspath); in free_ipath()
2830 ret->path = btrfs_alloc_path(); in btrfs_backref_iter_alloc()
2831 if (!ret->path) { in btrfs_backref_iter_alloc()
2837 ret->path->search_commit_root = 1; in btrfs_backref_iter_alloc()
2838 ret->path->skip_locking = 1; in btrfs_backref_iter_alloc()
2839 ret->fs_info = fs_info; in btrfs_backref_iter_alloc()
2846 struct btrfs_fs_info *fs_info = iter->fs_info; in btrfs_backref_iter_start()
2848 struct btrfs_path *path = iter->path; in btrfs_backref_iter_start()
2855 key.offset = (u64)-1; in btrfs_backref_iter_start()
2856 iter->bytenr = bytenr; in btrfs_backref_iter_start()
2862 ret = -EUCLEAN; in btrfs_backref_iter_start()
2865 if (path->slots[0] == 0) { in btrfs_backref_iter_start()
2867 ret = -EUCLEAN; in btrfs_backref_iter_start()
2870 path->slots[0]--; in btrfs_backref_iter_start()
2872 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in btrfs_backref_iter_start()
2875 ret = -ENOENT; in btrfs_backref_iter_start()
2878 memcpy(&iter->cur_key, &key, sizeof(key)); in btrfs_backref_iter_start()
2879 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_start()
2880 path->slots[0]); in btrfs_backref_iter_start()
2881 iter->end_ptr = (u32)(iter->item_ptr + in btrfs_backref_iter_start()
2882 btrfs_item_size(path->nodes[0], path->slots[0])); in btrfs_backref_iter_start()
2883 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], in btrfs_backref_iter_start()
2889 * This is an extra precaution for non-skinny-metadata, where in btrfs_backref_iter_start()
2893 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) { in btrfs_backref_iter_start()
2894 ret = -ENOTSUPP; in btrfs_backref_iter_start()
2897 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei)); in btrfs_backref_iter_start()
2900 if (iter->cur_ptr >= iter->end_ptr) { in btrfs_backref_iter_start()
2905 ret = -ENOENT; in btrfs_backref_iter_start()
2911 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, in btrfs_backref_iter_start()
2912 path->slots[0]); in btrfs_backref_iter_start()
2913 if (iter->cur_key.objectid != bytenr || in btrfs_backref_iter_start()
2914 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY && in btrfs_backref_iter_start()
2915 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) { in btrfs_backref_iter_start()
2916 ret = -ENOENT; in btrfs_backref_iter_start()
2919 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_start()
2920 path->slots[0]); in btrfs_backref_iter_start()
2921 iter->item_ptr = iter->cur_ptr; in btrfs_backref_iter_start()
2922 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size( in btrfs_backref_iter_start()
2923 path->nodes[0], path->slots[0])); in btrfs_backref_iter_start()
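btrfs_backref_iter_start() above uses a common btree idiom, also visible in find_parent_nodes(): search for (bytenr, (u64)-1), a key greater than any real item for that extent, then step path->slots[0] back by one to land on the last matching item. The same idea on a sorted array:

    /* Returns the index of the last key <= target, or -1 if none. */
    static int find_last_le(const unsigned long long *keys, int n,
                            unsigned long long target)
    {
            int lo = 0, hi = n;             /* invariant: keys[hi..] > target */

            while (lo < hi) {
                    int mid = lo + (hi - lo) / 2;

                    if (keys[mid] > target)
                            hi = mid;
                    else
                            lo = mid + 1;
            }
            return lo - 1;                  /* the "path->slots[0]--" step */
    }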
2936 * Caller needs to check whether it's an inline ref or not by iter->cur_key.
2946 struct btrfs_path *path = iter->path; in btrfs_backref_iter_next()
2953 ASSERT(iter->cur_ptr < iter->end_ptr); in btrfs_backref_iter_next()
2963 ((unsigned long)iter->cur_ptr); in btrfs_backref_iter_next()
2968 iter->cur_ptr += size; in btrfs_backref_iter_next()
2969 if (iter->cur_ptr < iter->end_ptr) in btrfs_backref_iter_next()
2976 extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr); in btrfs_backref_iter_next()
2977 ret = btrfs_next_item(extent_root, iter->path); in btrfs_backref_iter_next()
2981 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]); in btrfs_backref_iter_next()
2982 if (iter->cur_key.objectid != iter->bytenr || in btrfs_backref_iter_next()
2983 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY && in btrfs_backref_iter_next()
2984 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY)) in btrfs_backref_iter_next()
2986 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_next()
2987 path->slots[0]); in btrfs_backref_iter_next()
2988 iter->cur_ptr = iter->item_ptr; in btrfs_backref_iter_next()
2989 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0], in btrfs_backref_iter_next()
2990 path->slots[0]); in btrfs_backref_iter_next()
2999 cache->rb_root = RB_ROOT; in btrfs_backref_init_cache()
3001 INIT_LIST_HEAD(&cache->pending[i]); in btrfs_backref_init_cache()
3002 INIT_LIST_HEAD(&cache->changed); in btrfs_backref_init_cache()
3003 INIT_LIST_HEAD(&cache->detached); in btrfs_backref_init_cache()
3004 INIT_LIST_HEAD(&cache->leaves); in btrfs_backref_init_cache()
3005 INIT_LIST_HEAD(&cache->pending_edge); in btrfs_backref_init_cache()
3006 INIT_LIST_HEAD(&cache->useless_node); in btrfs_backref_init_cache()
3007 cache->fs_info = fs_info; in btrfs_backref_init_cache()
3008 cache->is_reloc = is_reloc; in btrfs_backref_init_cache()
3021 INIT_LIST_HEAD(&node->list); in btrfs_backref_alloc_node()
3022 INIT_LIST_HEAD(&node->upper); in btrfs_backref_alloc_node()
3023 INIT_LIST_HEAD(&node->lower); in btrfs_backref_alloc_node()
3024 RB_CLEAR_NODE(&node->rb_node); in btrfs_backref_alloc_node()
3025 cache->nr_nodes++; in btrfs_backref_alloc_node()
3026 node->level = level; in btrfs_backref_alloc_node()
3027 node->bytenr = bytenr; in btrfs_backref_alloc_node()
3035 struct btrfs_backref_edge *edge; in btrfs_backref_alloc_edge() local
3037 edge = kzalloc(sizeof(*edge), GFP_NOFS); in btrfs_backref_alloc_edge()
3038 if (edge) in btrfs_backref_alloc_edge()
3039 cache->nr_edges++; in btrfs_backref_alloc_edge()
3040 return edge; in btrfs_backref_alloc_edge()
3054 struct btrfs_backref_edge *edge; in btrfs_backref_cleanup_node() local
3059 BUG_ON(!node->lowest && !node->detached); in btrfs_backref_cleanup_node()
3060 while (!list_empty(&node->upper)) { in btrfs_backref_cleanup_node()
3061 edge = list_entry(node->upper.next, struct btrfs_backref_edge, in btrfs_backref_cleanup_node()
3063 upper = edge->node[UPPER]; in btrfs_backref_cleanup_node()
3064 list_del(&edge->list[LOWER]); in btrfs_backref_cleanup_node()
3065 list_del(&edge->list[UPPER]); in btrfs_backref_cleanup_node()
3066 btrfs_backref_free_edge(cache, edge); in btrfs_backref_cleanup_node()
3072 if (list_empty(&upper->lower)) { in btrfs_backref_cleanup_node()
3073 list_add_tail(&upper->lower, &cache->leaves); in btrfs_backref_cleanup_node()
3074 upper->lowest = 1; in btrfs_backref_cleanup_node()
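btrfs_backref_cleanup_node() above unlinks each edge from two lists at once: every btrfs_backref_edge sits on the lower node's upper list and on the upper node's lower list, so it is reachable (and removable) from either endpoint. A self-contained sketch of that linkage, with a minimal doubly linked list standing in for struct list_head:

    enum { LOWER = 0, UPPER = 1 };

    struct list { struct list *prev, *next; };

    /* Call on node->upper and node->lower before linking any edges. */
    static void list_init(struct list *l) { l->prev = l->next = l; }

    static void list_add_tail(struct list *n, struct list *head)
    {
            n->prev = head->prev;
            n->next = head;
            head->prev->next = n;
            head->prev = n;
    }

    struct backref_node {
            struct list upper;              /* edges towards parents */
            struct list lower;              /* edges towards children */
    };

    struct backref_edge {
            struct list list[2];            /* list[LOWER], list[UPPER] */
            struct backref_node *node[2];   /* the two endpoints */
    };

    /* Mirrors btrfs_backref_link_edge() with both LINK_LOWER and LINK_UPPER. */
    static void link_edge(struct backref_edge *edge,
                          struct backref_node *lower, struct backref_node *upper)
    {
            edge->node[LOWER] = lower;
            edge->node[UPPER] = upper;
            list_add_tail(&edge->list[LOWER], &lower->upper);
            list_add_tail(&edge->list[UPPER], &upper->lower);
    }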
3089 while (!list_empty(&cache->detached)) { in btrfs_backref_release_cache()
3090 node = list_entry(cache->detached.next, in btrfs_backref_release_cache()
3095 while (!list_empty(&cache->leaves)) { in btrfs_backref_release_cache()
3096 node = list_entry(cache->leaves.next, in btrfs_backref_release_cache()
3102 while (!list_empty(&cache->pending[i])) { in btrfs_backref_release_cache()
3103 node = list_first_entry(&cache->pending[i], in btrfs_backref_release_cache()
3109 ASSERT(list_empty(&cache->pending_edge)); in btrfs_backref_release_cache()
3110 ASSERT(list_empty(&cache->useless_node)); in btrfs_backref_release_cache()
3111 ASSERT(list_empty(&cache->changed)); in btrfs_backref_release_cache()
3112 ASSERT(list_empty(&cache->detached)); in btrfs_backref_release_cache()
3113 ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); in btrfs_backref_release_cache()
3114 ASSERT(!cache->nr_nodes); in btrfs_backref_release_cache()
3115 ASSERT(!cache->nr_edges); in btrfs_backref_release_cache()
3127 * type is btrfs_inline_ref_type, offset is
3134 struct btrfs_backref_edge *edge; in handle_direct_tree_backref() local
3138 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY); in handle_direct_tree_backref()
3141 if (ref_key->objectid == ref_key->offset) { in handle_direct_tree_backref()
3144 cur->is_reloc_root = 1; in handle_direct_tree_backref()
3146 if (cache->is_reloc) { in handle_direct_tree_backref()
3147 root = find_reloc_root(cache->fs_info, cur->bytenr); in handle_direct_tree_backref()
3149 return -ENOENT; in handle_direct_tree_backref()
3150 cur->root = root; in handle_direct_tree_backref()
3156 list_add(&cur->list, &cache->useless_node); in handle_direct_tree_backref()
3161 edge = btrfs_backref_alloc_edge(cache); in handle_direct_tree_backref()
3162 if (!edge) in handle_direct_tree_backref()
3163 return -ENOMEM; in handle_direct_tree_backref()
3165 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset); in handle_direct_tree_backref()
3168 upper = btrfs_backref_alloc_node(cache, ref_key->offset, in handle_direct_tree_backref()
3169 cur->level + 1); in handle_direct_tree_backref()
3171 btrfs_backref_free_edge(cache, edge); in handle_direct_tree_backref()
3172 return -ENOMEM; in handle_direct_tree_backref()
3179 list_add_tail(&edge->list[UPPER], &cache->pending_edge); in handle_direct_tree_backref()
3183 ASSERT(upper->checked); in handle_direct_tree_backref()
3184 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_direct_tree_backref()
3186 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER); in handle_direct_tree_backref()
3210 struct btrfs_fs_info *fs_info = cache->fs_info; in handle_indirect_tree_backref()
3213 struct btrfs_backref_edge *edge; in handle_indirect_tree_backref() local
3221 root = btrfs_get_fs_root(fs_info, ref_key->offset, false); in handle_indirect_tree_backref()
3224 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in handle_indirect_tree_backref()
3225 cur->cowonly = 1; in handle_indirect_tree_backref()
3227 if (btrfs_root_level(&root->root_item) == cur->level) { in handle_indirect_tree_backref()
3229 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr); in handle_indirect_tree_backref()
3237 * completely relying on direct backref (key->offset is parent in handle_indirect_tree_backref()
3240 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) { in handle_indirect_tree_backref()
3242 list_add(&cur->list, &cache->useless_node); in handle_indirect_tree_backref()
3244 cur->root = root; in handle_indirect_tree_backref()
3249 level = cur->level + 1; in handle_indirect_tree_backref()
3252 path->search_commit_root = 1; in handle_indirect_tree_backref()
3253 path->skip_locking = 1; in handle_indirect_tree_backref()
3254 path->lowest_level = level; in handle_indirect_tree_backref()
3256 path->lowest_level = 0; in handle_indirect_tree_backref()
3261 if (ret > 0 && path->slots[level] > 0) in handle_indirect_tree_backref()
3262 path->slots[level]--; in handle_indirect_tree_backref()
3264 eb = path->nodes[level]; in handle_indirect_tree_backref()
3265 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) { in handle_indirect_tree_backref()
3268 cur->bytenr, level - 1, root->root_key.objectid, in handle_indirect_tree_backref()
3269 tree_key->objectid, tree_key->type, tree_key->offset); in handle_indirect_tree_backref()
3271 ret = -ENOENT; in handle_indirect_tree_backref()
3278 if (!path->nodes[level]) { in handle_indirect_tree_backref()
3279 ASSERT(btrfs_root_bytenr(&root->root_item) == in handle_indirect_tree_backref()
3280 lower->bytenr); in handle_indirect_tree_backref()
3283 cache->is_reloc) { in handle_indirect_tree_backref()
3285 list_add(&lower->list, &cache->useless_node); in handle_indirect_tree_backref()
3287 lower->root = root; in handle_indirect_tree_backref()
3292 edge = btrfs_backref_alloc_edge(cache); in handle_indirect_tree_backref()
3293 if (!edge) { in handle_indirect_tree_backref()
3295 ret = -ENOMEM; in handle_indirect_tree_backref()
3299 eb = path->nodes[level]; in handle_indirect_tree_backref()
3300 rb_node = rb_simple_search(&cache->rb_root, eb->start); in handle_indirect_tree_backref()
3302 upper = btrfs_backref_alloc_node(cache, eb->start, in handle_indirect_tree_backref()
3303 lower->level + 1); in handle_indirect_tree_backref()
3306 btrfs_backref_free_edge(cache, edge); in handle_indirect_tree_backref()
3307 ret = -ENOMEM; in handle_indirect_tree_backref()
3310 upper->owner = btrfs_header_owner(eb); in handle_indirect_tree_backref()
3311 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in handle_indirect_tree_backref()
3312 upper->cowonly = 1; in handle_indirect_tree_backref()
3319 upper->checked = 0; in handle_indirect_tree_backref()
3321 upper->checked = 1; in handle_indirect_tree_backref()
3328 if (!upper->checked && need_check) { in handle_indirect_tree_backref()
3330 list_add_tail(&edge->list[UPPER], in handle_indirect_tree_backref()
3331 &cache->pending_edge); in handle_indirect_tree_backref()
3333 if (upper->checked) in handle_indirect_tree_backref()
3335 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_indirect_tree_backref()
3340 ASSERT(upper->checked); in handle_indirect_tree_backref()
3341 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_indirect_tree_backref()
3342 if (!upper->owner) in handle_indirect_tree_backref()
3343 upper->owner = btrfs_header_owner(eb); in handle_indirect_tree_backref()
3345 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER); in handle_indirect_tree_backref()
3363 * links aren't yet bi-directional. We need to finish such links.
3378 struct btrfs_backref_edge *edge; in btrfs_backref_add_tree_node() local
3382 ret = btrfs_backref_iter_start(iter, cur->bytenr); in btrfs_backref_add_tree_node()
3395 ret = -EUCLEAN; in btrfs_backref_add_tree_node()
3399 WARN_ON(cur->checked); in btrfs_backref_add_tree_node()
3400 if (!list_empty(&cur->upper)) { in btrfs_backref_add_tree_node()
3405 ASSERT(list_is_singular(&cur->upper)); in btrfs_backref_add_tree_node()
3406 edge = list_entry(cur->upper.next, struct btrfs_backref_edge, in btrfs_backref_add_tree_node()
3408 ASSERT(list_empty(&edge->list[UPPER])); in btrfs_backref_add_tree_node()
3409 exist = edge->node[UPPER]; in btrfs_backref_add_tree_node()
3414 if (!exist->checked) in btrfs_backref_add_tree_node()
3415 list_add_tail(&edge->list[UPPER], &cache->pending_edge); in btrfs_backref_add_tree_node()
3428 key.objectid = iter->bytenr; in btrfs_backref_add_tree_node()
3434 ((unsigned long)iter->cur_ptr); in btrfs_backref_add_tree_node()
3438 ret = -EUCLEAN; in btrfs_backref_add_tree_node()
3442 key.offset = btrfs_extent_inline_ref_offset(eb, iref); in btrfs_backref_add_tree_node()
3444 key.type = iter->cur_key.type; in btrfs_backref_add_tree_node()
3445 key.offset = iter->cur_key.offset; in btrfs_backref_add_tree_node()
3454 exist->owner == key.offset) || in btrfs_backref_add_tree_node()
3456 exist->bytenr == key.offset))) { in btrfs_backref_add_tree_node()
3461 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */ in btrfs_backref_add_tree_node()
3469 * offset means the root objectid. We need to search in btrfs_backref_add_tree_node()
3478 * Unrecognized tree backref items (if it can pass tree-checker) in btrfs_backref_add_tree_node()
3483 cur->checked = 1; in btrfs_backref_add_tree_node()
3496 struct list_head *useless_node = &cache->useless_node; in btrfs_backref_finish_upper_links()
3497 struct btrfs_backref_edge *edge; in btrfs_backref_finish_upper_links() local
3501 ASSERT(start->checked); in btrfs_backref_finish_upper_links()
3503 /* Insert this node to cache if it's not COW-only */ in btrfs_backref_finish_upper_links()
3504 if (!start->cowonly) { in btrfs_backref_finish_upper_links()
3505 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr, in btrfs_backref_finish_upper_links()
3506 &start->rb_node); in btrfs_backref_finish_upper_links()
3508 btrfs_backref_panic(cache->fs_info, start->bytenr, in btrfs_backref_finish_upper_links()
3509 -EEXIST); in btrfs_backref_finish_upper_links()
3510 list_add_tail(&start->lower, &cache->leaves); in btrfs_backref_finish_upper_links()
3518 list_for_each_entry(edge, &start->upper, list[LOWER]) in btrfs_backref_finish_upper_links()
3519 list_add_tail(&edge->list[UPPER], &pending_edge); in btrfs_backref_finish_upper_links()
3525 edge = list_first_entry(&pending_edge, in btrfs_backref_finish_upper_links()
3527 list_del_init(&edge->list[UPPER]); in btrfs_backref_finish_upper_links()
3528 upper = edge->node[UPPER]; in btrfs_backref_finish_upper_links()
3529 lower = edge->node[LOWER]; in btrfs_backref_finish_upper_links()
3532 if (upper->detached) { in btrfs_backref_finish_upper_links()
3533 list_del(&edge->list[LOWER]); in btrfs_backref_finish_upper_links()
3534 btrfs_backref_free_edge(cache, edge); in btrfs_backref_finish_upper_links()
3537 if (list_empty(&lower->upper)) in btrfs_backref_finish_upper_links()
3538 list_add(&lower->list, useless_node); in btrfs_backref_finish_upper_links()
3545 * So if we have upper->rb_node populated, this means a cache in btrfs_backref_finish_upper_links()
3546 * hit. We only need to link the edge, as @upper and all its in btrfs_backref_finish_upper_links()
3549 if (!RB_EMPTY_NODE(&upper->rb_node)) { in btrfs_backref_finish_upper_links()
3550 if (upper->lowest) { in btrfs_backref_finish_upper_links()
3551 list_del_init(&upper->lower); in btrfs_backref_finish_upper_links()
3552 upper->lowest = 0; in btrfs_backref_finish_upper_links()
3555 list_add_tail(&edge->list[UPPER], &upper->lower); in btrfs_backref_finish_upper_links()
3560 if (!upper->checked) { in btrfs_backref_finish_upper_links()
3562 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3565 /* Sanity check, COW-only node has non-COW-only parent */ in btrfs_backref_finish_upper_links()
3566 if (start->cowonly != upper->cowonly) { in btrfs_backref_finish_upper_links()
3568 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3571 /* Only cache non-COW-only (subvolume trees) tree blocks */ in btrfs_backref_finish_upper_links()
3572 if (!upper->cowonly) { in btrfs_backref_finish_upper_links()
3573 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr, in btrfs_backref_finish_upper_links()
3574 &upper->rb_node); in btrfs_backref_finish_upper_links()
3576 btrfs_backref_panic(cache->fs_info, in btrfs_backref_finish_upper_links()
3577 upper->bytenr, -EEXIST); in btrfs_backref_finish_upper_links()
3578 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3582 list_add_tail(&edge->list[UPPER], &upper->lower); in btrfs_backref_finish_upper_links()
3588 list_for_each_entry(edge, &upper->upper, list[LOWER]) in btrfs_backref_finish_upper_links()
3589 list_add_tail(&edge->list[UPPER], &pending_edge); in btrfs_backref_finish_upper_links()
3599 struct btrfs_backref_edge *edge; in btrfs_backref_error_cleanup() local
3601 while (!list_empty(&cache->useless_node)) { in btrfs_backref_error_cleanup()
3602 lower = list_first_entry(&cache->useless_node, in btrfs_backref_error_cleanup()
3604 list_del_init(&lower->list); in btrfs_backref_error_cleanup()
3606 while (!list_empty(&cache->pending_edge)) { in btrfs_backref_error_cleanup()
3607 edge = list_first_entry(&cache->pending_edge, in btrfs_backref_error_cleanup()
3609 list_del(&edge->list[UPPER]); in btrfs_backref_error_cleanup()
3610 list_del(&edge->list[LOWER]); in btrfs_backref_error_cleanup()
3611 lower = edge->node[LOWER]; in btrfs_backref_error_cleanup()
3612 upper = edge->node[UPPER]; in btrfs_backref_error_cleanup()
3613 btrfs_backref_free_edge(cache, edge); in btrfs_backref_error_cleanup()
3619 if (list_empty(&lower->upper) && in btrfs_backref_error_cleanup()
3620 RB_EMPTY_NODE(&lower->rb_node)) in btrfs_backref_error_cleanup()
3621 list_add(&lower->list, &cache->useless_node); in btrfs_backref_error_cleanup()
3623 if (!RB_EMPTY_NODE(&upper->rb_node)) in btrfs_backref_error_cleanup()
3627 list_for_each_entry(edge, &upper->upper, list[LOWER]) in btrfs_backref_error_cleanup()
3628 list_add_tail(&edge->list[UPPER], in btrfs_backref_error_cleanup()
3629 &cache->pending_edge); in btrfs_backref_error_cleanup()
3630 if (list_empty(&upper->upper)) in btrfs_backref_error_cleanup()
3631 list_add(&upper->list, &cache->useless_node); in btrfs_backref_error_cleanup()
3634 while (!list_empty(&cache->useless_node)) { in btrfs_backref_error_cleanup()
3635 lower = list_first_entry(&cache->useless_node, in btrfs_backref_error_cleanup()
3637 list_del_init(&lower->list); in btrfs_backref_error_cleanup()
3644 ASSERT(list_empty(&cache->useless_node) && in btrfs_backref_error_cleanup()
3645 list_empty(&cache->pending_edge)); in btrfs_backref_error_cleanup()