backref.c (9e8238020c5beba64e7ffafbb7ea0fb02fe68270) vs. backref.c (c15c2ec07a26b251040943a1a9f90d3037f041e5)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2011 STRATO. All rights reserved.
4 */
5
6#include <linux/mm.h>
7#include <linux/rbtree.h>
8#include <trace/events/btrfs.h>
9#include "ctree.h"
10#include "disk-io.h"
11#include "backref.h"
12#include "ulist.h"
13#include "transaction.h"
14#include "delayed-ref.h"
15#include "locking.h"
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2011 STRATO. All rights reserved.
4 */
5
6#include <linux/mm.h>
7#include <linux/rbtree.h>
8#include <trace/events/btrfs.h>
9#include "ctree.h"
10#include "disk-io.h"
11#include "backref.h"
12#include "ulist.h"
13#include "transaction.h"
14#include "delayed-ref.h"
15#include "locking.h"
16#include "misc.h"
16
17/* Just an arbitrary number so we can be sure this happened */
18#define BACKREF_FOUND_SHARED 6
19
20struct extent_inode_elem {
21 u64 inum;
22 u64 offset;
23 struct extent_inode_elem *next;

--- 508 unchanged lines hidden ---

532 */
533static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
534 struct btrfs_path *path, u64 time_seq,
535 struct preftrees *preftrees,
536 struct prelim_ref *ref, struct ulist *parents,
537 const u64 *extent_item_pos, bool ignore_offset)
538{
539 struct btrfs_root *root;
540 struct btrfs_key root_key;
541 struct extent_buffer *eb;
542 int ret = 0;
543 int root_level;
544 int level = ref->level;
545 struct btrfs_key search_key = ref->key_for_search;
546
547 root_key.objectid = ref->root_id;
548 root_key.type = BTRFS_ROOT_ITEM_KEY;
549 root_key.offset = (u64)-1;
550
551 root = btrfs_get_fs_root(fs_info, &root_key, false);
552 if (IS_ERR(root)) {
553 ret = PTR_ERR(root);
554 goto out_free;
555 }
556
557 if (!path->search_commit_root &&
558 test_bit(BTRFS_ROOT_DELETING, &root->state)) {
559 ret = -ENOENT;

533 */
534static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
535 struct btrfs_path *path, u64 time_seq,
536 struct preftrees *preftrees,
537 struct prelim_ref *ref, struct ulist *parents,
538 const u64 *extent_item_pos, bool ignore_offset)
539{
540 struct btrfs_root *root;
541 struct extent_buffer *eb;
542 int ret = 0;
543 int root_level;
544 int level = ref->level;
545 struct btrfs_key search_key = ref->key_for_search;
546
547 root = btrfs_get_fs_root(fs_info, ref->root_id, false);
548 if (IS_ERR(root)) {
549 ret = PTR_ERR(root);
550 goto out_free;
551 }
552
553 if (!path->search_commit_root &&
554 test_bit(BTRFS_ROOT_DELETING, &root->state)) {
555 ret = -ENOENT;

--- 900 unchanged lines hidden ---

1460
1461 ULIST_ITER_INIT(&uiter);
1462 while (1) {
1463 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1464 tmp, *roots, NULL, NULL, ignore_offset);
1465 if (ret < 0 && ret != -ENOENT) {
1466 ulist_free(tmp);
1467 ulist_free(*roots);
1468 return ret;
1469 }
1470 node = ulist_next(tmp, &uiter);
1471 if (!node)
1472 break;
1473 bytenr = node->val;
1474 cond_resched();
1475 }

1456
1457 ULIST_ITER_INIT(&uiter);
1458 while (1) {
1459 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1460 tmp, *roots, NULL, NULL, ignore_offset);
1461 if (ret < 0 && ret != -ENOENT) {
1462 ulist_free(tmp);
1463 ulist_free(*roots);
1464 *roots = NULL;
1465 return ret;
1466 }
1467 node = ulist_next(tmp, &uiter);
1468 if (!node)
1469 break;
1470 bytenr = node->val;
1471 cond_resched();
1472 }

--- 814 unchanged lines hidden ---

2287
2288void free_ipath(struct inode_fs_paths *ipath)
2289{
2290 if (!ipath)
2291 return;
2292 kvfree(ipath->fspath);
2293 kfree(ipath);
2294}
2295
2296struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2297 struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2298{
2299 struct btrfs_backref_iter *ret;
2300
2301 ret = kzalloc(sizeof(*ret), gfp_flag);
2302 if (!ret)
2303 return NULL;
2304
2305 ret->path = btrfs_alloc_path();
2306 if (!ret->path) {
2307 kfree(ret);
2308 return NULL;
2309 }
2310
2311 /* Current backref iterator only supports iteration in commit root */
2312 ret->path->search_commit_root = 1;
2313 ret->path->skip_locking = 1;
2314 ret->fs_info = fs_info;
2315
2316 return ret;
2317}
2318
2319int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2320{
2321 struct btrfs_fs_info *fs_info = iter->fs_info;
2322 struct btrfs_path *path = iter->path;
2323 struct btrfs_extent_item *ei;
2324 struct btrfs_key key;
2325 int ret;
2326
2327 key.objectid = bytenr;
2328 key.type = BTRFS_METADATA_ITEM_KEY;
2329 key.offset = (u64)-1;
2330 iter->bytenr = bytenr;
2331
2332 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2333 if (ret < 0)
2334 return ret;
2335 if (ret == 0) {
2336 ret = -EUCLEAN;
2337 goto release;
2338 }
2339 if (path->slots[0] == 0) {
2340 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2341 ret = -EUCLEAN;
2342 goto release;
2343 }
2344 path->slots[0]--;
2345
2346 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2347 if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2348 key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2349 ret = -ENOENT;
2350 goto release;
2351 }
2352 memcpy(&iter->cur_key, &key, sizeof(key));
2353 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2354 path->slots[0]);
2355 iter->end_ptr = (u32)(iter->item_ptr +
2356 btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2357 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2358 struct btrfs_extent_item);
2359
2360 /*
 2361 * Only iteration of tree backrefs is supported for now.
 2362 *
 2363 * This is an extra precaution for non skinny-metadata, where
 2364 * EXTENT_ITEM is also used for tree blocks, so extent flags are the
 2365 * only way to tell whether this is a tree block.
2366 */
2367 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2368 ret = -ENOTSUPP;
2369 goto release;
2370 }
2371 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2372
2373 /* If there is no inline backref, go search for keyed backref */
2374 if (iter->cur_ptr >= iter->end_ptr) {
2375 ret = btrfs_next_item(fs_info->extent_root, path);
2376
2377 /* No inline nor keyed ref */
2378 if (ret > 0) {
2379 ret = -ENOENT;
2380 goto release;
2381 }
2382 if (ret < 0)
2383 goto release;
2384
2385 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2386 path->slots[0]);
2387 if (iter->cur_key.objectid != bytenr ||
2388 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2389 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2390 ret = -ENOENT;
2391 goto release;
2392 }
2393 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2394 path->slots[0]);
2395 iter->item_ptr = iter->cur_ptr;
2396 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2397 path->nodes[0], path->slots[0]));
2398 }
2399
2400 return 0;
2401release:
2402 btrfs_backref_iter_release(iter);
2403 return ret;
2404}
2405
2406/*
 2407 * Go to the next backref item of the current bytenr, which can be either
 2408 * inlined or keyed.
 2409 *
 2410 * The caller needs to check whether it's an inline ref or not via iter->cur_key.
 2411 *
 2412 * Return 0 if we got the next backref without problem.
 2413 * Return >0 if there is no extra backref for this bytenr.
 2414 * Return <0 if something went wrong.
2415 */
2416int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2417{
2418 struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2419 struct btrfs_path *path = iter->path;
2420 struct btrfs_extent_inline_ref *iref;
2421 int ret;
2422 u32 size;
2423
2424 if (btrfs_backref_iter_is_inline_ref(iter)) {
2425 /* We're still inside the inline refs */
2426 ASSERT(iter->cur_ptr < iter->end_ptr);
2427
2428 if (btrfs_backref_has_tree_block_info(iter)) {
2429 /* First tree block info */
2430 size = sizeof(struct btrfs_tree_block_info);
2431 } else {
2432 /* Use inline ref type to determine the size */
2433 int type;
2434
2435 iref = (struct btrfs_extent_inline_ref *)
2436 ((unsigned long)iter->cur_ptr);
2437 type = btrfs_extent_inline_ref_type(eb, iref);
2438
2439 size = btrfs_extent_inline_ref_size(type);
2440 }
2441 iter->cur_ptr += size;
2442 if (iter->cur_ptr < iter->end_ptr)
2443 return 0;
2444
2445 /* All inline items iterated, fall through */
2446 }
2447
2448 /* We're at keyed items, there is no inline item, go to the next one */
2449 ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2450 if (ret)
2451 return ret;
2452
2453 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2454 if (iter->cur_key.objectid != iter->bytenr ||
2455 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2456 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2457 return 1;
2458 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2459 path->slots[0]);
2460 iter->cur_ptr = iter->item_ptr;
2461 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2462 path->slots[0]);
2463 return 0;
2464}
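
The three functions above (btrfs_backref_iter_alloc/_start/_next) form a small iterator over all backref items of one metadata extent. A minimal caller sketch, not part of backref.c: it uses the btrfs_backref_iter_release(), btrfs_backref_get_eb(), btrfs_backref_has_tree_block_info() and btrfs_backref_iter_is_inline_ref() helpers seen in this file, and assumes btrfs_backref_iter_free() is the matching small helper declared in backref.h.

/* Sketch only: print every tree backref of one metadata extent. */
static int walk_tree_backrefs(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	if (ret < 0) {
		btrfs_backref_iter_free(iter);	/* assumed helper from backref.h */
		return ret;
	}

	/* Non skinny-metadata extents start with a btrfs_tree_block_info, skip it */
	if (btrfs_backref_has_tree_block_info(iter))
		ret = btrfs_backref_iter_next(iter);

	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb = btrfs_backref_get_eb(iter);

		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			btrfs_info(fs_info, "inline ref type %d",
				   btrfs_extent_inline_ref_type(eb, iref));
		} else {
			btrfs_info(fs_info, "keyed ref type %u offset %llu",
				   iter->cur_key.type, iter->cur_key.offset);
		}
	}
	if (ret > 0)	/* >0 simply means no more backrefs, not an error */
		ret = 0;
	btrfs_backref_iter_release(iter);
	btrfs_backref_iter_free(iter);	/* assumed helper from backref.h */
	return ret;
}
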
2465
2466void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2467 struct btrfs_backref_cache *cache, int is_reloc)
2468{
2469 int i;
2470
2471 cache->rb_root = RB_ROOT;
2472 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2473 INIT_LIST_HEAD(&cache->pending[i]);
2474 INIT_LIST_HEAD(&cache->changed);
2475 INIT_LIST_HEAD(&cache->detached);
2476 INIT_LIST_HEAD(&cache->leaves);
2477 INIT_LIST_HEAD(&cache->pending_edge);
2478 INIT_LIST_HEAD(&cache->useless_node);
2479 cache->fs_info = fs_info;
2480 cache->is_reloc = is_reloc;
2481}
2482
2483struct btrfs_backref_node *btrfs_backref_alloc_node(
2484 struct btrfs_backref_cache *cache, u64 bytenr, int level)
2485{
2486 struct btrfs_backref_node *node;
2487
2488 ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2489 node = kzalloc(sizeof(*node), GFP_NOFS);
2490 if (!node)
2491 return node;
2492
2493 INIT_LIST_HEAD(&node->list);
2494 INIT_LIST_HEAD(&node->upper);
2495 INIT_LIST_HEAD(&node->lower);
2496 RB_CLEAR_NODE(&node->rb_node);
2497 cache->nr_nodes++;
2498 node->level = level;
2499 node->bytenr = bytenr;
2500
2501 return node;
2502}
2503
2504struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2505 struct btrfs_backref_cache *cache)
2506{
2507 struct btrfs_backref_edge *edge;
2508
2509 edge = kzalloc(sizeof(*edge), GFP_NOFS);
2510 if (edge)
2511 cache->nr_edges++;
2512 return edge;
2513}
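
Together with btrfs_backref_init_cache() above, a node/edge pair is typically wired up as follows. A minimal sketch, not part of backref.c, assuming the btrfs_backref_link_edge() and btrfs_backref_free_edge() inline helpers and the LINK_LOWER flag from backref.h; handle_direct_tree_backref() below does the real thing, including the rb-tree lookup for already-cached parents.

/* Sketch only: create a parent node and connect it to @child with an edge. */
static int add_parent_link(struct btrfs_backref_cache *cache,
			   struct btrfs_backref_node *child, u64 parent_bytenr)
{
	struct btrfs_backref_node *parent;
	struct btrfs_backref_edge *edge;

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	parent = btrfs_backref_alloc_node(cache, parent_bytenr, child->level + 1);
	if (!parent) {
		btrfs_backref_free_edge(cache, edge);	/* assumed backref.h helper */
		return -ENOMEM;
	}

	/* The parent's own backrefs are not resolved yet, queue the edge */
	list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	/* Sets edge->node[LOWER/UPPER] and puts the edge on child->upper */
	btrfs_backref_link_edge(edge, child, parent, LINK_LOWER);
	return 0;
}
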
2514
2515/*
2516 * Drop the backref node from cache, also cleaning up all its
2517 * upper edges and any uncached nodes in the path.
2518 *
2519 * This cleanup happens bottom up, thus the node should either
2520 * be the lowest node in the cache or a detached node.
2521 */
2522void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2523 struct btrfs_backref_node *node)
2524{
2525 struct btrfs_backref_node *upper;
2526 struct btrfs_backref_edge *edge;
2527
2528 if (!node)
2529 return;
2530
2531 BUG_ON(!node->lowest && !node->detached);
2532 while (!list_empty(&node->upper)) {
2533 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2534 list[LOWER]);
2535 upper = edge->node[UPPER];
2536 list_del(&edge->list[LOWER]);
2537 list_del(&edge->list[UPPER]);
2538 btrfs_backref_free_edge(cache, edge);
2539
2540 if (RB_EMPTY_NODE(&upper->rb_node)) {
2541 BUG_ON(!list_empty(&node->upper));
2542 btrfs_backref_drop_node(cache, node);
2543 node = upper;
2544 node->lowest = 1;
2545 continue;
2546 }
2547 /*
 2548 * Add the node to the leaf node list if no other child block
 2549 * is cached.
2550 */
2551 if (list_empty(&upper->lower)) {
2552 list_add_tail(&upper->lower, &cache->leaves);
2553 upper->lowest = 1;
2554 }
2555 }
2556
2557 btrfs_backref_drop_node(cache, node);
2558}
2559
2560/*
2561 * Release all nodes/edges from current cache
2562 */
2563void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2564{
2565 struct btrfs_backref_node *node;
2566 int i;
2567
2568 while (!list_empty(&cache->detached)) {
2569 node = list_entry(cache->detached.next,
2570 struct btrfs_backref_node, list);
2571 btrfs_backref_cleanup_node(cache, node);
2572 }
2573
2574 while (!list_empty(&cache->leaves)) {
2575 node = list_entry(cache->leaves.next,
2576 struct btrfs_backref_node, lower);
2577 btrfs_backref_cleanup_node(cache, node);
2578 }
2579
2580 cache->last_trans = 0;
2581
2582 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2583 ASSERT(list_empty(&cache->pending[i]));
2584 ASSERT(list_empty(&cache->pending_edge));
2585 ASSERT(list_empty(&cache->useless_node));
2586 ASSERT(list_empty(&cache->changed));
2587 ASSERT(list_empty(&cache->detached));
2588 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2589 ASSERT(!cache->nr_nodes);
2590 ASSERT(!cache->nr_edges);
2591}
2592
2593/*
2594 * Handle direct tree backref
2595 *
 2596 * A direct tree backref means the backref item shows its parent bytenr
 2597 * directly. This is for SHARED_BLOCK_REF backrefs (keyed or inlined).
2598 *
2599 * @ref_key: The converted backref key.
2600 * For keyed backref, it's the item key.
2601 * For inlined backref, objectid is the bytenr,
2602 * type is btrfs_inline_ref_type, offset is
2603 * btrfs_inline_ref_offset.
2604 */
2605static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2606 struct btrfs_key *ref_key,
2607 struct btrfs_backref_node *cur)
2608{
2609 struct btrfs_backref_edge *edge;
2610 struct btrfs_backref_node *upper;
2611 struct rb_node *rb_node;
2612
2613 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2614
2615 /* Only reloc root uses backref pointing to itself */
2616 if (ref_key->objectid == ref_key->offset) {
2617 struct btrfs_root *root;
2618
2619 cur->is_reloc_root = 1;
2620 /* Only reloc backref cache cares about a specific root */
2621 if (cache->is_reloc) {
2622 root = find_reloc_root(cache->fs_info, cur->bytenr);
2623 if (WARN_ON(!root))
2624 return -ENOENT;
2625 cur->root = root;
2626 } else {
2627 /*
2628 * For generic purpose backref cache, reloc root node
2629 * is useless.
2630 */
2631 list_add(&cur->list, &cache->useless_node);
2632 }
2633 return 0;
2634 }
2635
2636 edge = btrfs_backref_alloc_edge(cache);
2637 if (!edge)
2638 return -ENOMEM;
2639
2640 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2641 if (!rb_node) {
2642 /* Parent node not yet cached */
2643 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2644 cur->level + 1);
2645 if (!upper) {
2646 btrfs_backref_free_edge(cache, edge);
2647 return -ENOMEM;
2648 }
2649
2650 /*
 2651 * Backrefs for the upper level block aren't cached yet, add the
 2652 * block to the pending list
2653 */
2654 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2655 } else {
2656 /* Parent node already cached */
2657 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2658 ASSERT(upper->checked);
2659 INIT_LIST_HEAD(&edge->list[UPPER]);
2660 }
2661 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2662 return 0;
2663}
2664
2665/*
2666 * Handle indirect tree backref
2667 *
 2668 * An indirect tree backref means we only know which tree the node belongs to.
2669 * We still need to do a tree search to find out the parents. This is for
2670 * TREE_BLOCK_REF backref (keyed or inlined).
2671 *
2672 * @ref_key: The same as @ref_key in handle_direct_tree_backref()
2673 * @tree_key: The first key of this tree block.
 2674 * @path: A clean (released) path, to avoid allocating a path every time
 2675 * the function gets called.
2676 */
2677static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2678 struct btrfs_path *path,
2679 struct btrfs_key *ref_key,
2680 struct btrfs_key *tree_key,
2681 struct btrfs_backref_node *cur)
2682{
2683 struct btrfs_fs_info *fs_info = cache->fs_info;
2684 struct btrfs_backref_node *upper;
2685 struct btrfs_backref_node *lower;
2686 struct btrfs_backref_edge *edge;
2687 struct extent_buffer *eb;
2688 struct btrfs_root *root;
2689 struct rb_node *rb_node;
2690 int level;
2691 bool need_check = true;
2692 int ret;
2693
2694 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2695 if (IS_ERR(root))
2696 return PTR_ERR(root);
2697 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2698 cur->cowonly = 1;
2699
2700 if (btrfs_root_level(&root->root_item) == cur->level) {
2701 /* Tree root */
2702 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2703 /*
2704 * For reloc backref cache, we may ignore reloc root. But for
2705 * general purpose backref cache, we can't rely on
2706 * btrfs_should_ignore_reloc_root() as it may conflict with
2707 * current running relocation and lead to missing root.
2708 *
2709 * For general purpose backref cache, reloc root detection is
2710 * completely relying on direct backref (key->offset is parent
2711 * bytenr), thus only do such check for reloc cache.
2712 */
2713 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2714 btrfs_put_root(root);
2715 list_add(&cur->list, &cache->useless_node);
2716 } else {
2717 cur->root = root;
2718 }
2719 return 0;
2720 }
2721
2722 level = cur->level + 1;
2723
2724 /* Search the tree to find parent blocks referring to the block */
2725 path->search_commit_root = 1;
2726 path->skip_locking = 1;
2727 path->lowest_level = level;
2728 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2729 path->lowest_level = 0;
2730 if (ret < 0) {
2731 btrfs_put_root(root);
2732 return ret;
2733 }
2734 if (ret > 0 && path->slots[level] > 0)
2735 path->slots[level]--;
2736
2737 eb = path->nodes[level];
2738 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2739 btrfs_err(fs_info,
2740"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2741 cur->bytenr, level - 1, root->root_key.objectid,
2742 tree_key->objectid, tree_key->type, tree_key->offset);
2743 btrfs_put_root(root);
2744 ret = -ENOENT;
2745 goto out;
2746 }
2747 lower = cur;
2748
2749 /* Add all nodes and edges in the path */
2750 for (; level < BTRFS_MAX_LEVEL; level++) {
2751 if (!path->nodes[level]) {
2752 ASSERT(btrfs_root_bytenr(&root->root_item) ==
2753 lower->bytenr);
2754 /* Same as previous should_ignore_reloc_root() call */
2755 if (btrfs_should_ignore_reloc_root(root) &&
2756 cache->is_reloc) {
2757 btrfs_put_root(root);
2758 list_add(&lower->list, &cache->useless_node);
2759 } else {
2760 lower->root = root;
2761 }
2762 break;
2763 }
2764
2765 edge = btrfs_backref_alloc_edge(cache);
2766 if (!edge) {
2767 btrfs_put_root(root);
2768 ret = -ENOMEM;
2769 goto out;
2770 }
2771
2772 eb = path->nodes[level];
2773 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2774 if (!rb_node) {
2775 upper = btrfs_backref_alloc_node(cache, eb->start,
2776 lower->level + 1);
2777 if (!upper) {
2778 btrfs_put_root(root);
2779 btrfs_backref_free_edge(cache, edge);
2780 ret = -ENOMEM;
2781 goto out;
2782 }
2783 upper->owner = btrfs_header_owner(eb);
2784 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2785 upper->cowonly = 1;
2786
2787 /*
2788 * If we know the block isn't shared we can avoid
2789 * checking its backrefs.
2790 */
2791 if (btrfs_block_can_be_shared(root, eb))
2792 upper->checked = 0;
2793 else
2794 upper->checked = 1;
2795
2796 /*
2797 * Add the block to pending list if we need to check its
 2798 * backrefs; we only do this once while walking up a
2799 * tree as we will catch anything else later on.
2800 */
2801 if (!upper->checked && need_check) {
2802 need_check = false;
2803 list_add_tail(&edge->list[UPPER],
2804 &cache->pending_edge);
2805 } else {
2806 if (upper->checked)
2807 need_check = true;
2808 INIT_LIST_HEAD(&edge->list[UPPER]);
2809 }
2810 } else {
2811 upper = rb_entry(rb_node, struct btrfs_backref_node,
2812 rb_node);
2813 ASSERT(upper->checked);
2814 INIT_LIST_HEAD(&edge->list[UPPER]);
2815 if (!upper->owner)
2816 upper->owner = btrfs_header_owner(eb);
2817 }
2818 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2819
2820 if (rb_node) {
2821 btrfs_put_root(root);
2822 break;
2823 }
2824 lower = upper;
2825 upper = NULL;
2826 }
2827out:
2828 btrfs_release_path(path);
2829 return ret;
2830}
2831
2832/*
2833 * Add backref node @cur into @cache.
2834 *
 2835 * NOTE: Even if the function returns 0, @cur is not yet cached as its upper
 2836 * links aren't yet bi-directional. Such links still need to be finished;
 2837 * use btrfs_backref_finish_upper_links() to finish the linkage.
2838 *
2839 * @path: Released path for indirect tree backref lookup
2840 * @iter: Released backref iter for extent tree search
2841 * @node_key: The first key of the tree block
2842 */
2843int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
2844 struct btrfs_path *path,
2845 struct btrfs_backref_iter *iter,
2846 struct btrfs_key *node_key,
2847 struct btrfs_backref_node *cur)
2848{
2849 struct btrfs_fs_info *fs_info = cache->fs_info;
2850 struct btrfs_backref_edge *edge;
2851 struct btrfs_backref_node *exist;
2852 int ret;
2853
2854 ret = btrfs_backref_iter_start(iter, cur->bytenr);
2855 if (ret < 0)
2856 return ret;
2857 /*
2858 * We skip the first btrfs_tree_block_info, as we don't use the key
2859 * stored in it, but fetch it from the tree block
2860 */
2861 if (btrfs_backref_has_tree_block_info(iter)) {
2862 ret = btrfs_backref_iter_next(iter);
2863 if (ret < 0)
2864 goto out;
2865 /* No extra backref? This means the tree block is corrupted */
2866 if (ret > 0) {
2867 ret = -EUCLEAN;
2868 goto out;
2869 }
2870 }
2871 WARN_ON(cur->checked);
2872 if (!list_empty(&cur->upper)) {
2873 /*
2874 * The backref was added previously when processing backref of
2875 * type BTRFS_TREE_BLOCK_REF_KEY
2876 */
2877 ASSERT(list_is_singular(&cur->upper));
2878 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2879 list[LOWER]);
2880 ASSERT(list_empty(&edge->list[UPPER]));
2881 exist = edge->node[UPPER];
2882 /*
 2883 * Add the upper level block to the pending list if we need to check
 2884 * its backrefs
2885 */
2886 if (!exist->checked)
2887 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2888 } else {
2889 exist = NULL;
2890 }
2891
2892 for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
2893 struct extent_buffer *eb;
2894 struct btrfs_key key;
2895 int type;
2896
2897 cond_resched();
2898 eb = btrfs_backref_get_eb(iter);
2899
2900 key.objectid = iter->bytenr;
2901 if (btrfs_backref_iter_is_inline_ref(iter)) {
2902 struct btrfs_extent_inline_ref *iref;
2903
2904 /* Update key for inline backref */
2905 iref = (struct btrfs_extent_inline_ref *)
2906 ((unsigned long)iter->cur_ptr);
2907 type = btrfs_get_extent_inline_ref_type(eb, iref,
2908 BTRFS_REF_TYPE_BLOCK);
2909 if (type == BTRFS_REF_TYPE_INVALID) {
2910 ret = -EUCLEAN;
2911 goto out;
2912 }
2913 key.type = type;
2914 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2915 } else {
2916 key.type = iter->cur_key.type;
2917 key.offset = iter->cur_key.offset;
2918 }
2919
2920 /*
2921 * Parent node found and matches current inline ref, no need to
2922 * rebuild this node for this inline ref
2923 */
2924 if (exist &&
2925 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
2926 exist->owner == key.offset) ||
2927 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
2928 exist->bytenr == key.offset))) {
2929 exist = NULL;
2930 continue;
2931 }
2932
2933 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
2934 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
2935 ret = handle_direct_tree_backref(cache, &key, cur);
2936 if (ret < 0)
2937 goto out;
2938 continue;
2939 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
2940 ret = -EINVAL;
2941 btrfs_print_v0_err(fs_info);
2942 btrfs_handle_fs_error(fs_info, ret, NULL);
2943 goto out;
2944 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
2945 continue;
2946 }
2947
2948 /*
2949 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
2950 * means the root objectid. We need to search the tree to get
2951 * its parent bytenr.
2952 */
2953 ret = handle_indirect_tree_backref(cache, path, &key, node_key,
2954 cur);
2955 if (ret < 0)
2956 goto out;
2957 }
2958 ret = 0;
2959 cur->checked = 1;
2960 WARN_ON(exist);
2961out:
2962 btrfs_backref_iter_release(iter);
2963 return ret;
2964}
2965
2966/*
2967 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
2968 */
2969int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
2970 struct btrfs_backref_node *start)
2971{
2972 struct list_head *useless_node = &cache->useless_node;
2973 struct btrfs_backref_edge *edge;
2974 struct rb_node *rb_node;
2975 LIST_HEAD(pending_edge);
2976
2977 ASSERT(start->checked);
2978
2979 /* Insert this node to cache if it's not COW-only */
2980 if (!start->cowonly) {
2981 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
2982 &start->rb_node);
2983 if (rb_node)
2984 btrfs_backref_panic(cache->fs_info, start->bytenr,
2985 -EEXIST);
2986 list_add_tail(&start->lower, &cache->leaves);
2987 }
2988
2989 /*
2990 * Use breadth first search to iterate all related edges.
2991 *
2992 * The starting points are all the edges of this node
2993 */
2994 list_for_each_entry(edge, &start->upper, list[LOWER])
2995 list_add_tail(&edge->list[UPPER], &pending_edge);
2996
2997 while (!list_empty(&pending_edge)) {
2998 struct btrfs_backref_node *upper;
2999 struct btrfs_backref_node *lower;
3000 struct rb_node *rb_node;
3001
3002 edge = list_first_entry(&pending_edge,
3003 struct btrfs_backref_edge, list[UPPER]);
3004 list_del_init(&edge->list[UPPER]);
3005 upper = edge->node[UPPER];
3006 lower = edge->node[LOWER];
3007
3008 /* Parent is detached, no need to keep any edges */
3009 if (upper->detached) {
3010 list_del(&edge->list[LOWER]);
3011 btrfs_backref_free_edge(cache, edge);
3012
3013 /* Lower node is orphan, queue for cleanup */
3014 if (list_empty(&lower->upper))
3015 list_add(&lower->list, useless_node);
3016 continue;
3017 }
3018
3019 /*
3020 * All new nodes added in current build_backref_tree() haven't
3021 * been linked to the cache rb tree.
3022 * So if we have upper->rb_node populated, this means a cache
3023 * hit. We only need to link the edge, as @upper and all its
3024 * parents have already been linked.
3025 */
3026 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3027 if (upper->lowest) {
3028 list_del_init(&upper->lower);
3029 upper->lowest = 0;
3030 }
3031
3032 list_add_tail(&edge->list[UPPER], &upper->lower);
3033 continue;
3034 }
3035
3036 /* Sanity check, we shouldn't have any unchecked nodes */
3037 if (!upper->checked) {
3038 ASSERT(0);
3039 return -EUCLEAN;
3040 }
3041
3042 /* Sanity check, COW-only node has non-COW-only parent */
3043 if (start->cowonly != upper->cowonly) {
3044 ASSERT(0);
3045 return -EUCLEAN;
3046 }
3047
3048 /* Only cache non-COW-only (subvolume trees) tree blocks */
3049 if (!upper->cowonly) {
3050 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3051 &upper->rb_node);
3052 if (rb_node) {
3053 btrfs_backref_panic(cache->fs_info,
3054 upper->bytenr, -EEXIST);
3055 return -EUCLEAN;
3056 }
3057 }
3058
3059 list_add_tail(&edge->list[UPPER], &upper->lower);
3060
3061 /*
3062 * Also queue all the parent edges of this uncached node
3063 * to finish the upper linkage
3064 */
3065 list_for_each_entry(edge, &upper->upper, list[LOWER])
3066 list_add_tail(&edge->list[UPPER], &pending_edge);
3067 }
3068 return 0;
3069}
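
The comment above btrfs_backref_add_tree_node() describes a two-phase protocol. Below is a hedged sketch, not part of backref.c, of how a caller such as build_backref_tree() is expected to combine it with btrfs_backref_finish_upper_links() and, on failure, btrfs_backref_error_cleanup() (defined next). The helper name is made up for illustration, and the breadth-first processing of cache->pending_edge between the two phases is omitted.

/* Sketch only: build the backref tree above one tree block. */
static struct btrfs_backref_node *build_one_node(struct btrfs_backref_cache *cache,
						 struct btrfs_key *node_key,
						 int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_path *path;
	struct btrfs_backref_node *node;
	int ret;

	iter = btrfs_backref_iter_alloc(cache->fs_info, GFP_NOFS);
	path = btrfs_alloc_path();
	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!iter || !path || !node) {
		/* A real caller would also undo a partially built node here */
		node = ERR_PTR(-ENOMEM);
		goto out;
	}
	/* Assumption: mark the starting node as lowest, mirroring build_backref_tree() */
	node->lowest = 1;

	/* Phase 1: read the backref items, create upward edges and parent nodes */
	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);

	/*
	 * A real caller now drains cache->pending_edge, feeding each queued
	 * upper node back into btrfs_backref_add_tree_node() (omitted here).
	 */

	/* Phase 2: make the upward links bi-directional and insert into the rb tree */
	if (ret >= 0)
		ret = btrfs_backref_finish_upper_links(cache, node);

	if (ret < 0) {
		/* Frees @node, queued edges and any nodes left orphaned */
		btrfs_backref_error_cleanup(cache, node);
		node = ERR_PTR(ret);
	}
out:
	btrfs_backref_iter_free(iter);	/* assumed NULL-safe helper from backref.h */
	btrfs_free_path(path);		/* NULL-safe */
	return node;
}
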
3070
3071void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3072 struct btrfs_backref_node *node)
3073{
3074 struct btrfs_backref_node *lower;
3075 struct btrfs_backref_node *upper;
3076 struct btrfs_backref_edge *edge;
3077
3078 while (!list_empty(&cache->useless_node)) {
3079 lower = list_first_entry(&cache->useless_node,
3080 struct btrfs_backref_node, list);
3081 list_del_init(&lower->list);
3082 }
3083 while (!list_empty(&cache->pending_edge)) {
3084 edge = list_first_entry(&cache->pending_edge,
3085 struct btrfs_backref_edge, list[UPPER]);
3086 list_del(&edge->list[UPPER]);
3087 list_del(&edge->list[LOWER]);
3088 lower = edge->node[LOWER];
3089 upper = edge->node[UPPER];
3090 btrfs_backref_free_edge(cache, edge);
3091
3092 /*
3093 * Lower is no longer linked to any upper backref nodes and
 3094 * isn't in the cache, so we can free it ourselves.
3095 */
3096 if (list_empty(&lower->upper) &&
3097 RB_EMPTY_NODE(&lower->rb_node))
3098 list_add(&lower->list, &cache->useless_node);
3099
3100 if (!RB_EMPTY_NODE(&upper->rb_node))
3101 continue;
3102
3103 /* Add this guy's upper edges to the list to process */
3104 list_for_each_entry(edge, &upper->upper, list[LOWER])
3105 list_add_tail(&edge->list[UPPER],
3106 &cache->pending_edge);
3107 if (list_empty(&upper->upper))
3108 list_add(&upper->list, &cache->useless_node);
3109 }
3110
3111 while (!list_empty(&cache->useless_node)) {
3112 lower = list_first_entry(&cache->useless_node,
3113 struct btrfs_backref_node, list);
3114 list_del_init(&lower->list);
3115 if (lower == node)
3116 node = NULL;
3117 btrfs_backref_free_node(cache, lower);
3118 }
3119
3120 btrfs_backref_cleanup_node(cache, node);
3121 ASSERT(list_empty(&cache->useless_node) &&
3122 list_empty(&cache->pending_edge));
3123}