--- relocation.c (1b60d2ec982a35c2953d81d035e1d7fc7c89f42a)
+++ relocation.c (fc997ed05a9f9d2185b8804fb2d0273e6d9e921a)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2009 Oracle. All rights reserved.
  */

 #include <linux/sched.h>
 #include <linux/pagemap.h>
 #include <linux/writeback.h>

--- 364 unchanged lines hidden ---

 	key.objectid = root_objectid;
 	key.type = BTRFS_ROOT_ITEM_KEY;
 	key.offset = (u64)-1;

 	return btrfs_get_fs_root(fs_info, &key, false);
 }

-/*
- * In handle_one_tree_backref(), we have only linked the lower node to the
- * edge, but the upper node hasn't been linked to the edge.
- * This means we can only iterate through btrfs_backref_node::upper to reach
- * parent edges, but not through btrfs_backref_node::lower to reach child
- * edges.
- *
- * This function finishes linking btrfs_backref_node::lower to the related
- * edges, so that the backref cache can be iterated bi-directionally.
- *
- * Also, this adds the nodes to the backref cache for the next run.
- */
-static int finish_upper_links(struct btrfs_backref_cache *cache,
-			      struct btrfs_backref_node *start)
-{
-	struct list_head *useless_node = &cache->useless_node;
-	struct btrfs_backref_edge *edge;
-	struct rb_node *rb_node;
-	LIST_HEAD(pending_edge);
-
-	ASSERT(start->checked);
-
-	/* Insert this node to cache if it's not COW-only */
-	if (!start->cowonly) {
-		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
-					   &start->rb_node);
-		if (rb_node)
-			btrfs_backref_panic(cache->fs_info, start->bytenr,
-					    -EEXIST);
-		list_add_tail(&start->lower, &cache->leaves);
-	}
-
-	/*
-	 * Use breadth first search to iterate all related edges.
-	 *
-	 * The starting points are all the edges of this node.
-	 */
-	list_for_each_entry(edge, &start->upper, list[LOWER])
-		list_add_tail(&edge->list[UPPER], &pending_edge);
-
-	while (!list_empty(&pending_edge)) {
-		struct btrfs_backref_node *upper;
-		struct btrfs_backref_node *lower;
-		struct rb_node *rb_node;
-
-		edge = list_first_entry(&pending_edge,
-					struct btrfs_backref_edge, list[UPPER]);
-		list_del_init(&edge->list[UPPER]);
-		upper = edge->node[UPPER];
-		lower = edge->node[LOWER];
-
-		/* Parent is detached, no need to keep any edges */
-		if (upper->detached) {
-			list_del(&edge->list[LOWER]);
-			btrfs_backref_free_edge(cache, edge);
-
-			/* Lower node is orphan, queue for cleanup */
-			if (list_empty(&lower->upper))
-				list_add(&lower->list, useless_node);
-			continue;
-		}
-
-		/*
-		 * All new nodes added in current build_backref_tree() haven't
-		 * been linked to the cache rb tree.
-		 * So if we have upper->rb_node populated, this means a cache
-		 * hit. We only need to link the edge, as @upper and all its
-		 * parents have already been linked.
-		 */
-		if (!RB_EMPTY_NODE(&upper->rb_node)) {
-			if (upper->lowest) {
-				list_del_init(&upper->lower);
-				upper->lowest = 0;
-			}
-
-			list_add_tail(&edge->list[UPPER], &upper->lower);
-			continue;
-		}
-
-		/* Sanity check, we shouldn't have any unchecked nodes */
-		if (!upper->checked) {
-			ASSERT(0);
-			return -EUCLEAN;
-		}
-
-		/* Sanity check, COW-only node has non-COW-only parent */
-		if (start->cowonly != upper->cowonly) {
-			ASSERT(0);
-			return -EUCLEAN;
-		}
-
-		/* Only cache non-COW-only (subvolume trees) tree blocks */
-		if (!upper->cowonly) {
-			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
-						   &upper->rb_node);
-			if (rb_node) {
-				btrfs_backref_panic(cache->fs_info,
-						    upper->bytenr, -EEXIST);
-				return -EUCLEAN;
-			}
-		}
-
-		list_add_tail(&edge->list[UPPER], &upper->lower);
-
-		/*
-		 * Also queue all the parent edges of this uncached node to
-		 * finish the upper linkage.
-		 */
-		list_for_each_entry(edge, &upper->upper, list[LOWER])
-			list_add_tail(&edge->list[UPPER], &pending_edge);
-	}
-	return 0;
-}
-
 /*
  * For useless nodes, do two major clean ups:
  *
  * - Cleanup the children edges and nodes
  *   If a child node is also an orphan (no parent) during cleanup, then the
  *   child node will also be cleaned up.
  *
  * - Freeing up leaves (level 0), keeps nodes detached
  *   For nodes, the node is still cached as "detached"

--- 126 unchanged lines hidden ---

 		 */
 		if (edge) {
 			list_del_init(&edge->list[UPPER]);
 			cur = edge->node[UPPER];
 		}
 	} while (edge);

 	/* Finish the upper linkage of newly added edges/nodes */
-	ret = finish_upper_links(cache, node);
+	ret = btrfs_backref_finish_upper_links(cache, node);
 	if (ret < 0) {
 		err = ret;
 		goto out;
 	}

 	if (handle_useless_nodes(rc, node))
 		node = NULL;
 out:

--- 3585 unchanged lines hidden ---
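
Below is a minimal, self-contained userspace sketch of the breadth-first
traversal pattern that finish_upper_links() above relies on: the start
node's parent edges seed a pending list, each popped edge is linked to its
upper node, and every not-yet-visited upper node contributes its own parent
edges to the list. The struct, field, and helper names here (node, edge,
next_up, pending, enqueue) are hypothetical stand-ins for the kernel's
btrfs_backref_node, btrfs_backref_edge, and list_head machinery, not real
kernel API.

/* Hypothetical userspace sketch of the pending-edge BFS, not kernel code. */
#include <stdio.h>

struct node;

struct edge {
	struct node *upper;	/* parent tree block */
	struct node *lower;	/* child tree block */
	struct edge *next_up;	/* next edge sharing the same lower node */
	struct edge *pending;	/* singly linked BFS work list */
};

struct node {
	const char *name;
	struct edge *upper;	/* edges pointing at this node's parents */
	int linked;		/* parents already processed ("cache hit") */
};

/* Append an edge to the tail of the BFS work list. */
static void enqueue(struct edge **head, struct edge **tail, struct edge *e)
{
	e->pending = NULL;
	if (*tail)
		(*tail)->pending = e;
	else
		*head = e;
	*tail = e;
}

int main(void)
{
	struct node root = { "root", NULL, 0 };
	struct node mid  = { "mid",  NULL, 0 };
	struct node leaf = { "leaf", NULL, 0 };
	struct edge e_leaf_mid = { &mid,  &leaf, NULL, NULL };
	struct edge e_mid_root = { &root, &mid,  NULL, NULL };
	struct edge *head = NULL, *tail = NULL, *e;

	leaf.upper = &e_leaf_mid;
	mid.upper  = &e_mid_root;

	/* Seed the work list with every parent edge of the start node. */
	for (e = leaf.upper; e; e = e->next_up)
		enqueue(&head, &tail, e);

	/* Drain the list level by level, like the pending_edge loop above. */
	while (head) {
		struct edge *cur = head;
		struct node *up = cur->upper;

		head = cur->pending;
		if (!head)
			tail = NULL;

		printf("link %s -> %s\n", cur->lower->name, up->name);

		/* An already-linked parent needs only this edge attached. */
		if (up->linked)
			continue;
		up->linked = 1;

		/* Queue the parent's own parent edges for the next level. */
		for (e = up->upper; e; e = e->next_up)
			enqueue(&head, &tail, e);
	}
	return 0;
}

Like the kernel's pending_edge list, the walk is iterative rather than
recursive, so stack usage stays bounded no matter how tall the backref
graph grows.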