1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2009 Oracle. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/pagemap.h> 8 #include <linux/writeback.h> 9 #include <linux/blkdev.h> 10 #include <linux/rbtree.h> 11 #include <linux/slab.h> 12 #include "ctree.h" 13 #include "disk-io.h" 14 #include "transaction.h" 15 #include "volumes.h" 16 #include "locking.h" 17 #include "btrfs_inode.h" 18 #include "async-thread.h" 19 #include "free-space-cache.h" 20 #include "inode-map.h" 21 #include "qgroup.h" 22 #include "print-tree.h" 23 24 /* 25 * backref_node, mapping_node and tree_block start with this 26 */ 27 struct tree_entry { 28 struct rb_node rb_node; 29 u64 bytenr; 30 }; 31 32 /* 33 * present a tree block in the backref cache 34 */ 35 struct backref_node { 36 struct rb_node rb_node; 37 u64 bytenr; 38 39 u64 new_bytenr; 40 /* objectid of tree block owner, can be not uptodate */ 41 u64 owner; 42 /* link to pending, changed or detached list */ 43 struct list_head list; 44 /* list of upper level blocks reference this block */ 45 struct list_head upper; 46 /* list of child blocks in the cache */ 47 struct list_head lower; 48 /* NULL if this node is not tree root */ 49 struct btrfs_root *root; 50 /* extent buffer got by COW the block */ 51 struct extent_buffer *eb; 52 /* level of tree block */ 53 unsigned int level:8; 54 /* is the block in non-reference counted tree */ 55 unsigned int cowonly:1; 56 /* 1 if no child node in the cache */ 57 unsigned int lowest:1; 58 /* is the extent buffer locked */ 59 unsigned int locked:1; 60 /* has the block been processed */ 61 unsigned int processed:1; 62 /* have backrefs of this block been checked */ 63 unsigned int checked:1; 64 /* 65 * 1 if corresponding block has been cowed but some upper 66 * level block pointers may not point to the new location 67 */ 68 unsigned int pending:1; 69 /* 70 * 1 if the backref node isn't connected to any other 71 * backref node. 72 */ 73 unsigned int detached:1; 74 }; 75 76 /* 77 * present a block pointer in the backref cache 78 */ 79 struct backref_edge { 80 struct list_head list[2]; 81 struct backref_node *node[2]; 82 }; 83 84 #define LOWER 0 85 #define UPPER 1 86 #define RELOCATION_RESERVED_NODES 256 87 88 struct backref_cache { 89 /* red black tree of all backref nodes in the cache */ 90 struct rb_root rb_root; 91 /* for passing backref nodes to btrfs_reloc_cow_block */ 92 struct backref_node *path[BTRFS_MAX_LEVEL]; 93 /* 94 * list of blocks that have been cowed but some block 95 * pointers in upper level blocks may not reflect the 96 * new location 97 */ 98 struct list_head pending[BTRFS_MAX_LEVEL]; 99 /* list of backref nodes with no child node */ 100 struct list_head leaves; 101 /* list of blocks that have been cowed in current transaction */ 102 struct list_head changed; 103 /* list of detached backref node. 
*/ 104 struct list_head detached; 105 106 u64 last_trans; 107 108 int nr_nodes; 109 int nr_edges; 110 }; 111 112 /* 113 * map address of tree root to tree 114 */ 115 struct mapping_node { 116 struct rb_node rb_node; 117 u64 bytenr; 118 void *data; 119 }; 120 121 struct mapping_tree { 122 struct rb_root rb_root; 123 spinlock_t lock; 124 }; 125 126 /* 127 * present a tree block to process 128 */ 129 struct tree_block { 130 struct rb_node rb_node; 131 u64 bytenr; 132 struct btrfs_key key; 133 unsigned int level:8; 134 unsigned int key_ready:1; 135 }; 136 137 #define MAX_EXTENTS 128 138 139 struct file_extent_cluster { 140 u64 start; 141 u64 end; 142 u64 boundary[MAX_EXTENTS]; 143 unsigned int nr; 144 }; 145 146 struct reloc_control { 147 /* block group to relocate */ 148 struct btrfs_block_group_cache *block_group; 149 /* extent tree */ 150 struct btrfs_root *extent_root; 151 /* inode for moving data */ 152 struct inode *data_inode; 153 154 struct btrfs_block_rsv *block_rsv; 155 156 struct backref_cache backref_cache; 157 158 struct file_extent_cluster cluster; 159 /* tree blocks have been processed */ 160 struct extent_io_tree processed_blocks; 161 /* map start of tree root to corresponding reloc tree */ 162 struct mapping_tree reloc_root_tree; 163 /* list of reloc trees */ 164 struct list_head reloc_roots; 165 /* size of metadata reservation for merging reloc trees */ 166 u64 merging_rsv_size; 167 /* size of relocated tree nodes */ 168 u64 nodes_relocated; 169 /* reserved size for block group relocation*/ 170 u64 reserved_bytes; 171 172 u64 search_start; 173 u64 extents_found; 174 175 unsigned int stage:8; 176 unsigned int create_reloc_tree:1; 177 unsigned int merge_reloc_tree:1; 178 unsigned int found_file_extent:1; 179 }; 180 181 /* stages of data relocation */ 182 #define MOVE_DATA_EXTENTS 0 183 #define UPDATE_DATA_PTRS 1 184 185 static void remove_backref_node(struct backref_cache *cache, 186 struct backref_node *node); 187 static void __mark_block_processed(struct reloc_control *rc, 188 struct backref_node *node); 189 190 static void mapping_tree_init(struct mapping_tree *tree) 191 { 192 tree->rb_root = RB_ROOT; 193 spin_lock_init(&tree->lock); 194 } 195 196 static void backref_cache_init(struct backref_cache *cache) 197 { 198 int i; 199 cache->rb_root = RB_ROOT; 200 for (i = 0; i < BTRFS_MAX_LEVEL; i++) 201 INIT_LIST_HEAD(&cache->pending[i]); 202 INIT_LIST_HEAD(&cache->changed); 203 INIT_LIST_HEAD(&cache->detached); 204 INIT_LIST_HEAD(&cache->leaves); 205 } 206 207 static void backref_cache_cleanup(struct backref_cache *cache) 208 { 209 struct backref_node *node; 210 int i; 211 212 while (!list_empty(&cache->detached)) { 213 node = list_entry(cache->detached.next, 214 struct backref_node, list); 215 remove_backref_node(cache, node); 216 } 217 218 while (!list_empty(&cache->leaves)) { 219 node = list_entry(cache->leaves.next, 220 struct backref_node, lower); 221 remove_backref_node(cache, node); 222 } 223 224 cache->last_trans = 0; 225 226 for (i = 0; i < BTRFS_MAX_LEVEL; i++) 227 ASSERT(list_empty(&cache->pending[i])); 228 ASSERT(list_empty(&cache->changed)); 229 ASSERT(list_empty(&cache->detached)); 230 ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); 231 ASSERT(!cache->nr_nodes); 232 ASSERT(!cache->nr_edges); 233 } 234 235 static struct backref_node *alloc_backref_node(struct backref_cache *cache) 236 { 237 struct backref_node *node; 238 239 node = kzalloc(sizeof(*node), GFP_NOFS); 240 if (node) { 241 INIT_LIST_HEAD(&node->list); 242 INIT_LIST_HEAD(&node->upper); 243 
INIT_LIST_HEAD(&node->lower); 244 RB_CLEAR_NODE(&node->rb_node); 245 cache->nr_nodes++; 246 } 247 return node; 248 } 249 250 static void free_backref_node(struct backref_cache *cache, 251 struct backref_node *node) 252 { 253 if (node) { 254 cache->nr_nodes--; 255 kfree(node); 256 } 257 } 258 259 static struct backref_edge *alloc_backref_edge(struct backref_cache *cache) 260 { 261 struct backref_edge *edge; 262 263 edge = kzalloc(sizeof(*edge), GFP_NOFS); 264 if (edge) 265 cache->nr_edges++; 266 return edge; 267 } 268 269 static void free_backref_edge(struct backref_cache *cache, 270 struct backref_edge *edge) 271 { 272 if (edge) { 273 cache->nr_edges--; 274 kfree(edge); 275 } 276 } 277 278 static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, 279 struct rb_node *node) 280 { 281 struct rb_node **p = &root->rb_node; 282 struct rb_node *parent = NULL; 283 struct tree_entry *entry; 284 285 while (*p) { 286 parent = *p; 287 entry = rb_entry(parent, struct tree_entry, rb_node); 288 289 if (bytenr < entry->bytenr) 290 p = &(*p)->rb_left; 291 else if (bytenr > entry->bytenr) 292 p = &(*p)->rb_right; 293 else 294 return parent; 295 } 296 297 rb_link_node(node, parent, p); 298 rb_insert_color(node, root); 299 return NULL; 300 } 301 302 static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) 303 { 304 struct rb_node *n = root->rb_node; 305 struct tree_entry *entry; 306 307 while (n) { 308 entry = rb_entry(n, struct tree_entry, rb_node); 309 310 if (bytenr < entry->bytenr) 311 n = n->rb_left; 312 else if (bytenr > entry->bytenr) 313 n = n->rb_right; 314 else 315 return n; 316 } 317 return NULL; 318 } 319 320 static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr) 321 { 322 323 struct btrfs_fs_info *fs_info = NULL; 324 struct backref_node *bnode = rb_entry(rb_node, struct backref_node, 325 rb_node); 326 if (bnode->root) 327 fs_info = bnode->root->fs_info; 328 btrfs_panic(fs_info, errno, 329 "Inconsistency in backref cache found at offset %llu", 330 bytenr); 331 } 332 333 /* 334 * walk up backref nodes until reach node presents tree root 335 */ 336 static struct backref_node *walk_up_backref(struct backref_node *node, 337 struct backref_edge *edges[], 338 int *index) 339 { 340 struct backref_edge *edge; 341 int idx = *index; 342 343 while (!list_empty(&node->upper)) { 344 edge = list_entry(node->upper.next, 345 struct backref_edge, list[LOWER]); 346 edges[idx++] = edge; 347 node = edge->node[UPPER]; 348 } 349 BUG_ON(node->detached); 350 *index = idx; 351 return node; 352 } 353 354 /* 355 * walk down backref nodes to find start of next reference path 356 */ 357 static struct backref_node *walk_down_backref(struct backref_edge *edges[], 358 int *index) 359 { 360 struct backref_edge *edge; 361 struct backref_node *lower; 362 int idx = *index; 363 364 while (idx > 0) { 365 edge = edges[idx - 1]; 366 lower = edge->node[LOWER]; 367 if (list_is_last(&edge->list[LOWER], &lower->upper)) { 368 idx--; 369 continue; 370 } 371 edge = list_entry(edge->list[LOWER].next, 372 struct backref_edge, list[LOWER]); 373 edges[idx - 1] = edge; 374 *index = idx; 375 return edge->node[UPPER]; 376 } 377 *index = 0; 378 return NULL; 379 } 380 381 static void unlock_node_buffer(struct backref_node *node) 382 { 383 if (node->locked) { 384 btrfs_tree_unlock(node->eb); 385 node->locked = 0; 386 } 387 } 388 389 static void drop_node_buffer(struct backref_node *node) 390 { 391 if (node->eb) { 392 unlock_node_buffer(node); 393 free_extent_buffer(node->eb); 394 node->eb = NULL; 395 } 396 
} 397 398 static void drop_backref_node(struct backref_cache *tree, 399 struct backref_node *node) 400 { 401 BUG_ON(!list_empty(&node->upper)); 402 403 drop_node_buffer(node); 404 list_del(&node->list); 405 list_del(&node->lower); 406 if (!RB_EMPTY_NODE(&node->rb_node)) 407 rb_erase(&node->rb_node, &tree->rb_root); 408 free_backref_node(tree, node); 409 } 410 411 /* 412 * remove a backref node from the backref cache 413 */ 414 static void remove_backref_node(struct backref_cache *cache, 415 struct backref_node *node) 416 { 417 struct backref_node *upper; 418 struct backref_edge *edge; 419 420 if (!node) 421 return; 422 423 BUG_ON(!node->lowest && !node->detached); 424 while (!list_empty(&node->upper)) { 425 edge = list_entry(node->upper.next, struct backref_edge, 426 list[LOWER]); 427 upper = edge->node[UPPER]; 428 list_del(&edge->list[LOWER]); 429 list_del(&edge->list[UPPER]); 430 free_backref_edge(cache, edge); 431 432 if (RB_EMPTY_NODE(&upper->rb_node)) { 433 BUG_ON(!list_empty(&node->upper)); 434 drop_backref_node(cache, node); 435 node = upper; 436 node->lowest = 1; 437 continue; 438 } 439 /* 440 * add the node to leaf node list if no other 441 * child block cached. 442 */ 443 if (list_empty(&upper->lower)) { 444 list_add_tail(&upper->lower, &cache->leaves); 445 upper->lowest = 1; 446 } 447 } 448 449 drop_backref_node(cache, node); 450 } 451 452 static void update_backref_node(struct backref_cache *cache, 453 struct backref_node *node, u64 bytenr) 454 { 455 struct rb_node *rb_node; 456 rb_erase(&node->rb_node, &cache->rb_root); 457 node->bytenr = bytenr; 458 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node); 459 if (rb_node) 460 backref_tree_panic(rb_node, -EEXIST, bytenr); 461 } 462 463 /* 464 * update backref cache after a transaction commit 465 */ 466 static int update_backref_cache(struct btrfs_trans_handle *trans, 467 struct backref_cache *cache) 468 { 469 struct backref_node *node; 470 int level = 0; 471 472 if (cache->last_trans == 0) { 473 cache->last_trans = trans->transid; 474 return 0; 475 } 476 477 if (cache->last_trans == trans->transid) 478 return 0; 479 480 /* 481 * detached nodes are used to avoid unnecessary backref 482 * lookup. transaction commit changes the extent tree. 483 * so the detached nodes are no longer useful. 484 */ 485 while (!list_empty(&cache->detached)) { 486 node = list_entry(cache->detached.next, 487 struct backref_node, list); 488 remove_backref_node(cache, node); 489 } 490 491 while (!list_empty(&cache->changed)) { 492 node = list_entry(cache->changed.next, 493 struct backref_node, list); 494 list_del_init(&node->list); 495 BUG_ON(node->pending); 496 update_backref_node(cache, node, node->new_bytenr); 497 } 498 499 /* 500 * some nodes can be left in the pending list if there were 501 * errors during processing the pending nodes. 
502 */ 503 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 504 list_for_each_entry(node, &cache->pending[level], list) { 505 BUG_ON(!node->pending); 506 if (node->bytenr == node->new_bytenr) 507 continue; 508 update_backref_node(cache, node, node->new_bytenr); 509 } 510 } 511 512 cache->last_trans = 0; 513 return 1; 514 } 515 516 517 static int should_ignore_root(struct btrfs_root *root) 518 { 519 struct btrfs_root *reloc_root; 520 521 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 522 return 0; 523 524 reloc_root = root->reloc_root; 525 if (!reloc_root) 526 return 0; 527 528 if (btrfs_root_last_snapshot(&reloc_root->root_item) == 529 root->fs_info->running_transaction->transid - 1) 530 return 0; 531 /* 532 * if there is reloc tree and it was created in previous 533 * transaction backref lookup can find the reloc tree, 534 * so backref node for the fs tree root is useless for 535 * relocation. 536 */ 537 return 1; 538 } 539 /* 540 * find reloc tree by address of tree root 541 */ 542 static struct btrfs_root *find_reloc_root(struct reloc_control *rc, 543 u64 bytenr) 544 { 545 struct rb_node *rb_node; 546 struct mapping_node *node; 547 struct btrfs_root *root = NULL; 548 549 spin_lock(&rc->reloc_root_tree.lock); 550 rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr); 551 if (rb_node) { 552 node = rb_entry(rb_node, struct mapping_node, rb_node); 553 root = (struct btrfs_root *)node->data; 554 } 555 spin_unlock(&rc->reloc_root_tree.lock); 556 return root; 557 } 558 559 static int is_cowonly_root(u64 root_objectid) 560 { 561 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID || 562 root_objectid == BTRFS_EXTENT_TREE_OBJECTID || 563 root_objectid == BTRFS_CHUNK_TREE_OBJECTID || 564 root_objectid == BTRFS_DEV_TREE_OBJECTID || 565 root_objectid == BTRFS_TREE_LOG_OBJECTID || 566 root_objectid == BTRFS_CSUM_TREE_OBJECTID || 567 root_objectid == BTRFS_UUID_TREE_OBJECTID || 568 root_objectid == BTRFS_QUOTA_TREE_OBJECTID || 569 root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) 570 return 1; 571 return 0; 572 } 573 574 static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info, 575 u64 root_objectid) 576 { 577 struct btrfs_key key; 578 579 key.objectid = root_objectid; 580 key.type = BTRFS_ROOT_ITEM_KEY; 581 if (is_cowonly_root(root_objectid)) 582 key.offset = 0; 583 else 584 key.offset = (u64)-1; 585 586 return btrfs_get_fs_root(fs_info, &key, false); 587 } 588 589 static noinline_for_stack 590 int find_inline_backref(struct extent_buffer *leaf, int slot, 591 unsigned long *ptr, unsigned long *end) 592 { 593 struct btrfs_key key; 594 struct btrfs_extent_item *ei; 595 struct btrfs_tree_block_info *bi; 596 u32 item_size; 597 598 btrfs_item_key_to_cpu(leaf, &key, slot); 599 600 item_size = btrfs_item_size_nr(leaf, slot); 601 if (item_size < sizeof(*ei)) { 602 btrfs_print_v0_err(leaf->fs_info); 603 btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL); 604 return 1; 605 } 606 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); 607 WARN_ON(!(btrfs_extent_flags(leaf, ei) & 608 BTRFS_EXTENT_FLAG_TREE_BLOCK)); 609 610 if (key.type == BTRFS_EXTENT_ITEM_KEY && 611 item_size <= sizeof(*ei) + sizeof(*bi)) { 612 WARN_ON(item_size < sizeof(*ei) + sizeof(*bi)); 613 return 1; 614 } 615 if (key.type == BTRFS_METADATA_ITEM_KEY && 616 item_size <= sizeof(*ei)) { 617 WARN_ON(item_size < sizeof(*ei)); 618 return 1; 619 } 620 621 if (key.type == BTRFS_EXTENT_ITEM_KEY) { 622 bi = (struct btrfs_tree_block_info *)(ei + 1); 623 *ptr = (unsigned long)(bi + 1); 624 } else { 625 *ptr = (unsigned long)(ei + 
1); 626 } 627 *end = (unsigned long)ei + item_size; 628 return 0; 629 } 630 631 /* 632 * build backref tree for a given tree block. root of the backref tree 633 * corresponds the tree block, leaves of the backref tree correspond 634 * roots of b-trees that reference the tree block. 635 * 636 * the basic idea of this function is check backrefs of a given block 637 * to find upper level blocks that reference the block, and then check 638 * backrefs of these upper level blocks recursively. the recursion stop 639 * when tree root is reached or backrefs for the block is cached. 640 * 641 * NOTE: if we find backrefs for a block are cached, we know backrefs 642 * for all upper level blocks that directly/indirectly reference the 643 * block are also cached. 644 */ 645 static noinline_for_stack 646 struct backref_node *build_backref_tree(struct reloc_control *rc, 647 struct btrfs_key *node_key, 648 int level, u64 bytenr) 649 { 650 struct backref_cache *cache = &rc->backref_cache; 651 struct btrfs_path *path1; 652 struct btrfs_path *path2; 653 struct extent_buffer *eb; 654 struct btrfs_root *root; 655 struct backref_node *cur; 656 struct backref_node *upper; 657 struct backref_node *lower; 658 struct backref_node *node = NULL; 659 struct backref_node *exist = NULL; 660 struct backref_edge *edge; 661 struct rb_node *rb_node; 662 struct btrfs_key key; 663 unsigned long end; 664 unsigned long ptr; 665 LIST_HEAD(list); 666 LIST_HEAD(useless); 667 int cowonly; 668 int ret; 669 int err = 0; 670 bool need_check = true; 671 672 path1 = btrfs_alloc_path(); 673 path2 = btrfs_alloc_path(); 674 if (!path1 || !path2) { 675 err = -ENOMEM; 676 goto out; 677 } 678 path1->reada = READA_FORWARD; 679 path2->reada = READA_FORWARD; 680 681 node = alloc_backref_node(cache); 682 if (!node) { 683 err = -ENOMEM; 684 goto out; 685 } 686 687 node->bytenr = bytenr; 688 node->level = level; 689 node->lowest = 1; 690 cur = node; 691 again: 692 end = 0; 693 ptr = 0; 694 key.objectid = cur->bytenr; 695 key.type = BTRFS_METADATA_ITEM_KEY; 696 key.offset = (u64)-1; 697 698 path1->search_commit_root = 1; 699 path1->skip_locking = 1; 700 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1, 701 0, 0); 702 if (ret < 0) { 703 err = ret; 704 goto out; 705 } 706 ASSERT(ret); 707 ASSERT(path1->slots[0]); 708 709 path1->slots[0]--; 710 711 WARN_ON(cur->checked); 712 if (!list_empty(&cur->upper)) { 713 /* 714 * the backref was added previously when processing 715 * backref of type BTRFS_TREE_BLOCK_REF_KEY 716 */ 717 ASSERT(list_is_singular(&cur->upper)); 718 edge = list_entry(cur->upper.next, struct backref_edge, 719 list[LOWER]); 720 ASSERT(list_empty(&edge->list[UPPER])); 721 exist = edge->node[UPPER]; 722 /* 723 * add the upper level block to pending list if we need 724 * check its backrefs 725 */ 726 if (!exist->checked) 727 list_add_tail(&edge->list[UPPER], &list); 728 } else { 729 exist = NULL; 730 } 731 732 while (1) { 733 cond_resched(); 734 eb = path1->nodes[0]; 735 736 if (ptr >= end) { 737 if (path1->slots[0] >= btrfs_header_nritems(eb)) { 738 ret = btrfs_next_leaf(rc->extent_root, path1); 739 if (ret < 0) { 740 err = ret; 741 goto out; 742 } 743 if (ret > 0) 744 break; 745 eb = path1->nodes[0]; 746 } 747 748 btrfs_item_key_to_cpu(eb, &key, path1->slots[0]); 749 if (key.objectid != cur->bytenr) { 750 WARN_ON(exist); 751 break; 752 } 753 754 if (key.type == BTRFS_EXTENT_ITEM_KEY || 755 key.type == BTRFS_METADATA_ITEM_KEY) { 756 ret = find_inline_backref(eb, path1->slots[0], 757 &ptr, &end); 758 if (ret) 759 goto next; 760 } 
761 } 762 763 if (ptr < end) { 764 /* update key for inline back ref */ 765 struct btrfs_extent_inline_ref *iref; 766 int type; 767 iref = (struct btrfs_extent_inline_ref *)ptr; 768 type = btrfs_get_extent_inline_ref_type(eb, iref, 769 BTRFS_REF_TYPE_BLOCK); 770 if (type == BTRFS_REF_TYPE_INVALID) { 771 err = -EUCLEAN; 772 goto out; 773 } 774 key.type = type; 775 key.offset = btrfs_extent_inline_ref_offset(eb, iref); 776 777 WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY && 778 key.type != BTRFS_SHARED_BLOCK_REF_KEY); 779 } 780 781 if (exist && 782 ((key.type == BTRFS_TREE_BLOCK_REF_KEY && 783 exist->owner == key.offset) || 784 (key.type == BTRFS_SHARED_BLOCK_REF_KEY && 785 exist->bytenr == key.offset))) { 786 exist = NULL; 787 goto next; 788 } 789 790 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) { 791 if (key.objectid == key.offset) { 792 /* 793 * only root blocks of reloc trees use 794 * backref of this type. 795 */ 796 root = find_reloc_root(rc, cur->bytenr); 797 ASSERT(root); 798 cur->root = root; 799 break; 800 } 801 802 edge = alloc_backref_edge(cache); 803 if (!edge) { 804 err = -ENOMEM; 805 goto out; 806 } 807 rb_node = tree_search(&cache->rb_root, key.offset); 808 if (!rb_node) { 809 upper = alloc_backref_node(cache); 810 if (!upper) { 811 free_backref_edge(cache, edge); 812 err = -ENOMEM; 813 goto out; 814 } 815 upper->bytenr = key.offset; 816 upper->level = cur->level + 1; 817 /* 818 * backrefs for the upper level block isn't 819 * cached, add the block to pending list 820 */ 821 list_add_tail(&edge->list[UPPER], &list); 822 } else { 823 upper = rb_entry(rb_node, struct backref_node, 824 rb_node); 825 ASSERT(upper->checked); 826 INIT_LIST_HEAD(&edge->list[UPPER]); 827 } 828 list_add_tail(&edge->list[LOWER], &cur->upper); 829 edge->node[LOWER] = cur; 830 edge->node[UPPER] = upper; 831 832 goto next; 833 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) { 834 err = -EINVAL; 835 btrfs_print_v0_err(rc->extent_root->fs_info); 836 btrfs_handle_fs_error(rc->extent_root->fs_info, err, 837 NULL); 838 goto out; 839 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) { 840 goto next; 841 } 842 843 /* key.type == BTRFS_TREE_BLOCK_REF_KEY */ 844 root = read_fs_root(rc->extent_root->fs_info, key.offset); 845 if (IS_ERR(root)) { 846 err = PTR_ERR(root); 847 goto out; 848 } 849 850 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 851 cur->cowonly = 1; 852 853 if (btrfs_root_level(&root->root_item) == cur->level) { 854 /* tree root */ 855 ASSERT(btrfs_root_bytenr(&root->root_item) == 856 cur->bytenr); 857 if (should_ignore_root(root)) 858 list_add(&cur->list, &useless); 859 else 860 cur->root = root; 861 break; 862 } 863 864 level = cur->level + 1; 865 866 /* 867 * searching the tree to find upper level blocks 868 * reference the block. 
869 */ 870 path2->search_commit_root = 1; 871 path2->skip_locking = 1; 872 path2->lowest_level = level; 873 ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0); 874 path2->lowest_level = 0; 875 if (ret < 0) { 876 err = ret; 877 goto out; 878 } 879 if (ret > 0 && path2->slots[level] > 0) 880 path2->slots[level]--; 881 882 eb = path2->nodes[level]; 883 if (btrfs_node_blockptr(eb, path2->slots[level]) != 884 cur->bytenr) { 885 btrfs_err(root->fs_info, 886 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)", 887 cur->bytenr, level - 1, root->objectid, 888 node_key->objectid, node_key->type, 889 node_key->offset); 890 err = -ENOENT; 891 goto out; 892 } 893 lower = cur; 894 need_check = true; 895 for (; level < BTRFS_MAX_LEVEL; level++) { 896 if (!path2->nodes[level]) { 897 ASSERT(btrfs_root_bytenr(&root->root_item) == 898 lower->bytenr); 899 if (should_ignore_root(root)) 900 list_add(&lower->list, &useless); 901 else 902 lower->root = root; 903 break; 904 } 905 906 edge = alloc_backref_edge(cache); 907 if (!edge) { 908 err = -ENOMEM; 909 goto out; 910 } 911 912 eb = path2->nodes[level]; 913 rb_node = tree_search(&cache->rb_root, eb->start); 914 if (!rb_node) { 915 upper = alloc_backref_node(cache); 916 if (!upper) { 917 free_backref_edge(cache, edge); 918 err = -ENOMEM; 919 goto out; 920 } 921 upper->bytenr = eb->start; 922 upper->owner = btrfs_header_owner(eb); 923 upper->level = lower->level + 1; 924 if (!test_bit(BTRFS_ROOT_REF_COWS, 925 &root->state)) 926 upper->cowonly = 1; 927 928 /* 929 * if we know the block isn't shared 930 * we can void checking its backrefs. 931 */ 932 if (btrfs_block_can_be_shared(root, eb)) 933 upper->checked = 0; 934 else 935 upper->checked = 1; 936 937 /* 938 * add the block to pending list if we 939 * need check its backrefs, we only do this once 940 * while walking up a tree as we will catch 941 * anything else later on. 942 */ 943 if (!upper->checked && need_check) { 944 need_check = false; 945 list_add_tail(&edge->list[UPPER], 946 &list); 947 } else { 948 if (upper->checked) 949 need_check = true; 950 INIT_LIST_HEAD(&edge->list[UPPER]); 951 } 952 } else { 953 upper = rb_entry(rb_node, struct backref_node, 954 rb_node); 955 ASSERT(upper->checked); 956 INIT_LIST_HEAD(&edge->list[UPPER]); 957 if (!upper->owner) 958 upper->owner = btrfs_header_owner(eb); 959 } 960 list_add_tail(&edge->list[LOWER], &lower->upper); 961 edge->node[LOWER] = lower; 962 edge->node[UPPER] = upper; 963 964 if (rb_node) 965 break; 966 lower = upper; 967 upper = NULL; 968 } 969 btrfs_release_path(path2); 970 next: 971 if (ptr < end) { 972 ptr += btrfs_extent_inline_ref_size(key.type); 973 if (ptr >= end) { 974 WARN_ON(ptr > end); 975 ptr = 0; 976 end = 0; 977 } 978 } 979 if (ptr >= end) 980 path1->slots[0]++; 981 } 982 btrfs_release_path(path1); 983 984 cur->checked = 1; 985 WARN_ON(exist); 986 987 /* the pending list isn't empty, take the first block to process */ 988 if (!list_empty(&list)) { 989 edge = list_entry(list.next, struct backref_edge, list[UPPER]); 990 list_del_init(&edge->list[UPPER]); 991 cur = edge->node[UPPER]; 992 goto again; 993 } 994 995 /* 996 * everything goes well, connect backref nodes and insert backref nodes 997 * into the cache. 
998 */ 999 ASSERT(node->checked); 1000 cowonly = node->cowonly; 1001 if (!cowonly) { 1002 rb_node = tree_insert(&cache->rb_root, node->bytenr, 1003 &node->rb_node); 1004 if (rb_node) 1005 backref_tree_panic(rb_node, -EEXIST, node->bytenr); 1006 list_add_tail(&node->lower, &cache->leaves); 1007 } 1008 1009 list_for_each_entry(edge, &node->upper, list[LOWER]) 1010 list_add_tail(&edge->list[UPPER], &list); 1011 1012 while (!list_empty(&list)) { 1013 edge = list_entry(list.next, struct backref_edge, list[UPPER]); 1014 list_del_init(&edge->list[UPPER]); 1015 upper = edge->node[UPPER]; 1016 if (upper->detached) { 1017 list_del(&edge->list[LOWER]); 1018 lower = edge->node[LOWER]; 1019 free_backref_edge(cache, edge); 1020 if (list_empty(&lower->upper)) 1021 list_add(&lower->list, &useless); 1022 continue; 1023 } 1024 1025 if (!RB_EMPTY_NODE(&upper->rb_node)) { 1026 if (upper->lowest) { 1027 list_del_init(&upper->lower); 1028 upper->lowest = 0; 1029 } 1030 1031 list_add_tail(&edge->list[UPPER], &upper->lower); 1032 continue; 1033 } 1034 1035 if (!upper->checked) { 1036 /* 1037 * Still want to blow up for developers since this is a 1038 * logic bug. 1039 */ 1040 ASSERT(0); 1041 err = -EINVAL; 1042 goto out; 1043 } 1044 if (cowonly != upper->cowonly) { 1045 ASSERT(0); 1046 err = -EINVAL; 1047 goto out; 1048 } 1049 1050 if (!cowonly) { 1051 rb_node = tree_insert(&cache->rb_root, upper->bytenr, 1052 &upper->rb_node); 1053 if (rb_node) 1054 backref_tree_panic(rb_node, -EEXIST, 1055 upper->bytenr); 1056 } 1057 1058 list_add_tail(&edge->list[UPPER], &upper->lower); 1059 1060 list_for_each_entry(edge, &upper->upper, list[LOWER]) 1061 list_add_tail(&edge->list[UPPER], &list); 1062 } 1063 /* 1064 * process useless backref nodes. backref nodes for tree leaves 1065 * are deleted from the cache. backref nodes for upper level 1066 * tree blocks are left in the cache to avoid unnecessary backref 1067 * lookup. 1068 */ 1069 while (!list_empty(&useless)) { 1070 upper = list_entry(useless.next, struct backref_node, list); 1071 list_del_init(&upper->list); 1072 ASSERT(list_empty(&upper->upper)); 1073 if (upper == node) 1074 node = NULL; 1075 if (upper->lowest) { 1076 list_del_init(&upper->lower); 1077 upper->lowest = 0; 1078 } 1079 while (!list_empty(&upper->lower)) { 1080 edge = list_entry(upper->lower.next, 1081 struct backref_edge, list[UPPER]); 1082 list_del(&edge->list[UPPER]); 1083 list_del(&edge->list[LOWER]); 1084 lower = edge->node[LOWER]; 1085 free_backref_edge(cache, edge); 1086 1087 if (list_empty(&lower->upper)) 1088 list_add(&lower->list, &useless); 1089 } 1090 __mark_block_processed(rc, upper); 1091 if (upper->level > 0) { 1092 list_add(&upper->list, &cache->detached); 1093 upper->detached = 1; 1094 } else { 1095 rb_erase(&upper->rb_node, &cache->rb_root); 1096 free_backref_node(cache, upper); 1097 } 1098 } 1099 out: 1100 btrfs_free_path(path1); 1101 btrfs_free_path(path2); 1102 if (err) { 1103 while (!list_empty(&useless)) { 1104 lower = list_entry(useless.next, 1105 struct backref_node, list); 1106 list_del_init(&lower->list); 1107 } 1108 while (!list_empty(&list)) { 1109 edge = list_first_entry(&list, struct backref_edge, 1110 list[UPPER]); 1111 list_del(&edge->list[UPPER]); 1112 list_del(&edge->list[LOWER]); 1113 lower = edge->node[LOWER]; 1114 upper = edge->node[UPPER]; 1115 free_backref_edge(cache, edge); 1116 1117 /* 1118 * Lower is no longer linked to any upper backref nodes 1119 * and isn't in the cache, we can free it ourselves. 
1120 */ 1121 if (list_empty(&lower->upper) && 1122 RB_EMPTY_NODE(&lower->rb_node)) 1123 list_add(&lower->list, &useless); 1124 1125 if (!RB_EMPTY_NODE(&upper->rb_node)) 1126 continue; 1127 1128 /* Add this guy's upper edges to the list to process */ 1129 list_for_each_entry(edge, &upper->upper, list[LOWER]) 1130 list_add_tail(&edge->list[UPPER], &list); 1131 if (list_empty(&upper->upper)) 1132 list_add(&upper->list, &useless); 1133 } 1134 1135 while (!list_empty(&useless)) { 1136 lower = list_entry(useless.next, 1137 struct backref_node, list); 1138 list_del_init(&lower->list); 1139 if (lower == node) 1140 node = NULL; 1141 free_backref_node(cache, lower); 1142 } 1143 1144 free_backref_node(cache, node); 1145 return ERR_PTR(err); 1146 } 1147 ASSERT(!node || !node->detached); 1148 return node; 1149 } 1150 1151 /* 1152 * helper to add backref node for the newly created snapshot. 1153 * the backref node is created by cloning backref node that 1154 * corresponds to root of source tree 1155 */ 1156 static int clone_backref_node(struct btrfs_trans_handle *trans, 1157 struct reloc_control *rc, 1158 struct btrfs_root *src, 1159 struct btrfs_root *dest) 1160 { 1161 struct btrfs_root *reloc_root = src->reloc_root; 1162 struct backref_cache *cache = &rc->backref_cache; 1163 struct backref_node *node = NULL; 1164 struct backref_node *new_node; 1165 struct backref_edge *edge; 1166 struct backref_edge *new_edge; 1167 struct rb_node *rb_node; 1168 1169 if (cache->last_trans > 0) 1170 update_backref_cache(trans, cache); 1171 1172 rb_node = tree_search(&cache->rb_root, src->commit_root->start); 1173 if (rb_node) { 1174 node = rb_entry(rb_node, struct backref_node, rb_node); 1175 if (node->detached) 1176 node = NULL; 1177 else 1178 BUG_ON(node->new_bytenr != reloc_root->node->start); 1179 } 1180 1181 if (!node) { 1182 rb_node = tree_search(&cache->rb_root, 1183 reloc_root->commit_root->start); 1184 if (rb_node) { 1185 node = rb_entry(rb_node, struct backref_node, 1186 rb_node); 1187 BUG_ON(node->detached); 1188 } 1189 } 1190 1191 if (!node) 1192 return 0; 1193 1194 new_node = alloc_backref_node(cache); 1195 if (!new_node) 1196 return -ENOMEM; 1197 1198 new_node->bytenr = dest->node->start; 1199 new_node->level = node->level; 1200 new_node->lowest = node->lowest; 1201 new_node->checked = 1; 1202 new_node->root = dest; 1203 1204 if (!node->lowest) { 1205 list_for_each_entry(edge, &node->lower, list[UPPER]) { 1206 new_edge = alloc_backref_edge(cache); 1207 if (!new_edge) 1208 goto fail; 1209 1210 new_edge->node[UPPER] = new_node; 1211 new_edge->node[LOWER] = edge->node[LOWER]; 1212 list_add_tail(&new_edge->list[UPPER], 1213 &new_node->lower); 1214 } 1215 } else { 1216 list_add_tail(&new_node->lower, &cache->leaves); 1217 } 1218 1219 rb_node = tree_insert(&cache->rb_root, new_node->bytenr, 1220 &new_node->rb_node); 1221 if (rb_node) 1222 backref_tree_panic(rb_node, -EEXIST, new_node->bytenr); 1223 1224 if (!new_node->lowest) { 1225 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { 1226 list_add_tail(&new_edge->list[LOWER], 1227 &new_edge->node[LOWER]->upper); 1228 } 1229 } 1230 return 0; 1231 fail: 1232 while (!list_empty(&new_node->lower)) { 1233 new_edge = list_entry(new_node->lower.next, 1234 struct backref_edge, list[UPPER]); 1235 list_del(&new_edge->list[UPPER]); 1236 free_backref_edge(cache, new_edge); 1237 } 1238 free_backref_node(cache, new_node); 1239 return -ENOMEM; 1240 } 1241 1242 /* 1243 * helper to add 'address of tree root -> reloc tree' mapping 1244 */ 1245 static int __must_check 
__add_reloc_root(struct btrfs_root *root) 1246 { 1247 struct btrfs_fs_info *fs_info = root->fs_info; 1248 struct rb_node *rb_node; 1249 struct mapping_node *node; 1250 struct reloc_control *rc = fs_info->reloc_ctl; 1251 1252 node = kmalloc(sizeof(*node), GFP_NOFS); 1253 if (!node) 1254 return -ENOMEM; 1255 1256 node->bytenr = root->node->start; 1257 node->data = root; 1258 1259 spin_lock(&rc->reloc_root_tree.lock); 1260 rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 1261 node->bytenr, &node->rb_node); 1262 spin_unlock(&rc->reloc_root_tree.lock); 1263 if (rb_node) { 1264 btrfs_panic(fs_info, -EEXIST, 1265 "Duplicate root found for start=%llu while inserting into relocation tree", 1266 node->bytenr); 1267 } 1268 1269 list_add_tail(&root->root_list, &rc->reloc_roots); 1270 return 0; 1271 } 1272 1273 /* 1274 * helper to delete the 'address of tree root -> reloc tree' 1275 * mapping 1276 */ 1277 static void __del_reloc_root(struct btrfs_root *root) 1278 { 1279 struct btrfs_fs_info *fs_info = root->fs_info; 1280 struct rb_node *rb_node; 1281 struct mapping_node *node = NULL; 1282 struct reloc_control *rc = fs_info->reloc_ctl; 1283 1284 if (rc) { 1285 spin_lock(&rc->reloc_root_tree.lock); 1286 rb_node = tree_search(&rc->reloc_root_tree.rb_root, 1287 root->node->start); 1288 if (rb_node) { 1289 node = rb_entry(rb_node, struct mapping_node, rb_node); 1290 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); 1291 } 1292 spin_unlock(&rc->reloc_root_tree.lock); 1293 if (!node) 1294 return; 1295 BUG_ON((struct btrfs_root *)node->data != root); 1296 } 1297 1298 spin_lock(&fs_info->trans_lock); 1299 list_del_init(&root->root_list); 1300 spin_unlock(&fs_info->trans_lock); 1301 kfree(node); 1302 } 1303 1304 /* 1305 * helper to update the 'address of tree root -> reloc tree' 1306 * mapping 1307 */ 1308 static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) 1309 { 1310 struct btrfs_fs_info *fs_info = root->fs_info; 1311 struct rb_node *rb_node; 1312 struct mapping_node *node = NULL; 1313 struct reloc_control *rc = fs_info->reloc_ctl; 1314 1315 spin_lock(&rc->reloc_root_tree.lock); 1316 rb_node = tree_search(&rc->reloc_root_tree.rb_root, 1317 root->node->start); 1318 if (rb_node) { 1319 node = rb_entry(rb_node, struct mapping_node, rb_node); 1320 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); 1321 } 1322 spin_unlock(&rc->reloc_root_tree.lock); 1323 1324 if (!node) 1325 return 0; 1326 BUG_ON((struct btrfs_root *)node->data != root); 1327 1328 spin_lock(&rc->reloc_root_tree.lock); 1329 node->bytenr = new_bytenr; 1330 rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 1331 node->bytenr, &node->rb_node); 1332 spin_unlock(&rc->reloc_root_tree.lock); 1333 if (rb_node) 1334 backref_tree_panic(rb_node, -EEXIST, node->bytenr); 1335 return 0; 1336 } 1337 1338 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, 1339 struct btrfs_root *root, u64 objectid) 1340 { 1341 struct btrfs_fs_info *fs_info = root->fs_info; 1342 struct btrfs_root *reloc_root; 1343 struct extent_buffer *eb; 1344 struct btrfs_root_item *root_item; 1345 struct btrfs_key root_key; 1346 int ret; 1347 1348 root_item = kmalloc(sizeof(*root_item), GFP_NOFS); 1349 BUG_ON(!root_item); 1350 1351 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; 1352 root_key.type = BTRFS_ROOT_ITEM_KEY; 1353 root_key.offset = objectid; 1354 1355 if (root->root_key.objectid == objectid) { 1356 u64 commit_root_gen; 1357 1358 /* called by btrfs_init_reloc_root */ 1359 ret = btrfs_copy_root(trans, root, root->commit_root, 
&eb, 1360 BTRFS_TREE_RELOC_OBJECTID); 1361 BUG_ON(ret); 1362 /* 1363 * Set the last_snapshot field to the generation of the commit 1364 * root - like this ctree.c:btrfs_block_can_be_shared() behaves 1365 * correctly (returns true) when the relocation root is created 1366 * either inside the critical section of a transaction commit 1367 * (through transaction.c:qgroup_account_snapshot()) and when 1368 * it's created before the transaction commit is started. 1369 */ 1370 commit_root_gen = btrfs_header_generation(root->commit_root); 1371 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen); 1372 } else { 1373 /* 1374 * called by btrfs_reloc_post_snapshot_hook. 1375 * the source tree is a reloc tree, all tree blocks 1376 * modified after it was created have RELOC flag 1377 * set in their headers. so it's OK to not update 1378 * the 'last_snapshot'. 1379 */ 1380 ret = btrfs_copy_root(trans, root, root->node, &eb, 1381 BTRFS_TREE_RELOC_OBJECTID); 1382 BUG_ON(ret); 1383 } 1384 1385 memcpy(root_item, &root->root_item, sizeof(*root_item)); 1386 btrfs_set_root_bytenr(root_item, eb->start); 1387 btrfs_set_root_level(root_item, btrfs_header_level(eb)); 1388 btrfs_set_root_generation(root_item, trans->transid); 1389 1390 if (root->root_key.objectid == objectid) { 1391 btrfs_set_root_refs(root_item, 0); 1392 memset(&root_item->drop_progress, 0, 1393 sizeof(struct btrfs_disk_key)); 1394 root_item->drop_level = 0; 1395 } 1396 1397 btrfs_tree_unlock(eb); 1398 free_extent_buffer(eb); 1399 1400 ret = btrfs_insert_root(trans, fs_info->tree_root, 1401 &root_key, root_item); 1402 BUG_ON(ret); 1403 kfree(root_item); 1404 1405 reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key); 1406 BUG_ON(IS_ERR(reloc_root)); 1407 reloc_root->last_trans = trans->transid; 1408 return reloc_root; 1409 } 1410 1411 /* 1412 * create reloc tree for a given fs tree. reloc tree is just a 1413 * snapshot of the fs tree with special root objectid. 
1414 */ 1415 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, 1416 struct btrfs_root *root) 1417 { 1418 struct btrfs_fs_info *fs_info = root->fs_info; 1419 struct btrfs_root *reloc_root; 1420 struct reloc_control *rc = fs_info->reloc_ctl; 1421 struct btrfs_block_rsv *rsv; 1422 int clear_rsv = 0; 1423 int ret; 1424 1425 if (root->reloc_root) { 1426 reloc_root = root->reloc_root; 1427 reloc_root->last_trans = trans->transid; 1428 return 0; 1429 } 1430 1431 if (!rc || !rc->create_reloc_tree || 1432 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 1433 return 0; 1434 1435 if (!trans->reloc_reserved) { 1436 rsv = trans->block_rsv; 1437 trans->block_rsv = rc->block_rsv; 1438 clear_rsv = 1; 1439 } 1440 reloc_root = create_reloc_root(trans, root, root->root_key.objectid); 1441 if (clear_rsv) 1442 trans->block_rsv = rsv; 1443 1444 ret = __add_reloc_root(reloc_root); 1445 BUG_ON(ret < 0); 1446 root->reloc_root = reloc_root; 1447 return 0; 1448 } 1449 1450 /* 1451 * update root item of reloc tree 1452 */ 1453 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, 1454 struct btrfs_root *root) 1455 { 1456 struct btrfs_fs_info *fs_info = root->fs_info; 1457 struct btrfs_root *reloc_root; 1458 struct btrfs_root_item *root_item; 1459 int ret; 1460 1461 if (!root->reloc_root) 1462 goto out; 1463 1464 reloc_root = root->reloc_root; 1465 root_item = &reloc_root->root_item; 1466 1467 if (fs_info->reloc_ctl->merge_reloc_tree && 1468 btrfs_root_refs(root_item) == 0) { 1469 root->reloc_root = NULL; 1470 __del_reloc_root(reloc_root); 1471 } 1472 1473 if (reloc_root->commit_root != reloc_root->node) { 1474 btrfs_set_root_node(root_item, reloc_root->node); 1475 free_extent_buffer(reloc_root->commit_root); 1476 reloc_root->commit_root = btrfs_root_node(reloc_root); 1477 } 1478 1479 ret = btrfs_update_root(trans, fs_info->tree_root, 1480 &reloc_root->root_key, root_item); 1481 BUG_ON(ret); 1482 1483 out: 1484 return 0; 1485 } 1486 1487 /* 1488 * helper to find first cached inode with inode number >= objectid 1489 * in a subvolume 1490 */ 1491 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid) 1492 { 1493 struct rb_node *node; 1494 struct rb_node *prev; 1495 struct btrfs_inode *entry; 1496 struct inode *inode; 1497 1498 spin_lock(&root->inode_lock); 1499 again: 1500 node = root->inode_tree.rb_node; 1501 prev = NULL; 1502 while (node) { 1503 prev = node; 1504 entry = rb_entry(node, struct btrfs_inode, rb_node); 1505 1506 if (objectid < btrfs_ino(entry)) 1507 node = node->rb_left; 1508 else if (objectid > btrfs_ino(entry)) 1509 node = node->rb_right; 1510 else 1511 break; 1512 } 1513 if (!node) { 1514 while (prev) { 1515 entry = rb_entry(prev, struct btrfs_inode, rb_node); 1516 if (objectid <= btrfs_ino(entry)) { 1517 node = prev; 1518 break; 1519 } 1520 prev = rb_next(prev); 1521 } 1522 } 1523 while (node) { 1524 entry = rb_entry(node, struct btrfs_inode, rb_node); 1525 inode = igrab(&entry->vfs_inode); 1526 if (inode) { 1527 spin_unlock(&root->inode_lock); 1528 return inode; 1529 } 1530 1531 objectid = btrfs_ino(entry) + 1; 1532 if (cond_resched_lock(&root->inode_lock)) 1533 goto again; 1534 1535 node = rb_next(node); 1536 } 1537 spin_unlock(&root->inode_lock); 1538 return NULL; 1539 } 1540 1541 static int in_block_group(u64 bytenr, 1542 struct btrfs_block_group_cache *block_group) 1543 { 1544 if (bytenr >= block_group->key.objectid && 1545 bytenr < block_group->key.objectid + block_group->key.offset) 1546 return 1; 1547 return 0; 1548 } 1549 1550 /* 1551 * get new 
location of data 1552 */ 1553 static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, 1554 u64 bytenr, u64 num_bytes) 1555 { 1556 struct btrfs_root *root = BTRFS_I(reloc_inode)->root; 1557 struct btrfs_path *path; 1558 struct btrfs_file_extent_item *fi; 1559 struct extent_buffer *leaf; 1560 int ret; 1561 1562 path = btrfs_alloc_path(); 1563 if (!path) 1564 return -ENOMEM; 1565 1566 bytenr -= BTRFS_I(reloc_inode)->index_cnt; 1567 ret = btrfs_lookup_file_extent(NULL, root, path, 1568 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0); 1569 if (ret < 0) 1570 goto out; 1571 if (ret > 0) { 1572 ret = -ENOENT; 1573 goto out; 1574 } 1575 1576 leaf = path->nodes[0]; 1577 fi = btrfs_item_ptr(leaf, path->slots[0], 1578 struct btrfs_file_extent_item); 1579 1580 BUG_ON(btrfs_file_extent_offset(leaf, fi) || 1581 btrfs_file_extent_compression(leaf, fi) || 1582 btrfs_file_extent_encryption(leaf, fi) || 1583 btrfs_file_extent_other_encoding(leaf, fi)); 1584 1585 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) { 1586 ret = -EINVAL; 1587 goto out; 1588 } 1589 1590 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1591 ret = 0; 1592 out: 1593 btrfs_free_path(path); 1594 return ret; 1595 } 1596 1597 /* 1598 * update file extent items in the tree leaf to point to 1599 * the new locations. 1600 */ 1601 static noinline_for_stack 1602 int replace_file_extents(struct btrfs_trans_handle *trans, 1603 struct reloc_control *rc, 1604 struct btrfs_root *root, 1605 struct extent_buffer *leaf) 1606 { 1607 struct btrfs_fs_info *fs_info = root->fs_info; 1608 struct btrfs_key key; 1609 struct btrfs_file_extent_item *fi; 1610 struct inode *inode = NULL; 1611 u64 parent; 1612 u64 bytenr; 1613 u64 new_bytenr = 0; 1614 u64 num_bytes; 1615 u64 end; 1616 u32 nritems; 1617 u32 i; 1618 int ret = 0; 1619 int first = 1; 1620 int dirty = 0; 1621 1622 if (rc->stage != UPDATE_DATA_PTRS) 1623 return 0; 1624 1625 /* reloc trees always use full backref */ 1626 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 1627 parent = leaf->start; 1628 else 1629 parent = 0; 1630 1631 nritems = btrfs_header_nritems(leaf); 1632 for (i = 0; i < nritems; i++) { 1633 cond_resched(); 1634 btrfs_item_key_to_cpu(leaf, &key, i); 1635 if (key.type != BTRFS_EXTENT_DATA_KEY) 1636 continue; 1637 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 1638 if (btrfs_file_extent_type(leaf, fi) == 1639 BTRFS_FILE_EXTENT_INLINE) 1640 continue; 1641 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1642 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1643 if (bytenr == 0) 1644 continue; 1645 if (!in_block_group(bytenr, rc->block_group)) 1646 continue; 1647 1648 /* 1649 * if we are modifying block in fs tree, wait for readpage 1650 * to complete and drop the extent cache 1651 */ 1652 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 1653 if (first) { 1654 inode = find_next_inode(root, key.objectid); 1655 first = 0; 1656 } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) { 1657 btrfs_add_delayed_iput(inode); 1658 inode = find_next_inode(root, key.objectid); 1659 } 1660 if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) { 1661 end = key.offset + 1662 btrfs_file_extent_num_bytes(leaf, fi); 1663 WARN_ON(!IS_ALIGNED(key.offset, 1664 fs_info->sectorsize)); 1665 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); 1666 end--; 1667 ret = try_lock_extent(&BTRFS_I(inode)->io_tree, 1668 key.offset, end); 1669 if (!ret) 1670 continue; 1671 1672 btrfs_drop_extent_cache(BTRFS_I(inode), 1673 key.offset, end, 1); 1674 
unlock_extent(&BTRFS_I(inode)->io_tree, 1675 key.offset, end); 1676 } 1677 } 1678 1679 ret = get_new_location(rc->data_inode, &new_bytenr, 1680 bytenr, num_bytes); 1681 if (ret) { 1682 /* 1683 * Don't have to abort since we've not changed anything 1684 * in the file extent yet. 1685 */ 1686 break; 1687 } 1688 1689 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr); 1690 dirty = 1; 1691 1692 key.offset -= btrfs_file_extent_offset(leaf, fi); 1693 ret = btrfs_inc_extent_ref(trans, root, new_bytenr, 1694 num_bytes, parent, 1695 btrfs_header_owner(leaf), 1696 key.objectid, key.offset); 1697 if (ret) { 1698 btrfs_abort_transaction(trans, ret); 1699 break; 1700 } 1701 1702 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 1703 parent, btrfs_header_owner(leaf), 1704 key.objectid, key.offset); 1705 if (ret) { 1706 btrfs_abort_transaction(trans, ret); 1707 break; 1708 } 1709 } 1710 if (dirty) 1711 btrfs_mark_buffer_dirty(leaf); 1712 if (inode) 1713 btrfs_add_delayed_iput(inode); 1714 return ret; 1715 } 1716 1717 static noinline_for_stack 1718 int memcmp_node_keys(struct extent_buffer *eb, int slot, 1719 struct btrfs_path *path, int level) 1720 { 1721 struct btrfs_disk_key key1; 1722 struct btrfs_disk_key key2; 1723 btrfs_node_key(eb, &key1, slot); 1724 btrfs_node_key(path->nodes[level], &key2, path->slots[level]); 1725 return memcmp(&key1, &key2, sizeof(key1)); 1726 } 1727 1728 /* 1729 * try to replace tree blocks in fs tree with the new blocks 1730 * in reloc tree. tree blocks haven't been modified since the 1731 * reloc tree was create can be replaced. 1732 * 1733 * if a block was replaced, level of the block + 1 is returned. 1734 * if no block got replaced, 0 is returned. if there are other 1735 * errors, a negative error number is returned. 1736 */ 1737 static noinline_for_stack 1738 int replace_path(struct btrfs_trans_handle *trans, 1739 struct btrfs_root *dest, struct btrfs_root *src, 1740 struct btrfs_path *path, struct btrfs_key *next_key, 1741 int lowest_level, int max_level) 1742 { 1743 struct btrfs_fs_info *fs_info = dest->fs_info; 1744 struct extent_buffer *eb; 1745 struct extent_buffer *parent; 1746 struct btrfs_key key; 1747 u64 old_bytenr; 1748 u64 new_bytenr; 1749 u64 old_ptr_gen; 1750 u64 new_ptr_gen; 1751 u64 last_snapshot; 1752 u32 blocksize; 1753 int cow = 0; 1754 int level; 1755 int ret; 1756 int slot; 1757 1758 BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 1759 BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); 1760 1761 last_snapshot = btrfs_root_last_snapshot(&src->root_item); 1762 again: 1763 slot = path->slots[lowest_level]; 1764 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot); 1765 1766 eb = btrfs_lock_root_node(dest); 1767 btrfs_set_lock_blocking(eb); 1768 level = btrfs_header_level(eb); 1769 1770 if (level < lowest_level) { 1771 btrfs_tree_unlock(eb); 1772 free_extent_buffer(eb); 1773 return 0; 1774 } 1775 1776 if (cow) { 1777 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb); 1778 BUG_ON(ret); 1779 } 1780 btrfs_set_lock_blocking(eb); 1781 1782 if (next_key) { 1783 next_key->objectid = (u64)-1; 1784 next_key->type = (u8)-1; 1785 next_key->offset = (u64)-1; 1786 } 1787 1788 parent = eb; 1789 while (1) { 1790 struct btrfs_key first_key; 1791 1792 level = btrfs_header_level(parent); 1793 BUG_ON(level < lowest_level); 1794 1795 ret = btrfs_bin_search(parent, &key, level, &slot); 1796 if (ret && slot > 0) 1797 slot--; 1798 1799 if (next_key && slot + 1 < btrfs_header_nritems(parent)) 1800 btrfs_node_key_to_cpu(parent, 
next_key, slot + 1); 1801 1802 old_bytenr = btrfs_node_blockptr(parent, slot); 1803 blocksize = fs_info->nodesize; 1804 old_ptr_gen = btrfs_node_ptr_generation(parent, slot); 1805 btrfs_node_key_to_cpu(parent, &first_key, slot); 1806 1807 if (level <= max_level) { 1808 eb = path->nodes[level]; 1809 new_bytenr = btrfs_node_blockptr(eb, 1810 path->slots[level]); 1811 new_ptr_gen = btrfs_node_ptr_generation(eb, 1812 path->slots[level]); 1813 } else { 1814 new_bytenr = 0; 1815 new_ptr_gen = 0; 1816 } 1817 1818 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) { 1819 ret = level; 1820 break; 1821 } 1822 1823 if (new_bytenr == 0 || old_ptr_gen > last_snapshot || 1824 memcmp_node_keys(parent, slot, path, level)) { 1825 if (level <= lowest_level) { 1826 ret = 0; 1827 break; 1828 } 1829 1830 eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen, 1831 level - 1, &first_key); 1832 if (IS_ERR(eb)) { 1833 ret = PTR_ERR(eb); 1834 break; 1835 } else if (!extent_buffer_uptodate(eb)) { 1836 ret = -EIO; 1837 free_extent_buffer(eb); 1838 break; 1839 } 1840 btrfs_tree_lock(eb); 1841 if (cow) { 1842 ret = btrfs_cow_block(trans, dest, eb, parent, 1843 slot, &eb); 1844 BUG_ON(ret); 1845 } 1846 btrfs_set_lock_blocking(eb); 1847 1848 btrfs_tree_unlock(parent); 1849 free_extent_buffer(parent); 1850 1851 parent = eb; 1852 continue; 1853 } 1854 1855 if (!cow) { 1856 btrfs_tree_unlock(parent); 1857 free_extent_buffer(parent); 1858 cow = 1; 1859 goto again; 1860 } 1861 1862 btrfs_node_key_to_cpu(path->nodes[level], &key, 1863 path->slots[level]); 1864 btrfs_release_path(path); 1865 1866 path->lowest_level = level; 1867 ret = btrfs_search_slot(trans, src, &key, path, 0, 1); 1868 path->lowest_level = 0; 1869 BUG_ON(ret); 1870 1871 /* 1872 * Info qgroup to trace both subtrees. 1873 * 1874 * We must trace both trees. 1875 * 1) Tree reloc subtree 1876 * If not traced, we will leak data numbers 1877 * 2) Fs subtree 1878 * If not traced, we will double count old data 1879 * and tree block numbers, if current trans doesn't free 1880 * data reloc tree inode. 1881 */ 1882 ret = btrfs_qgroup_trace_subtree(trans, parent, 1883 btrfs_header_generation(parent), 1884 btrfs_header_level(parent)); 1885 if (ret < 0) 1886 break; 1887 ret = btrfs_qgroup_trace_subtree(trans, path->nodes[level], 1888 btrfs_header_generation(path->nodes[level]), 1889 btrfs_header_level(path->nodes[level])); 1890 if (ret < 0) 1891 break; 1892 1893 /* 1894 * swap blocks in fs tree and reloc tree. 
1895 */ 1896 btrfs_set_node_blockptr(parent, slot, new_bytenr); 1897 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen); 1898 btrfs_mark_buffer_dirty(parent); 1899 1900 btrfs_set_node_blockptr(path->nodes[level], 1901 path->slots[level], old_bytenr); 1902 btrfs_set_node_ptr_generation(path->nodes[level], 1903 path->slots[level], old_ptr_gen); 1904 btrfs_mark_buffer_dirty(path->nodes[level]); 1905 1906 ret = btrfs_inc_extent_ref(trans, src, old_bytenr, 1907 blocksize, path->nodes[level]->start, 1908 src->root_key.objectid, level - 1, 0); 1909 BUG_ON(ret); 1910 ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, 1911 blocksize, 0, dest->root_key.objectid, 1912 level - 1, 0); 1913 BUG_ON(ret); 1914 1915 ret = btrfs_free_extent(trans, src, new_bytenr, blocksize, 1916 path->nodes[level]->start, 1917 src->root_key.objectid, level - 1, 0); 1918 BUG_ON(ret); 1919 1920 ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize, 1921 0, dest->root_key.objectid, level - 1, 1922 0); 1923 BUG_ON(ret); 1924 1925 btrfs_unlock_up_safe(path, 0); 1926 1927 ret = level; 1928 break; 1929 } 1930 btrfs_tree_unlock(parent); 1931 free_extent_buffer(parent); 1932 return ret; 1933 } 1934 1935 /* 1936 * helper to find next relocated block in reloc tree 1937 */ 1938 static noinline_for_stack 1939 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, 1940 int *level) 1941 { 1942 struct extent_buffer *eb; 1943 int i; 1944 u64 last_snapshot; 1945 u32 nritems; 1946 1947 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 1948 1949 for (i = 0; i < *level; i++) { 1950 free_extent_buffer(path->nodes[i]); 1951 path->nodes[i] = NULL; 1952 } 1953 1954 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { 1955 eb = path->nodes[i]; 1956 nritems = btrfs_header_nritems(eb); 1957 while (path->slots[i] + 1 < nritems) { 1958 path->slots[i]++; 1959 if (btrfs_node_ptr_generation(eb, path->slots[i]) <= 1960 last_snapshot) 1961 continue; 1962 1963 *level = i; 1964 return 0; 1965 } 1966 free_extent_buffer(path->nodes[i]); 1967 path->nodes[i] = NULL; 1968 } 1969 return 1; 1970 } 1971 1972 /* 1973 * walk down reloc tree to find relocated block of lowest level 1974 */ 1975 static noinline_for_stack 1976 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, 1977 int *level) 1978 { 1979 struct btrfs_fs_info *fs_info = root->fs_info; 1980 struct extent_buffer *eb = NULL; 1981 int i; 1982 u64 bytenr; 1983 u64 ptr_gen = 0; 1984 u64 last_snapshot; 1985 u32 nritems; 1986 1987 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 1988 1989 for (i = *level; i > 0; i--) { 1990 struct btrfs_key first_key; 1991 1992 eb = path->nodes[i]; 1993 nritems = btrfs_header_nritems(eb); 1994 while (path->slots[i] < nritems) { 1995 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); 1996 if (ptr_gen > last_snapshot) 1997 break; 1998 path->slots[i]++; 1999 } 2000 if (path->slots[i] >= nritems) { 2001 if (i == *level) 2002 break; 2003 *level = i + 1; 2004 return 0; 2005 } 2006 if (i == 1) { 2007 *level = i; 2008 return 0; 2009 } 2010 2011 bytenr = btrfs_node_blockptr(eb, path->slots[i]); 2012 btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]); 2013 eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1, 2014 &first_key); 2015 if (IS_ERR(eb)) { 2016 return PTR_ERR(eb); 2017 } else if (!extent_buffer_uptodate(eb)) { 2018 free_extent_buffer(eb); 2019 return -EIO; 2020 } 2021 BUG_ON(btrfs_header_level(eb) != i - 1); 2022 path->nodes[i - 1] = eb; 2023 path->slots[i - 1] = 0; 2024 } 2025 return 1; 
2026 } 2027 2028 /* 2029 * invalidate extent cache for file extents whose key in range of 2030 * [min_key, max_key) 2031 */ 2032 static int invalidate_extent_cache(struct btrfs_root *root, 2033 struct btrfs_key *min_key, 2034 struct btrfs_key *max_key) 2035 { 2036 struct btrfs_fs_info *fs_info = root->fs_info; 2037 struct inode *inode = NULL; 2038 u64 objectid; 2039 u64 start, end; 2040 u64 ino; 2041 2042 objectid = min_key->objectid; 2043 while (1) { 2044 cond_resched(); 2045 iput(inode); 2046 2047 if (objectid > max_key->objectid) 2048 break; 2049 2050 inode = find_next_inode(root, objectid); 2051 if (!inode) 2052 break; 2053 ino = btrfs_ino(BTRFS_I(inode)); 2054 2055 if (ino > max_key->objectid) { 2056 iput(inode); 2057 break; 2058 } 2059 2060 objectid = ino + 1; 2061 if (!S_ISREG(inode->i_mode)) 2062 continue; 2063 2064 if (unlikely(min_key->objectid == ino)) { 2065 if (min_key->type > BTRFS_EXTENT_DATA_KEY) 2066 continue; 2067 if (min_key->type < BTRFS_EXTENT_DATA_KEY) 2068 start = 0; 2069 else { 2070 start = min_key->offset; 2071 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize)); 2072 } 2073 } else { 2074 start = 0; 2075 } 2076 2077 if (unlikely(max_key->objectid == ino)) { 2078 if (max_key->type < BTRFS_EXTENT_DATA_KEY) 2079 continue; 2080 if (max_key->type > BTRFS_EXTENT_DATA_KEY) { 2081 end = (u64)-1; 2082 } else { 2083 if (max_key->offset == 0) 2084 continue; 2085 end = max_key->offset; 2086 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); 2087 end--; 2088 } 2089 } else { 2090 end = (u64)-1; 2091 } 2092 2093 /* the lock_extent waits for readpage to complete */ 2094 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 2095 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1); 2096 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 2097 } 2098 return 0; 2099 } 2100 2101 static int find_next_key(struct btrfs_path *path, int level, 2102 struct btrfs_key *key) 2103 2104 { 2105 while (level < BTRFS_MAX_LEVEL) { 2106 if (!path->nodes[level]) 2107 break; 2108 if (path->slots[level] + 1 < 2109 btrfs_header_nritems(path->nodes[level])) { 2110 btrfs_node_key_to_cpu(path->nodes[level], key, 2111 path->slots[level] + 1); 2112 return 0; 2113 } 2114 level++; 2115 } 2116 return 1; 2117 } 2118 2119 /* 2120 * merge the relocated tree blocks in reloc tree with corresponding 2121 * fs tree. 
2122 */ 2123 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, 2124 struct btrfs_root *root) 2125 { 2126 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2127 LIST_HEAD(inode_list); 2128 struct btrfs_key key; 2129 struct btrfs_key next_key; 2130 struct btrfs_trans_handle *trans = NULL; 2131 struct btrfs_root *reloc_root; 2132 struct btrfs_root_item *root_item; 2133 struct btrfs_path *path; 2134 struct extent_buffer *leaf; 2135 int level; 2136 int max_level; 2137 int replaced = 0; 2138 int ret; 2139 int err = 0; 2140 u32 min_reserved; 2141 2142 path = btrfs_alloc_path(); 2143 if (!path) 2144 return -ENOMEM; 2145 path->reada = READA_FORWARD; 2146 2147 reloc_root = root->reloc_root; 2148 root_item = &reloc_root->root_item; 2149 2150 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 2151 level = btrfs_root_level(root_item); 2152 extent_buffer_get(reloc_root->node); 2153 path->nodes[level] = reloc_root->node; 2154 path->slots[level] = 0; 2155 } else { 2156 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 2157 2158 level = root_item->drop_level; 2159 BUG_ON(level == 0); 2160 path->lowest_level = level; 2161 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); 2162 path->lowest_level = 0; 2163 if (ret < 0) { 2164 btrfs_free_path(path); 2165 return ret; 2166 } 2167 2168 btrfs_node_key_to_cpu(path->nodes[level], &next_key, 2169 path->slots[level]); 2170 WARN_ON(memcmp(&key, &next_key, sizeof(key))); 2171 2172 btrfs_unlock_up_safe(path, 0); 2173 } 2174 2175 min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2176 memset(&next_key, 0, sizeof(next_key)); 2177 2178 while (1) { 2179 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved, 2180 BTRFS_RESERVE_FLUSH_ALL); 2181 if (ret) { 2182 err = ret; 2183 goto out; 2184 } 2185 trans = btrfs_start_transaction(root, 0); 2186 if (IS_ERR(trans)) { 2187 err = PTR_ERR(trans); 2188 trans = NULL; 2189 goto out; 2190 } 2191 trans->block_rsv = rc->block_rsv; 2192 2193 replaced = 0; 2194 max_level = level; 2195 2196 ret = walk_down_reloc_tree(reloc_root, path, &level); 2197 if (ret < 0) { 2198 err = ret; 2199 goto out; 2200 } 2201 if (ret > 0) 2202 break; 2203 2204 if (!find_next_key(path, level, &key) && 2205 btrfs_comp_cpu_keys(&next_key, &key) >= 0) { 2206 ret = 0; 2207 } else { 2208 ret = replace_path(trans, root, reloc_root, path, 2209 &next_key, level, max_level); 2210 } 2211 if (ret < 0) { 2212 err = ret; 2213 goto out; 2214 } 2215 2216 if (ret > 0) { 2217 level = ret; 2218 btrfs_node_key_to_cpu(path->nodes[level], &key, 2219 path->slots[level]); 2220 replaced = 1; 2221 } 2222 2223 ret = walk_up_reloc_tree(reloc_root, path, &level); 2224 if (ret > 0) 2225 break; 2226 2227 BUG_ON(level == 0); 2228 /* 2229 * save the merging progress in the drop_progress. 2230 * this is OK since root refs == 1 in this case. 2231 */ 2232 btrfs_node_key(path->nodes[level], &root_item->drop_progress, 2233 path->slots[level]); 2234 root_item->drop_level = level; 2235 2236 btrfs_end_transaction_throttle(trans); 2237 trans = NULL; 2238 2239 btrfs_btree_balance_dirty(fs_info); 2240 2241 if (replaced && rc->stage == UPDATE_DATA_PTRS) 2242 invalidate_extent_cache(root, &key, &next_key); 2243 } 2244 2245 /* 2246 * handle the case only one block in the fs tree need to be 2247 * relocated and the block is tree root. 
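* COWing the root node reallocates the root block itself; replace_path() above only swaps blocks that have a parent node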
2248 */ 2249 leaf = btrfs_lock_root_node(root); 2250 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf); 2251 btrfs_tree_unlock(leaf); 2252 free_extent_buffer(leaf); 2253 if (ret < 0) 2254 err = ret; 2255 out: 2256 btrfs_free_path(path); 2257 2258 if (err == 0) { 2259 memset(&root_item->drop_progress, 0, 2260 sizeof(root_item->drop_progress)); 2261 root_item->drop_level = 0; 2262 btrfs_set_root_refs(root_item, 0); 2263 btrfs_update_reloc_root(trans, root); 2264 } 2265 2266 if (trans) 2267 btrfs_end_transaction_throttle(trans); 2268 2269 btrfs_btree_balance_dirty(fs_info); 2270 2271 if (replaced && rc->stage == UPDATE_DATA_PTRS) 2272 invalidate_extent_cache(root, &key, &next_key); 2273 2274 return err; 2275 } 2276 2277 static noinline_for_stack 2278 int prepare_to_merge(struct reloc_control *rc, int err) 2279 { 2280 struct btrfs_root *root = rc->extent_root; 2281 struct btrfs_fs_info *fs_info = root->fs_info; 2282 struct btrfs_root *reloc_root; 2283 struct btrfs_trans_handle *trans; 2284 LIST_HEAD(reloc_roots); 2285 u64 num_bytes = 0; 2286 int ret; 2287 2288 mutex_lock(&fs_info->reloc_mutex); 2289 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2290 rc->merging_rsv_size += rc->nodes_relocated * 2; 2291 mutex_unlock(&fs_info->reloc_mutex); 2292 2293 again: 2294 if (!err) { 2295 num_bytes = rc->merging_rsv_size; 2296 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes, 2297 BTRFS_RESERVE_FLUSH_ALL); 2298 if (ret) 2299 err = ret; 2300 } 2301 2302 trans = btrfs_join_transaction(rc->extent_root); 2303 if (IS_ERR(trans)) { 2304 if (!err) 2305 btrfs_block_rsv_release(fs_info, rc->block_rsv, 2306 num_bytes); 2307 return PTR_ERR(trans); 2308 } 2309 2310 if (!err) { 2311 if (num_bytes != rc->merging_rsv_size) { 2312 btrfs_end_transaction(trans); 2313 btrfs_block_rsv_release(fs_info, rc->block_rsv, 2314 num_bytes); 2315 goto again; 2316 } 2317 } 2318 2319 rc->merge_reloc_tree = 1; 2320 2321 while (!list_empty(&rc->reloc_roots)) { 2322 reloc_root = list_entry(rc->reloc_roots.next, 2323 struct btrfs_root, root_list); 2324 list_del_init(&reloc_root->root_list); 2325 2326 root = read_fs_root(fs_info, reloc_root->root_key.offset); 2327 BUG_ON(IS_ERR(root)); 2328 BUG_ON(root->reloc_root != reloc_root); 2329 2330 /* 2331 * set reference count to 1, so btrfs_recover_relocation 2332 * knows it should resume merging 2333 */ 2334 if (!err) 2335 btrfs_set_root_refs(&reloc_root->root_item, 1); 2336 btrfs_update_reloc_root(trans, root); 2337 2338 list_add(&reloc_root->root_list, &reloc_roots); 2339 } 2340 2341 list_splice(&reloc_roots, &rc->reloc_roots); 2342 2343 if (!err) 2344 btrfs_commit_transaction(trans); 2345 else 2346 btrfs_end_transaction(trans); 2347 return err; 2348 } 2349 2350 static noinline_for_stack 2351 void free_reloc_roots(struct list_head *list) 2352 { 2353 struct btrfs_root *reloc_root; 2354 2355 while (!list_empty(list)) { 2356 reloc_root = list_entry(list->next, struct btrfs_root, 2357 root_list); 2358 __del_reloc_root(reloc_root); 2359 free_extent_buffer(reloc_root->node); 2360 free_extent_buffer(reloc_root->commit_root); 2361 reloc_root->node = NULL; 2362 reloc_root->commit_root = NULL; 2363 } 2364 } 2365 2366 static noinline_for_stack 2367 void merge_reloc_roots(struct reloc_control *rc) 2368 { 2369 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2370 struct btrfs_root *root; 2371 struct btrfs_root *reloc_root; 2372 LIST_HEAD(reloc_roots); 2373 int found = 0; 2374 int ret = 0; 2375 again: 2376 root = rc->extent_root; 2377 2378 /* 2379 this
serializes us with btrfs_record_root_in_transaction, 2380 * we have to make sure nobody is in the middle of 2381 * adding their roots to the list while we are 2382 * doing this splice 2383 */ 2384 mutex_lock(&fs_info->reloc_mutex); 2385 list_splice_init(&rc->reloc_roots, &reloc_roots); 2386 mutex_unlock(&fs_info->reloc_mutex); 2387 2388 while (!list_empty(&reloc_roots)) { 2389 found = 1; 2390 reloc_root = list_entry(reloc_roots.next, 2391 struct btrfs_root, root_list); 2392 2393 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 2394 root = read_fs_root(fs_info, 2395 reloc_root->root_key.offset); 2396 BUG_ON(IS_ERR(root)); 2397 BUG_ON(root->reloc_root != reloc_root); 2398 2399 ret = merge_reloc_root(rc, root); 2400 if (ret) { 2401 if (list_empty(&reloc_root->root_list)) 2402 list_add_tail(&reloc_root->root_list, 2403 &reloc_roots); 2404 goto out; 2405 } 2406 } else { 2407 list_del_init(&reloc_root->root_list); 2408 } 2409 2410 ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1); 2411 if (ret < 0) { 2412 if (list_empty(&reloc_root->root_list)) 2413 list_add_tail(&reloc_root->root_list, 2414 &reloc_roots); 2415 goto out; 2416 } 2417 } 2418 2419 if (found) { 2420 found = 0; 2421 goto again; 2422 } 2423 out: 2424 if (ret) { 2425 btrfs_handle_fs_error(fs_info, ret, NULL); 2426 if (!list_empty(&reloc_roots)) 2427 free_reloc_roots(&reloc_roots); 2428 2429 /* new reloc root may be added */ 2430 mutex_lock(&fs_info->reloc_mutex); 2431 list_splice_init(&rc->reloc_roots, &reloc_roots); 2432 mutex_unlock(&fs_info->reloc_mutex); 2433 if (!list_empty(&reloc_roots)) 2434 free_reloc_roots(&reloc_roots); 2435 } 2436 2437 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2438 } 2439 2440 static void free_block_list(struct rb_root *blocks) 2441 { 2442 struct tree_block *block; 2443 struct rb_node *rb_node; 2444 while ((rb_node = rb_first(blocks))) { 2445 block = rb_entry(rb_node, struct tree_block, rb_node); 2446 rb_erase(rb_node, blocks); 2447 kfree(block); 2448 } 2449 } 2450 2451 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, 2452 struct btrfs_root *reloc_root) 2453 { 2454 struct btrfs_fs_info *fs_info = reloc_root->fs_info; 2455 struct btrfs_root *root; 2456 2457 if (reloc_root->last_trans == trans->transid) 2458 return 0; 2459 2460 root = read_fs_root(fs_info, reloc_root->root_key.offset); 2461 BUG_ON(IS_ERR(root)); 2462 BUG_ON(root->reloc_root != reloc_root); 2463 2464 return btrfs_record_root_in_trans(trans, root); 2465 } 2466 2467 static noinline_for_stack 2468 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2469 struct reloc_control *rc, 2470 struct backref_node *node, 2471 struct backref_edge *edges[]) 2472 { 2473 struct backref_node *next; 2474 struct btrfs_root *root; 2475 int index = 0; 2476 2477 next = node; 2478 while (1) { 2479 cond_resched(); 2480 next = walk_up_backref(next, edges, &index); 2481 root = next->root; 2482 BUG_ON(!root); 2483 BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state)); 2484 2485 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2486 record_reloc_root_in_trans(trans, root); 2487 break; 2488 } 2489 2490 btrfs_record_root_in_trans(trans, root); 2491 root = root->reloc_root; 2492 2493 if (next->new_bytenr != root->node->start) { 2494 BUG_ON(next->new_bytenr); 2495 BUG_ON(!list_empty(&next->list)); 2496 next->new_bytenr = root->node->start; 2497 next->root = root; 2498 list_add_tail(&next->list, 2499 &rc->backref_cache.changed); 2500 __mark_block_processed(rc, next); 2501 break; 2502 } 2503 2504 
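/* unexpected: the backref node already points at the current reloc root's node, warn and try another upper path */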
WARN_ON(1); 2505 root = NULL; 2506 next = walk_down_backref(edges, &index); 2507 if (!next || next->level <= node->level) 2508 break; 2509 } 2510 if (!root) 2511 return NULL; 2512 2513 next = node; 2514 /* setup backref node path for btrfs_reloc_cow_block */ 2515 while (1) { 2516 rc->backref_cache.path[next->level] = next; 2517 if (--index < 0) 2518 break; 2519 next = edges[index]->node[UPPER]; 2520 } 2521 return root; 2522 } 2523 2524 /* 2525 * select a tree root for relocation. return NULL if the block 2526 * is reference counted. we should use do_relocation() in this 2527 * case. return a tree root pointer if the block isn't reference 2528 * counted. return -ENOENT if the block is the root of a reloc tree. 2529 */ 2530 static noinline_for_stack 2531 struct btrfs_root *select_one_root(struct backref_node *node) 2532 { 2533 struct backref_node *next; 2534 struct btrfs_root *root; 2535 struct btrfs_root *fs_root = NULL; 2536 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2537 int index = 0; 2538 2539 next = node; 2540 while (1) { 2541 cond_resched(); 2542 next = walk_up_backref(next, edges, &index); 2543 root = next->root; 2544 BUG_ON(!root); 2545 2546 /* no other choice for non-reference counted tree */ 2547 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 2548 return root; 2549 2550 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) 2551 fs_root = root; 2552 2553 if (next != node) 2554 return NULL; 2555 2556 next = walk_down_backref(edges, &index); 2557 if (!next || next->level <= node->level) 2558 break; 2559 } 2560 2561 if (!fs_root) 2562 return ERR_PTR(-ENOENT); 2563 return fs_root; 2564 } 2565 2566 static noinline_for_stack 2567 u64 calcu_metadata_size(struct reloc_control *rc, 2568 struct backref_node *node, int reserve) 2569 { 2570 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2571 struct backref_node *next = node; 2572 struct backref_edge *edge; 2573 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2574 u64 num_bytes = 0; 2575 int index = 0; 2576 2577 BUG_ON(reserve && node->processed); 2578 2579 while (next) { 2580 cond_resched(); 2581 while (1) { 2582 if (next->processed && (reserve || next != node)) 2583 break; 2584 2585 num_bytes += fs_info->nodesize; 2586 2587 if (list_empty(&next->upper)) 2588 break; 2589 2590 edge = list_entry(next->upper.next, 2591 struct backref_edge, list[LOWER]); 2592 edges[index++] = edge; 2593 next = edge->node[UPPER]; 2594 } 2595 next = walk_down_backref(edges, &index); 2596 } 2597 return num_bytes; 2598 } 2599 2600 static int reserve_metadata_space(struct btrfs_trans_handle *trans, 2601 struct reloc_control *rc, 2602 struct backref_node *node) 2603 { 2604 struct btrfs_root *root = rc->extent_root; 2605 struct btrfs_fs_info *fs_info = root->fs_info; 2606 u64 num_bytes; 2607 int ret; 2608 u64 tmp; 2609 2610 num_bytes = calcu_metadata_size(rc, node, 1) * 2; 2611 2612 trans->block_rsv = rc->block_rsv; 2613 rc->reserved_bytes += num_bytes; 2614 2615 /* 2616 * We are under a transaction here so we can only do limited flushing. 2617 * If we get an enospc just kick back -EAGAIN so we know to drop the 2618 * transaction and try to refill when we can flush all the things. 2619 */ 2620 ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes, 2621 BTRFS_RESERVE_FLUSH_LIMIT); 2622 if (ret) { 2623 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES; 2624 while (tmp <= rc->reserved_bytes) 2625 tmp <<= 1; 2626 /* 2627 * only one thread can access block_rsv at this point, 2628 * so we don't need to hold a lock to protect block_rsv.
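* the refill target grows geometrically (tmp doubles until it exceeds rc->reserved_bytes), so repeated -EAGAIN retries do not bump the reservation one node at a time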
2629 * we expand more reservation size here to allow enough 2630 * space for relocation and we will return earlier in the 2631 * enospc case. 2632 */ 2633 rc->block_rsv->size = tmp + fs_info->nodesize * 2634 RELOCATION_RESERVED_NODES; 2635 return -EAGAIN; 2636 } 2637 2638 return 0; 2639 } 2640 2641 /* 2642 * relocate a tree block, and then update pointers in upper level 2643 * blocks that reference the block to point to the new location. 2644 * 2645 * if called by link_to_upper, the block has already been relocated. 2646 * in that case this function just updates pointers. 2647 */ 2648 static int do_relocation(struct btrfs_trans_handle *trans, 2649 struct reloc_control *rc, 2650 struct backref_node *node, 2651 struct btrfs_key *key, 2652 struct btrfs_path *path, int lowest) 2653 { 2654 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2655 struct backref_node *upper; 2656 struct backref_edge *edge; 2657 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2658 struct btrfs_root *root; 2659 struct extent_buffer *eb; 2660 u32 blocksize; 2661 u64 bytenr; 2662 u64 generation; 2663 int slot; 2664 int ret; 2665 int err = 0; 2666 2667 BUG_ON(lowest && node->eb); 2668 2669 path->lowest_level = node->level + 1; 2670 rc->backref_cache.path[node->level] = node; 2671 list_for_each_entry(edge, &node->upper, list[LOWER]) { 2672 struct btrfs_key first_key; 2673 2674 cond_resched(); 2675 2676 upper = edge->node[UPPER]; 2677 root = select_reloc_root(trans, rc, upper, edges); 2678 BUG_ON(!root); 2679 2680 if (upper->eb && !upper->locked) { 2681 if (!lowest) { 2682 ret = btrfs_bin_search(upper->eb, key, 2683 upper->level, &slot); 2684 BUG_ON(ret); 2685 bytenr = btrfs_node_blockptr(upper->eb, slot); 2686 if (node->eb->start == bytenr) 2687 goto next; 2688 } 2689 drop_node_buffer(upper); 2690 } 2691 2692 if (!upper->eb) { 2693 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2694 if (ret) { 2695 if (ret < 0) 2696 err = ret; 2697 else 2698 err = -ENOENT; 2699 2700 btrfs_release_path(path); 2701 break; 2702 } 2703 2704 if (!upper->eb) { 2705 upper->eb = path->nodes[upper->level]; 2706 path->nodes[upper->level] = NULL; 2707 } else { 2708 BUG_ON(upper->eb != path->nodes[upper->level]); 2709 } 2710 2711 upper->locked = 1; 2712 path->locks[upper->level] = 0; 2713 2714 slot = path->slots[upper->level]; 2715 btrfs_release_path(path); 2716 } else { 2717 ret = btrfs_bin_search(upper->eb, key, upper->level, 2718 &slot); 2719 BUG_ON(ret); 2720 } 2721 2722 bytenr = btrfs_node_blockptr(upper->eb, slot); 2723 if (lowest) { 2724 if (bytenr != node->bytenr) { 2725 btrfs_err(root->fs_info, 2726 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", 2727 bytenr, node->bytenr, slot, 2728 upper->eb->start); 2729 err = -EIO; 2730 goto next; 2731 } 2732 } else { 2733 if (node->eb->start == bytenr) 2734 goto next; 2735 } 2736 2737 blocksize = root->fs_info->nodesize; 2738 generation = btrfs_node_ptr_generation(upper->eb, slot); 2739 btrfs_node_key_to_cpu(upper->eb, &first_key, slot); 2740 eb = read_tree_block(fs_info, bytenr, generation, 2741 upper->level - 1, &first_key); 2742 if (IS_ERR(eb)) { 2743 err = PTR_ERR(eb); 2744 goto next; 2745 } else if (!extent_buffer_uptodate(eb)) { 2746 free_extent_buffer(eb); 2747 err = -EIO; 2748 goto next; 2749 } 2750 btrfs_tree_lock(eb); 2751 btrfs_set_lock_blocking(eb); 2752 2753 if (!node->eb) { 2754 ret = btrfs_cow_block(trans, root, eb, upper->eb, 2755 slot, &eb); 2756 btrfs_tree_unlock(eb); 2757 free_extent_buffer(eb); 2758 if (ret < 0) { 2759 err = ret; 2760 goto
next; 2761 } 2762 BUG_ON(node->eb != eb); 2763 } else { 2764 btrfs_set_node_blockptr(upper->eb, slot, 2765 node->eb->start); 2766 btrfs_set_node_ptr_generation(upper->eb, slot, 2767 trans->transid); 2768 btrfs_mark_buffer_dirty(upper->eb); 2769 2770 ret = btrfs_inc_extent_ref(trans, root, 2771 node->eb->start, blocksize, 2772 upper->eb->start, 2773 btrfs_header_owner(upper->eb), 2774 node->level, 0); 2775 BUG_ON(ret); 2776 2777 ret = btrfs_drop_subtree(trans, root, eb, upper->eb); 2778 BUG_ON(ret); 2779 } 2780 next: 2781 if (!upper->pending) 2782 drop_node_buffer(upper); 2783 else 2784 unlock_node_buffer(upper); 2785 if (err) 2786 break; 2787 } 2788 2789 if (!err && node->pending) { 2790 drop_node_buffer(node); 2791 list_move_tail(&node->list, &rc->backref_cache.changed); 2792 node->pending = 0; 2793 } 2794 2795 path->lowest_level = 0; 2796 BUG_ON(err == -ENOSPC); 2797 return err; 2798 } 2799 2800 static int link_to_upper(struct btrfs_trans_handle *trans, 2801 struct reloc_control *rc, 2802 struct backref_node *node, 2803 struct btrfs_path *path) 2804 { 2805 struct btrfs_key key; 2806 2807 btrfs_node_key_to_cpu(node->eb, &key, 0); 2808 return do_relocation(trans, rc, node, &key, path, 0); 2809 } 2810 2811 static int finish_pending_nodes(struct btrfs_trans_handle *trans, 2812 struct reloc_control *rc, 2813 struct btrfs_path *path, int err) 2814 { 2815 LIST_HEAD(list); 2816 struct backref_cache *cache = &rc->backref_cache; 2817 struct backref_node *node; 2818 int level; 2819 int ret; 2820 2821 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2822 while (!list_empty(&cache->pending[level])) { 2823 node = list_entry(cache->pending[level].next, 2824 struct backref_node, list); 2825 list_move_tail(&node->list, &list); 2826 BUG_ON(!node->pending); 2827 2828 if (!err) { 2829 ret = link_to_upper(trans, rc, node, path); 2830 if (ret < 0) 2831 err = ret; 2832 } 2833 } 2834 list_splice_init(&list, &cache->pending[level]); 2835 } 2836 return err; 2837 } 2838 2839 static void mark_block_processed(struct reloc_control *rc, 2840 u64 bytenr, u32 blocksize) 2841 { 2842 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, 2843 EXTENT_DIRTY); 2844 } 2845 2846 static void __mark_block_processed(struct reloc_control *rc, 2847 struct backref_node *node) 2848 { 2849 u32 blocksize; 2850 if (node->level == 0 || 2851 in_block_group(node->bytenr, rc->block_group)) { 2852 blocksize = rc->extent_root->fs_info->nodesize; 2853 mark_block_processed(rc, node->bytenr, blocksize); 2854 } 2855 node->processed = 1; 2856 } 2857 2858 /* 2859 * mark a block and all blocks directly/indirectly reference the block 2860 * as processed. 
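* the walk follows backref edges upward from the node, the same traversal used by calcu_metadata_size()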
2861 */ 2862 static void update_processed_blocks(struct reloc_control *rc, 2863 struct backref_node *node) 2864 { 2865 struct backref_node *next = node; 2866 struct backref_edge *edge; 2867 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2868 int index = 0; 2869 2870 while (next) { 2871 cond_resched(); 2872 while (1) { 2873 if (next->processed) 2874 break; 2875 2876 __mark_block_processed(rc, next); 2877 2878 if (list_empty(&next->upper)) 2879 break; 2880 2881 edge = list_entry(next->upper.next, 2882 struct backref_edge, list[LOWER]); 2883 edges[index++] = edge; 2884 next = edge->node[UPPER]; 2885 } 2886 next = walk_down_backref(edges, &index); 2887 } 2888 } 2889 2890 static int tree_block_processed(u64 bytenr, struct reloc_control *rc) 2891 { 2892 u32 blocksize = rc->extent_root->fs_info->nodesize; 2893 2894 if (test_range_bit(&rc->processed_blocks, bytenr, 2895 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2896 return 1; 2897 return 0; 2898 } 2899 2900 static int get_tree_block_key(struct btrfs_fs_info *fs_info, 2901 struct tree_block *block) 2902 { 2903 struct extent_buffer *eb; 2904 2905 BUG_ON(block->key_ready); 2906 eb = read_tree_block(fs_info, block->bytenr, block->key.offset, 2907 block->level, NULL); 2908 if (IS_ERR(eb)) { 2909 return PTR_ERR(eb); 2910 } else if (!extent_buffer_uptodate(eb)) { 2911 free_extent_buffer(eb); 2912 return -EIO; 2913 } 2914 WARN_ON(btrfs_header_level(eb) != block->level); 2915 if (block->level == 0) 2916 btrfs_item_key_to_cpu(eb, &block->key, 0); 2917 else 2918 btrfs_node_key_to_cpu(eb, &block->key, 0); 2919 free_extent_buffer(eb); 2920 block->key_ready = 1; 2921 return 0; 2922 } 2923 2924 /* 2925 * helper function to relocate a tree block 2926 */ 2927 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2928 struct reloc_control *rc, 2929 struct backref_node *node, 2930 struct btrfs_key *key, 2931 struct btrfs_path *path) 2932 { 2933 struct btrfs_root *root; 2934 int ret = 0; 2935 2936 if (!node) 2937 return 0; 2938 2939 BUG_ON(node->processed); 2940 root = select_one_root(node); 2941 if (root == ERR_PTR(-ENOENT)) { 2942 update_processed_blocks(rc, node); 2943 goto out; 2944 } 2945 2946 if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { 2947 ret = reserve_metadata_space(trans, rc, node); 2948 if (ret) 2949 goto out; 2950 } 2951 2952 if (root) { 2953 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { 2954 BUG_ON(node->new_bytenr); 2955 BUG_ON(!list_empty(&node->list)); 2956 btrfs_record_root_in_trans(trans, root); 2957 root = root->reloc_root; 2958 node->new_bytenr = root->node->start; 2959 node->root = root; 2960 list_add_tail(&node->list, &rc->backref_cache.changed); 2961 } else { 2962 path->lowest_level = node->level; 2963 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2964 btrfs_release_path(path); 2965 if (ret > 0) 2966 ret = 0; 2967 } 2968 if (!ret) 2969 update_processed_blocks(rc, node); 2970 } else { 2971 ret = do_relocation(trans, rc, node, key, path, 1); 2972 } 2973 out: 2974 if (ret || node->level == 0 || node->cowonly) 2975 remove_backref_node(&rc->backref_cache, node); 2976 return ret; 2977 } 2978 2979 /* 2980 * relocate a list of blocks 2981 */ 2982 static noinline_for_stack 2983 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 2984 struct reloc_control *rc, struct rb_root *blocks) 2985 { 2986 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2987 struct backref_node *node; 2988 struct btrfs_path *path; 2989 struct tree_block *block; 2990 struct rb_node *rb_node; 2991 int ret; 2992 int err 
= 0; 2993 2994 path = btrfs_alloc_path(); 2995 if (!path) { 2996 err = -ENOMEM; 2997 goto out_free_blocks; 2998 } 2999 3000 rb_node = rb_first(blocks); 3001 while (rb_node) { 3002 block = rb_entry(rb_node, struct tree_block, rb_node); 3003 if (!block->key_ready) 3004 readahead_tree_block(fs_info, block->bytenr); 3005 rb_node = rb_next(rb_node); 3006 } 3007 3008 rb_node = rb_first(blocks); 3009 while (rb_node) { 3010 block = rb_entry(rb_node, struct tree_block, rb_node); 3011 if (!block->key_ready) { 3012 err = get_tree_block_key(fs_info, block); 3013 if (err) 3014 goto out_free_path; 3015 } 3016 rb_node = rb_next(rb_node); 3017 } 3018 3019 rb_node = rb_first(blocks); 3020 while (rb_node) { 3021 block = rb_entry(rb_node, struct tree_block, rb_node); 3022 3023 node = build_backref_tree(rc, &block->key, 3024 block->level, block->bytenr); 3025 if (IS_ERR(node)) { 3026 err = PTR_ERR(node); 3027 goto out; 3028 } 3029 3030 ret = relocate_tree_block(trans, rc, node, &block->key, 3031 path); 3032 if (ret < 0) { 3033 if (ret != -EAGAIN || rb_node == rb_first(blocks)) 3034 err = ret; 3035 goto out; 3036 } 3037 rb_node = rb_next(rb_node); 3038 } 3039 out: 3040 err = finish_pending_nodes(trans, rc, path, err); 3041 3042 out_free_path: 3043 btrfs_free_path(path); 3044 out_free_blocks: 3045 free_block_list(blocks); 3046 return err; 3047 } 3048 3049 static noinline_for_stack 3050 int prealloc_file_extent_cluster(struct inode *inode, 3051 struct file_extent_cluster *cluster) 3052 { 3053 u64 alloc_hint = 0; 3054 u64 start; 3055 u64 end; 3056 u64 offset = BTRFS_I(inode)->index_cnt; 3057 u64 num_bytes; 3058 int nr = 0; 3059 int ret = 0; 3060 u64 prealloc_start = cluster->start - offset; 3061 u64 prealloc_end = cluster->end - offset; 3062 u64 cur_offset; 3063 struct extent_changeset *data_reserved = NULL; 3064 3065 BUG_ON(cluster->start != cluster->boundary[0]); 3066 inode_lock(inode); 3067 3068 ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start, 3069 prealloc_end + 1 - prealloc_start); 3070 if (ret) 3071 goto out; 3072 3073 cur_offset = prealloc_start; 3074 while (nr < cluster->nr) { 3075 start = cluster->boundary[nr] - offset; 3076 if (nr + 1 < cluster->nr) 3077 end = cluster->boundary[nr + 1] - 1 - offset; 3078 else 3079 end = cluster->end - offset; 3080 3081 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3082 num_bytes = end + 1 - start; 3083 if (cur_offset < start) 3084 btrfs_free_reserved_data_space(inode, data_reserved, 3085 cur_offset, start - cur_offset); 3086 ret = btrfs_prealloc_file_range(inode, 0, start, 3087 num_bytes, num_bytes, 3088 end + 1, &alloc_hint); 3089 cur_offset = end + 1; 3090 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3091 if (ret) 3092 break; 3093 nr++; 3094 } 3095 if (cur_offset < prealloc_end) 3096 btrfs_free_reserved_data_space(inode, data_reserved, 3097 cur_offset, prealloc_end + 1 - cur_offset); 3098 out: 3099 inode_unlock(inode); 3100 extent_changeset_free(data_reserved); 3101 return ret; 3102 } 3103 3104 static noinline_for_stack 3105 int setup_extent_mapping(struct inode *inode, u64 start, u64 end, 3106 u64 block_start) 3107 { 3108 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3109 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 3110 struct extent_map *em; 3111 int ret = 0; 3112 3113 em = alloc_extent_map(); 3114 if (!em) 3115 return -ENOMEM; 3116 3117 em->start = start; 3118 em->len = end + 1 - start; 3119 em->block_len = em->len; 3120 em->block_start = block_start; 3121 em->bdev = fs_info->fs_devices->latest_bdev; 
3122 set_bit(EXTENT_FLAG_PINNED, &em->flags); 3123 3124 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3125 while (1) { 3126 write_lock(&em_tree->lock); 3127 ret = add_extent_mapping(em_tree, em, 0); 3128 write_unlock(&em_tree->lock); 3129 if (ret != -EEXIST) { 3130 free_extent_map(em); 3131 break; 3132 } 3133 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); 3134 } 3135 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3136 return ret; 3137 } 3138 3139 static int relocate_file_extent_cluster(struct inode *inode, 3140 struct file_extent_cluster *cluster) 3141 { 3142 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3143 u64 page_start; 3144 u64 page_end; 3145 u64 offset = BTRFS_I(inode)->index_cnt; 3146 unsigned long index; 3147 unsigned long last_index; 3148 struct page *page; 3149 struct file_ra_state *ra; 3150 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 3151 int nr = 0; 3152 int ret = 0; 3153 3154 if (!cluster->nr) 3155 return 0; 3156 3157 ra = kzalloc(sizeof(*ra), GFP_NOFS); 3158 if (!ra) 3159 return -ENOMEM; 3160 3161 ret = prealloc_file_extent_cluster(inode, cluster); 3162 if (ret) 3163 goto out; 3164 3165 file_ra_state_init(ra, inode->i_mapping); 3166 3167 ret = setup_extent_mapping(inode, cluster->start - offset, 3168 cluster->end - offset, cluster->start); 3169 if (ret) 3170 goto out; 3171 3172 index = (cluster->start - offset) >> PAGE_SHIFT; 3173 last_index = (cluster->end - offset) >> PAGE_SHIFT; 3174 while (index <= last_index) { 3175 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), 3176 PAGE_SIZE); 3177 if (ret) 3178 goto out; 3179 3180 page = find_lock_page(inode->i_mapping, index); 3181 if (!page) { 3182 page_cache_sync_readahead(inode->i_mapping, 3183 ra, NULL, index, 3184 last_index + 1 - index); 3185 page = find_or_create_page(inode->i_mapping, index, 3186 mask); 3187 if (!page) { 3188 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3189 PAGE_SIZE, true); 3190 ret = -ENOMEM; 3191 goto out; 3192 } 3193 } 3194 3195 if (PageReadahead(page)) { 3196 page_cache_async_readahead(inode->i_mapping, 3197 ra, NULL, page, index, 3198 last_index + 1 - index); 3199 } 3200 3201 if (!PageUptodate(page)) { 3202 btrfs_readpage(NULL, page); 3203 lock_page(page); 3204 if (!PageUptodate(page)) { 3205 unlock_page(page); 3206 put_page(page); 3207 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3208 PAGE_SIZE, true); 3209 btrfs_delalloc_release_extents(BTRFS_I(inode), 3210 PAGE_SIZE, true); 3211 ret = -EIO; 3212 goto out; 3213 } 3214 } 3215 3216 page_start = page_offset(page); 3217 page_end = page_start + PAGE_SIZE - 1; 3218 3219 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); 3220 3221 set_page_extent_mapped(page); 3222 3223 if (nr < cluster->nr && 3224 page_start + offset == cluster->boundary[nr]) { 3225 set_extent_bits(&BTRFS_I(inode)->io_tree, 3226 page_start, page_end, 3227 EXTENT_BOUNDARY); 3228 nr++; 3229 } 3230 3231 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 3232 NULL, 0); 3233 if (ret) { 3234 unlock_page(page); 3235 put_page(page); 3236 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3237 PAGE_SIZE, true); 3238 btrfs_delalloc_release_extents(BTRFS_I(inode), 3239 PAGE_SIZE, true); 3240 3241 clear_extent_bits(&BTRFS_I(inode)->io_tree, 3242 page_start, page_end, 3243 EXTENT_LOCKED | EXTENT_BOUNDARY); 3244 goto out; 3245 3246 } 3247 set_page_dirty(page); 3248 3249 unlock_extent(&BTRFS_I(inode)->io_tree, 3250 page_start, page_end); 3251 unlock_page(page); 3252 put_page(page); 3253 3254 index++; 3255 
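/* page is dirty and unlocked; drop the per-page reservation bookkeeping and throttle writeback before moving to the next page */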
btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, 3256 false); 3257 balance_dirty_pages_ratelimited(inode->i_mapping); 3258 btrfs_throttle(fs_info); 3259 } 3260 WARN_ON(nr != cluster->nr); 3261 out: 3262 kfree(ra); 3263 return ret; 3264 } 3265 3266 static noinline_for_stack 3267 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key, 3268 struct file_extent_cluster *cluster) 3269 { 3270 int ret; 3271 3272 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 3273 ret = relocate_file_extent_cluster(inode, cluster); 3274 if (ret) 3275 return ret; 3276 cluster->nr = 0; 3277 } 3278 3279 if (!cluster->nr) 3280 cluster->start = extent_key->objectid; 3281 else 3282 BUG_ON(cluster->nr >= MAX_EXTENTS); 3283 cluster->end = extent_key->objectid + extent_key->offset - 1; 3284 cluster->boundary[cluster->nr] = extent_key->objectid; 3285 cluster->nr++; 3286 3287 if (cluster->nr >= MAX_EXTENTS) { 3288 ret = relocate_file_extent_cluster(inode, cluster); 3289 if (ret) 3290 return ret; 3291 cluster->nr = 0; 3292 } 3293 return 0; 3294 } 3295 3296 /* 3297 * helper to add a tree block to the list. 3298 * the major work is getting the generation and level of the block 3299 */ 3300 static int add_tree_block(struct reloc_control *rc, 3301 struct btrfs_key *extent_key, 3302 struct btrfs_path *path, 3303 struct rb_root *blocks) 3304 { 3305 struct extent_buffer *eb; 3306 struct btrfs_extent_item *ei; 3307 struct btrfs_tree_block_info *bi; 3308 struct tree_block *block; 3309 struct rb_node *rb_node; 3310 u32 item_size; 3311 int level = -1; 3312 u64 generation; 3313 3314 eb = path->nodes[0]; 3315 item_size = btrfs_item_size_nr(eb, path->slots[0]); 3316 3317 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || 3318 item_size >= sizeof(*ei) + sizeof(*bi)) { 3319 ei = btrfs_item_ptr(eb, path->slots[0], 3320 struct btrfs_extent_item); 3321 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { 3322 bi = (struct btrfs_tree_block_info *)(ei + 1); 3323 level = btrfs_tree_block_level(eb, bi); 3324 } else { 3325 level = (int)extent_key->offset; 3326 } 3327 generation = btrfs_extent_generation(eb, ei); 3328 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) { 3329 btrfs_print_v0_err(eb->fs_info); 3330 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL); 3331 return -EINVAL; 3332 } else { 3333 BUG(); 3334 } 3335 3336 btrfs_release_path(path); 3337 3338 BUG_ON(level == -1); 3339 3340 block = kmalloc(sizeof(*block), GFP_NOFS); 3341 if (!block) 3342 return -ENOMEM; 3343 3344 block->bytenr = extent_key->objectid; 3345 block->key.objectid = rc->extent_root->fs_info->nodesize; 3346 block->key.offset = generation; 3347 block->level = level; 3348 block->key_ready = 0; 3349 3350 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); 3351 if (rb_node) 3352 backref_tree_panic(rb_node, -EEXIST, block->bytenr); 3353 3354 return 0; 3355 } 3356 3357 /* 3358 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY 3359 */ 3360 static int __add_tree_block(struct reloc_control *rc, 3361 u64 bytenr, u32 blocksize, 3362 struct rb_root *blocks) 3363 { 3364 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3365 struct btrfs_path *path; 3366 struct btrfs_key key; 3367 int ret; 3368 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 3369 3370 if (tree_block_processed(bytenr, rc)) 3371 return 0; 3372 3373 if (tree_search(blocks, bytenr)) 3374 return 0; 3375 3376 path = btrfs_alloc_path(); 3377 if (!path) 3378 return -ENOMEM; 3379 again: 3380 key.objectid = bytenr; 
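/* skinny metadata keys the extent item by tree level (BTRFS_METADATA_ITEM_KEY), otherwise the key offset is the block size */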
3381 if (skinny) { 3382 key.type = BTRFS_METADATA_ITEM_KEY; 3383 key.offset = (u64)-1; 3384 } else { 3385 key.type = BTRFS_EXTENT_ITEM_KEY; 3386 key.offset = blocksize; 3387 } 3388 3389 path->search_commit_root = 1; 3390 path->skip_locking = 1; 3391 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); 3392 if (ret < 0) 3393 goto out; 3394 3395 if (ret > 0 && skinny) { 3396 if (path->slots[0]) { 3397 path->slots[0]--; 3398 btrfs_item_key_to_cpu(path->nodes[0], &key, 3399 path->slots[0]); 3400 if (key.objectid == bytenr && 3401 (key.type == BTRFS_METADATA_ITEM_KEY || 3402 (key.type == BTRFS_EXTENT_ITEM_KEY && 3403 key.offset == blocksize))) 3404 ret = 0; 3405 } 3406 3407 if (ret) { 3408 skinny = false; 3409 btrfs_release_path(path); 3410 goto again; 3411 } 3412 } 3413 if (ret) { 3414 ASSERT(ret == 1); 3415 btrfs_print_leaf(path->nodes[0]); 3416 btrfs_err(fs_info, 3417 "tree block extent item (%llu) is not found in extent tree", 3418 bytenr); 3419 WARN_ON(1); 3420 ret = -EINVAL; 3421 goto out; 3422 } 3423 3424 ret = add_tree_block(rc, &key, path, blocks); 3425 out: 3426 btrfs_free_path(path); 3427 return ret; 3428 } 3429 3430 /* 3431 * helper to check if the block uses full backrefs for pointers in it 3432 */ 3433 static int block_use_full_backref(struct reloc_control *rc, 3434 struct extent_buffer *eb) 3435 { 3436 u64 flags; 3437 int ret; 3438 3439 if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) || 3440 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV) 3441 return 1; 3442 3443 ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info, 3444 eb->start, btrfs_header_level(eb), 1, 3445 NULL, &flags); 3446 BUG_ON(ret); 3447 3448 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) 3449 ret = 1; 3450 else 3451 ret = 0; 3452 return ret; 3453 } 3454 3455 static int delete_block_group_cache(struct btrfs_fs_info *fs_info, 3456 struct btrfs_block_group_cache *block_group, 3457 struct inode *inode, 3458 u64 ino) 3459 { 3460 struct btrfs_key key; 3461 struct btrfs_root *root = fs_info->tree_root; 3462 struct btrfs_trans_handle *trans; 3463 int ret = 0; 3464 3465 if (inode) 3466 goto truncate; 3467 3468 key.objectid = ino; 3469 key.type = BTRFS_INODE_ITEM_KEY; 3470 key.offset = 0; 3471 3472 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 3473 if (IS_ERR(inode)) 3474 return -ENOENT; 3475 3476 truncate: 3477 ret = btrfs_check_trunc_cache_free_space(fs_info, 3478 &fs_info->global_block_rsv); 3479 if (ret) 3480 goto out; 3481 3482 trans = btrfs_join_transaction(root); 3483 if (IS_ERR(trans)) { 3484 ret = PTR_ERR(trans); 3485 goto out; 3486 } 3487 3488 ret = btrfs_truncate_free_space_cache(trans, block_group, inode); 3489 3490 btrfs_end_transaction(trans); 3491 btrfs_btree_balance_dirty(fs_info); 3492 out: 3493 iput(inode); 3494 return ret; 3495 } 3496 3497 /* 3498 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY 3499 * this function scans the fs tree to find blocks that reference the data extent 3500 */ 3501 static int find_data_references(struct reloc_control *rc, 3502 struct btrfs_key *extent_key, 3503 struct extent_buffer *leaf, 3504 struct btrfs_extent_data_ref *ref, 3505 struct rb_root *blocks) 3506 { 3507 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3508 struct btrfs_path *path; 3509 struct tree_block *block; 3510 struct btrfs_root *root; 3511 struct btrfs_file_extent_item *fi; 3512 struct rb_node *rb_node; 3513 struct btrfs_key key; 3514 u64 ref_root; 3515 u64 ref_objectid; 3516 u64 ref_offset; 3517 u32 ref_count; 3518 u32 nritems; 3519 int err = 0; 3520
int added = 0; 3521 int counted; 3522 int ret; 3523 3524 ref_root = btrfs_extent_data_ref_root(leaf, ref); 3525 ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref); 3526 ref_offset = btrfs_extent_data_ref_offset(leaf, ref); 3527 ref_count = btrfs_extent_data_ref_count(leaf, ref); 3528 3529 /* 3530 * This is an extent belonging to the free space cache, lets just delete 3531 * it and redo the search. 3532 */ 3533 if (ref_root == BTRFS_ROOT_TREE_OBJECTID) { 3534 ret = delete_block_group_cache(fs_info, rc->block_group, 3535 NULL, ref_objectid); 3536 if (ret != -ENOENT) 3537 return ret; 3538 ret = 0; 3539 } 3540 3541 path = btrfs_alloc_path(); 3542 if (!path) 3543 return -ENOMEM; 3544 path->reada = READA_FORWARD; 3545 3546 root = read_fs_root(fs_info, ref_root); 3547 if (IS_ERR(root)) { 3548 err = PTR_ERR(root); 3549 goto out; 3550 } 3551 3552 key.objectid = ref_objectid; 3553 key.type = BTRFS_EXTENT_DATA_KEY; 3554 if (ref_offset > ((u64)-1 << 32)) 3555 key.offset = 0; 3556 else 3557 key.offset = ref_offset; 3558 3559 path->search_commit_root = 1; 3560 path->skip_locking = 1; 3561 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3562 if (ret < 0) { 3563 err = ret; 3564 goto out; 3565 } 3566 3567 leaf = path->nodes[0]; 3568 nritems = btrfs_header_nritems(leaf); 3569 /* 3570 * the references in tree blocks that use full backrefs 3571 * are not counted in 3572 */ 3573 if (block_use_full_backref(rc, leaf)) 3574 counted = 0; 3575 else 3576 counted = 1; 3577 rb_node = tree_search(blocks, leaf->start); 3578 if (rb_node) { 3579 if (counted) 3580 added = 1; 3581 else 3582 path->slots[0] = nritems; 3583 } 3584 3585 while (ref_count > 0) { 3586 while (path->slots[0] >= nritems) { 3587 ret = btrfs_next_leaf(root, path); 3588 if (ret < 0) { 3589 err = ret; 3590 goto out; 3591 } 3592 if (WARN_ON(ret > 0)) 3593 goto out; 3594 3595 leaf = path->nodes[0]; 3596 nritems = btrfs_header_nritems(leaf); 3597 added = 0; 3598 3599 if (block_use_full_backref(rc, leaf)) 3600 counted = 0; 3601 else 3602 counted = 1; 3603 rb_node = tree_search(blocks, leaf->start); 3604 if (rb_node) { 3605 if (counted) 3606 added = 1; 3607 else 3608 path->slots[0] = nritems; 3609 } 3610 } 3611 3612 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3613 if (WARN_ON(key.objectid != ref_objectid || 3614 key.type != BTRFS_EXTENT_DATA_KEY)) 3615 break; 3616 3617 fi = btrfs_item_ptr(leaf, path->slots[0], 3618 struct btrfs_file_extent_item); 3619 3620 if (btrfs_file_extent_type(leaf, fi) == 3621 BTRFS_FILE_EXTENT_INLINE) 3622 goto next; 3623 3624 if (btrfs_file_extent_disk_bytenr(leaf, fi) != 3625 extent_key->objectid) 3626 goto next; 3627 3628 key.offset -= btrfs_file_extent_offset(leaf, fi); 3629 if (key.offset != ref_offset) 3630 goto next; 3631 3632 if (counted) 3633 ref_count--; 3634 if (added) 3635 goto next; 3636 3637 if (!tree_block_processed(leaf->start, rc)) { 3638 block = kmalloc(sizeof(*block), GFP_NOFS); 3639 if (!block) { 3640 err = -ENOMEM; 3641 break; 3642 } 3643 block->bytenr = leaf->start; 3644 btrfs_item_key_to_cpu(leaf, &block->key, 0); 3645 block->level = 0; 3646 block->key_ready = 1; 3647 rb_node = tree_insert(blocks, block->bytenr, 3648 &block->rb_node); 3649 if (rb_node) 3650 backref_tree_panic(rb_node, -EEXIST, 3651 block->bytenr); 3652 } 3653 if (counted) 3654 added = 1; 3655 else 3656 path->slots[0] = nritems; 3657 next: 3658 path->slots[0]++; 3659 3660 } 3661 out: 3662 btrfs_free_path(path); 3663 return err; 3664 } 3665 3666 /* 3667 * helper to find all tree blocks that reference a given data extent 3668 */ 
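/* a data extent may be referenced by keyed data refs (root/objectid/offset) or by shared refs keyed on the owning tree block; both inline refs and standalone ref items are handled below */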
3669 static noinline_for_stack 3670 int add_data_references(struct reloc_control *rc, 3671 struct btrfs_key *extent_key, 3672 struct btrfs_path *path, 3673 struct rb_root *blocks) 3674 { 3675 struct btrfs_key key; 3676 struct extent_buffer *eb; 3677 struct btrfs_extent_data_ref *dref; 3678 struct btrfs_extent_inline_ref *iref; 3679 unsigned long ptr; 3680 unsigned long end; 3681 u32 blocksize = rc->extent_root->fs_info->nodesize; 3682 int ret = 0; 3683 int err = 0; 3684 3685 eb = path->nodes[0]; 3686 ptr = btrfs_item_ptr_offset(eb, path->slots[0]); 3687 end = ptr + btrfs_item_size_nr(eb, path->slots[0]); 3688 ptr += sizeof(struct btrfs_extent_item); 3689 3690 while (ptr < end) { 3691 iref = (struct btrfs_extent_inline_ref *)ptr; 3692 key.type = btrfs_get_extent_inline_ref_type(eb, iref, 3693 BTRFS_REF_TYPE_DATA); 3694 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3695 key.offset = btrfs_extent_inline_ref_offset(eb, iref); 3696 ret = __add_tree_block(rc, key.offset, blocksize, 3697 blocks); 3698 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3699 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 3700 ret = find_data_references(rc, extent_key, 3701 eb, dref, blocks); 3702 } else { 3703 ret = -EUCLEAN; 3704 btrfs_err(rc->extent_root->fs_info, 3705 "extent %llu slot %d has an invalid inline ref type", 3706 eb->start, path->slots[0]); 3707 } 3708 if (ret) { 3709 err = ret; 3710 goto out; 3711 } 3712 ptr += btrfs_extent_inline_ref_size(key.type); 3713 } 3714 WARN_ON(ptr > end); 3715 3716 while (1) { 3717 cond_resched(); 3718 eb = path->nodes[0]; 3719 if (path->slots[0] >= btrfs_header_nritems(eb)) { 3720 ret = btrfs_next_leaf(rc->extent_root, path); 3721 if (ret < 0) { 3722 err = ret; 3723 break; 3724 } 3725 if (ret > 0) 3726 break; 3727 eb = path->nodes[0]; 3728 } 3729 3730 btrfs_item_key_to_cpu(eb, &key, path->slots[0]); 3731 if (key.objectid != extent_key->objectid) 3732 break; 3733 3734 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3735 ret = __add_tree_block(rc, key.offset, blocksize, 3736 blocks); 3737 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3738 dref = btrfs_item_ptr(eb, path->slots[0], 3739 struct btrfs_extent_data_ref); 3740 ret = find_data_references(rc, extent_key, 3741 eb, dref, blocks); 3742 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) { 3743 btrfs_print_v0_err(eb->fs_info); 3744 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL); 3745 ret = -EINVAL; 3746 } else { 3747 ret = 0; 3748 } 3749 if (ret) { 3750 err = ret; 3751 break; 3752 } 3753 path->slots[0]++; 3754 } 3755 out: 3756 btrfs_release_path(path); 3757 if (err) 3758 free_block_list(blocks); 3759 return err; 3760 } 3761 3762 /* 3763 * helper to find next unprocessed extent 3764 */ 3765 static noinline_for_stack 3766 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, 3767 struct btrfs_key *extent_key) 3768 { 3769 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3770 struct btrfs_key key; 3771 struct extent_buffer *leaf; 3772 u64 start, end, last; 3773 int ret; 3774 3775 last = rc->block_group->key.objectid + rc->block_group->key.offset; 3776 while (1) { 3777 cond_resched(); 3778 if (rc->search_start >= last) { 3779 ret = 1; 3780 break; 3781 } 3782 3783 key.objectid = rc->search_start; 3784 key.type = BTRFS_EXTENT_ITEM_KEY; 3785 key.offset = 0; 3786 3787 path->search_commit_root = 1; 3788 path->skip_locking = 1; 3789 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 3790 0, 0); 3791 if (ret < 0) 3792 break; 3793 next: 3794 leaf = path->nodes[0]; 3795 if 
(path->slots[0] >= btrfs_header_nritems(leaf)) { 3796 ret = btrfs_next_leaf(rc->extent_root, path); 3797 if (ret != 0) 3798 break; 3799 leaf = path->nodes[0]; 3800 } 3801 3802 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3803 if (key.objectid >= last) { 3804 ret = 1; 3805 break; 3806 } 3807 3808 if (key.type != BTRFS_EXTENT_ITEM_KEY && 3809 key.type != BTRFS_METADATA_ITEM_KEY) { 3810 path->slots[0]++; 3811 goto next; 3812 } 3813 3814 if (key.type == BTRFS_EXTENT_ITEM_KEY && 3815 key.objectid + key.offset <= rc->search_start) { 3816 path->slots[0]++; 3817 goto next; 3818 } 3819 3820 if (key.type == BTRFS_METADATA_ITEM_KEY && 3821 key.objectid + fs_info->nodesize <= 3822 rc->search_start) { 3823 path->slots[0]++; 3824 goto next; 3825 } 3826 3827 ret = find_first_extent_bit(&rc->processed_blocks, 3828 key.objectid, &start, &end, 3829 EXTENT_DIRTY, NULL); 3830 3831 if (ret == 0 && start <= key.objectid) { 3832 btrfs_release_path(path); 3833 rc->search_start = end + 1; 3834 } else { 3835 if (key.type == BTRFS_EXTENT_ITEM_KEY) 3836 rc->search_start = key.objectid + key.offset; 3837 else 3838 rc->search_start = key.objectid + 3839 fs_info->nodesize; 3840 memcpy(extent_key, &key, sizeof(key)); 3841 return 0; 3842 } 3843 } 3844 btrfs_release_path(path); 3845 return ret; 3846 } 3847 3848 static void set_reloc_control(struct reloc_control *rc) 3849 { 3850 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3851 3852 mutex_lock(&fs_info->reloc_mutex); 3853 fs_info->reloc_ctl = rc; 3854 mutex_unlock(&fs_info->reloc_mutex); 3855 } 3856 3857 static void unset_reloc_control(struct reloc_control *rc) 3858 { 3859 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3860 3861 mutex_lock(&fs_info->reloc_mutex); 3862 fs_info->reloc_ctl = NULL; 3863 mutex_unlock(&fs_info->reloc_mutex); 3864 } 3865 3866 static int check_extent_flags(u64 flags) 3867 { 3868 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3869 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3870 return 1; 3871 if (!(flags & BTRFS_EXTENT_FLAG_DATA) && 3872 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3873 return 1; 3874 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3875 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 3876 return 1; 3877 return 0; 3878 } 3879 3880 static noinline_for_stack 3881 int prepare_to_relocate(struct reloc_control *rc) 3882 { 3883 struct btrfs_trans_handle *trans; 3884 int ret; 3885 3886 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info, 3887 BTRFS_BLOCK_RSV_TEMP); 3888 if (!rc->block_rsv) 3889 return -ENOMEM; 3890 3891 memset(&rc->cluster, 0, sizeof(rc->cluster)); 3892 rc->search_start = rc->block_group->key.objectid; 3893 rc->extents_found = 0; 3894 rc->nodes_relocated = 0; 3895 rc->merging_rsv_size = 0; 3896 rc->reserved_bytes = 0; 3897 rc->block_rsv->size = rc->extent_root->fs_info->nodesize * 3898 RELOCATION_RESERVED_NODES; 3899 ret = btrfs_block_rsv_refill(rc->extent_root, 3900 rc->block_rsv, rc->block_rsv->size, 3901 BTRFS_RESERVE_FLUSH_ALL); 3902 if (ret) 3903 return ret; 3904 3905 rc->create_reloc_tree = 1; 3906 set_reloc_control(rc); 3907 3908 trans = btrfs_join_transaction(rc->extent_root); 3909 if (IS_ERR(trans)) { 3910 unset_reloc_control(rc); 3911 /* 3912 * extent tree is not a ref_cow tree and has no reloc_root to 3913 * cleanup. And callers are responsible to free the above 3914 * block rsv. 
3915 */ 3916 return PTR_ERR(trans); 3917 } 3918 btrfs_commit_transaction(trans); 3919 return 0; 3920 } 3921 3922 static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 3923 { 3924 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3925 struct rb_root blocks = RB_ROOT; 3926 struct btrfs_key key; 3927 struct btrfs_trans_handle *trans = NULL; 3928 struct btrfs_path *path; 3929 struct btrfs_extent_item *ei; 3930 u64 flags; 3931 u32 item_size; 3932 int ret; 3933 int err = 0; 3934 int progress = 0; 3935 3936 path = btrfs_alloc_path(); 3937 if (!path) 3938 return -ENOMEM; 3939 path->reada = READA_FORWARD; 3940 3941 ret = prepare_to_relocate(rc); 3942 if (ret) { 3943 err = ret; 3944 goto out_free; 3945 } 3946 3947 while (1) { 3948 rc->reserved_bytes = 0; 3949 ret = btrfs_block_rsv_refill(rc->extent_root, 3950 rc->block_rsv, rc->block_rsv->size, 3951 BTRFS_RESERVE_FLUSH_ALL); 3952 if (ret) { 3953 err = ret; 3954 break; 3955 } 3956 progress++; 3957 trans = btrfs_start_transaction(rc->extent_root, 0); 3958 if (IS_ERR(trans)) { 3959 err = PTR_ERR(trans); 3960 trans = NULL; 3961 break; 3962 } 3963 restart: 3964 if (update_backref_cache(trans, &rc->backref_cache)) { 3965 btrfs_end_transaction(trans); 3966 continue; 3967 } 3968 3969 ret = find_next_extent(rc, path, &key); 3970 if (ret < 0) 3971 err = ret; 3972 if (ret != 0) 3973 break; 3974 3975 rc->extents_found++; 3976 3977 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 3978 struct btrfs_extent_item); 3979 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 3980 if (item_size >= sizeof(*ei)) { 3981 flags = btrfs_extent_flags(path->nodes[0], ei); 3982 ret = check_extent_flags(flags); 3983 BUG_ON(ret); 3984 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) { 3985 err = -EINVAL; 3986 btrfs_print_v0_err(trans->fs_info); 3987 btrfs_abort_transaction(trans, err); 3988 break; 3989 } else { 3990 BUG(); 3991 } 3992 3993 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 3994 ret = add_tree_block(rc, &key, path, &blocks); 3995 } else if (rc->stage == UPDATE_DATA_PTRS && 3996 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3997 ret = add_data_references(rc, &key, path, &blocks); 3998 } else { 3999 btrfs_release_path(path); 4000 ret = 0; 4001 } 4002 if (ret < 0) { 4003 err = ret; 4004 break; 4005 } 4006 4007 if (!RB_EMPTY_ROOT(&blocks)) { 4008 ret = relocate_tree_blocks(trans, rc, &blocks); 4009 if (ret < 0) { 4010 /* 4011 * if we fail to relocate tree blocks, force to update 4012 * backref cache when committing transaction. 
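* a stale last_trans makes update_backref_cache() refresh the cache at the top of the next loop iteration before relocation continues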
4013 */ 4014 rc->backref_cache.last_trans = trans->transid - 1; 4015 4016 if (ret != -EAGAIN) { 4017 err = ret; 4018 break; 4019 } 4020 rc->extents_found--; 4021 rc->search_start = key.objectid; 4022 } 4023 } 4024 4025 btrfs_end_transaction_throttle(trans); 4026 btrfs_btree_balance_dirty(fs_info); 4027 trans = NULL; 4028 4029 if (rc->stage == MOVE_DATA_EXTENTS && 4030 (flags & BTRFS_EXTENT_FLAG_DATA)) { 4031 rc->found_file_extent = 1; 4032 ret = relocate_data_extent(rc->data_inode, 4033 &key, &rc->cluster); 4034 if (ret < 0) { 4035 err = ret; 4036 break; 4037 } 4038 } 4039 } 4040 if (trans && progress && err == -ENOSPC) { 4041 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags); 4042 if (ret == 1) { 4043 err = 0; 4044 progress = 0; 4045 goto restart; 4046 } 4047 } 4048 4049 btrfs_release_path(path); 4050 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); 4051 4052 if (trans) { 4053 btrfs_end_transaction_throttle(trans); 4054 btrfs_btree_balance_dirty(fs_info); 4055 } 4056 4057 if (!err) { 4058 ret = relocate_file_extent_cluster(rc->data_inode, 4059 &rc->cluster); 4060 if (ret < 0) 4061 err = ret; 4062 } 4063 4064 rc->create_reloc_tree = 0; 4065 set_reloc_control(rc); 4066 4067 backref_cache_cleanup(&rc->backref_cache); 4068 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1); 4069 4070 err = prepare_to_merge(rc, err); 4071 4072 merge_reloc_roots(rc); 4073 4074 rc->merge_reloc_tree = 0; 4075 unset_reloc_control(rc); 4076 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1); 4077 4078 /* get rid of pinned extents */ 4079 trans = btrfs_join_transaction(rc->extent_root); 4080 if (IS_ERR(trans)) { 4081 err = PTR_ERR(trans); 4082 goto out_free; 4083 } 4084 btrfs_commit_transaction(trans); 4085 out_free: 4086 btrfs_free_block_rsv(fs_info, rc->block_rsv); 4087 btrfs_free_path(path); 4088 return err; 4089 } 4090 4091 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 4092 struct btrfs_root *root, u64 objectid) 4093 { 4094 struct btrfs_path *path; 4095 struct btrfs_inode_item *item; 4096 struct extent_buffer *leaf; 4097 int ret; 4098 4099 path = btrfs_alloc_path(); 4100 if (!path) 4101 return -ENOMEM; 4102 4103 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 4104 if (ret) 4105 goto out; 4106 4107 leaf = path->nodes[0]; 4108 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 4109 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 4110 btrfs_set_inode_generation(leaf, item, 1); 4111 btrfs_set_inode_size(leaf, item, 0); 4112 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 4113 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 4114 BTRFS_INODE_PREALLOC); 4115 btrfs_mark_buffer_dirty(leaf); 4116 out: 4117 btrfs_free_path(path); 4118 return ret; 4119 } 4120 4121 /* 4122 * helper to create inode for data relocation. 
4123 * the inode is in data relocation tree and its link count is 0 4124 */ 4125 static noinline_for_stack 4126 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, 4127 struct btrfs_block_group_cache *group) 4128 { 4129 struct inode *inode = NULL; 4130 struct btrfs_trans_handle *trans; 4131 struct btrfs_root *root; 4132 struct btrfs_key key; 4133 u64 objectid; 4134 int err = 0; 4135 4136 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID); 4137 if (IS_ERR(root)) 4138 return ERR_CAST(root); 4139 4140 trans = btrfs_start_transaction(root, 6); 4141 if (IS_ERR(trans)) 4142 return ERR_CAST(trans); 4143 4144 err = btrfs_find_free_objectid(root, &objectid); 4145 if (err) 4146 goto out; 4147 4148 err = __insert_orphan_inode(trans, root, objectid); 4149 BUG_ON(err); 4150 4151 key.objectid = objectid; 4152 key.type = BTRFS_INODE_ITEM_KEY; 4153 key.offset = 0; 4154 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 4155 BUG_ON(IS_ERR(inode)); 4156 BTRFS_I(inode)->index_cnt = group->key.objectid; 4157 4158 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 4159 out: 4160 btrfs_end_transaction(trans); 4161 btrfs_btree_balance_dirty(fs_info); 4162 if (err) { 4163 if (inode) 4164 iput(inode); 4165 inode = ERR_PTR(err); 4166 } 4167 return inode; 4168 } 4169 4170 static struct reloc_control *alloc_reloc_control(void) 4171 { 4172 struct reloc_control *rc; 4173 4174 rc = kzalloc(sizeof(*rc), GFP_NOFS); 4175 if (!rc) 4176 return NULL; 4177 4178 INIT_LIST_HEAD(&rc->reloc_roots); 4179 backref_cache_init(&rc->backref_cache); 4180 mapping_tree_init(&rc->reloc_root_tree); 4181 extent_io_tree_init(&rc->processed_blocks, NULL); 4182 return rc; 4183 } 4184 4185 /* 4186 * Print the block group being relocated 4187 */ 4188 static void describe_relocation(struct btrfs_fs_info *fs_info, 4189 struct btrfs_block_group_cache *block_group) 4190 { 4191 char buf[128]; /* prefixed by a '|' that'll be dropped */ 4192 u64 flags = block_group->flags; 4193 4194 /* Shouldn't happen */ 4195 if (!flags) { 4196 strcpy(buf, "|NONE"); 4197 } else { 4198 char *bp = buf; 4199 4200 #define DESCRIBE_FLAG(f, d) \ 4201 if (flags & BTRFS_BLOCK_GROUP_##f) { \ 4202 bp += snprintf(bp, buf - bp + sizeof(buf), "|%s", d); \ 4203 flags &= ~BTRFS_BLOCK_GROUP_##f; \ 4204 } 4205 DESCRIBE_FLAG(DATA, "data"); 4206 DESCRIBE_FLAG(SYSTEM, "system"); 4207 DESCRIBE_FLAG(METADATA, "metadata"); 4208 DESCRIBE_FLAG(RAID0, "raid0"); 4209 DESCRIBE_FLAG(RAID1, "raid1"); 4210 DESCRIBE_FLAG(DUP, "dup"); 4211 DESCRIBE_FLAG(RAID10, "raid10"); 4212 DESCRIBE_FLAG(RAID5, "raid5"); 4213 DESCRIBE_FLAG(RAID6, "raid6"); 4214 if (flags) 4215 snprintf(bp, buf - bp + sizeof(buf), "|0x%llx", flags); 4216 #undef DESCRIBE_FLAG 4217 } 4218 4219 btrfs_info(fs_info, 4220 "relocating block group %llu flags %s", 4221 block_group->key.objectid, buf + 1); 4222 } 4223 4224 /* 4225 * function to relocate all extents in a block group. 
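* relocation runs in stages: MOVE_DATA_EXTENTS copies data and relocates tree blocks out of the block group, then UPDATE_DATA_PTRS rewrites file extent pointers; the loop below repeats until no more extents are found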
4226 */ 4227 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) 4228 { 4229 struct btrfs_root *extent_root = fs_info->extent_root; 4230 struct reloc_control *rc; 4231 struct inode *inode; 4232 struct btrfs_path *path; 4233 int ret; 4234 int rw = 0; 4235 int err = 0; 4236 4237 rc = alloc_reloc_control(); 4238 if (!rc) 4239 return -ENOMEM; 4240 4241 rc->extent_root = extent_root; 4242 4243 rc->block_group = btrfs_lookup_block_group(fs_info, group_start); 4244 BUG_ON(!rc->block_group); 4245 4246 ret = btrfs_inc_block_group_ro(rc->block_group); 4247 if (ret) { 4248 err = ret; 4249 goto out; 4250 } 4251 rw = 1; 4252 4253 path = btrfs_alloc_path(); 4254 if (!path) { 4255 err = -ENOMEM; 4256 goto out; 4257 } 4258 4259 inode = lookup_free_space_inode(fs_info, rc->block_group, path); 4260 btrfs_free_path(path); 4261 4262 if (!IS_ERR(inode)) 4263 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0); 4264 else 4265 ret = PTR_ERR(inode); 4266 4267 if (ret && ret != -ENOENT) { 4268 err = ret; 4269 goto out; 4270 } 4271 4272 rc->data_inode = create_reloc_inode(fs_info, rc->block_group); 4273 if (IS_ERR(rc->data_inode)) { 4274 err = PTR_ERR(rc->data_inode); 4275 rc->data_inode = NULL; 4276 goto out; 4277 } 4278 4279 describe_relocation(fs_info, rc->block_group); 4280 4281 btrfs_wait_block_group_reservations(rc->block_group); 4282 btrfs_wait_nocow_writers(rc->block_group); 4283 btrfs_wait_ordered_roots(fs_info, U64_MAX, 4284 rc->block_group->key.objectid, 4285 rc->block_group->key.offset); 4286 4287 while (1) { 4288 mutex_lock(&fs_info->cleaner_mutex); 4289 ret = relocate_block_group(rc); 4290 mutex_unlock(&fs_info->cleaner_mutex); 4291 if (ret < 0) { 4292 err = ret; 4293 goto out; 4294 } 4295 4296 if (rc->extents_found == 0) 4297 break; 4298 4299 btrfs_info(fs_info, "found %llu extents", rc->extents_found); 4300 4301 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { 4302 ret = btrfs_wait_ordered_range(rc->data_inode, 0, 4303 (u64)-1); 4304 if (ret) { 4305 err = ret; 4306 goto out; 4307 } 4308 invalidate_mapping_pages(rc->data_inode->i_mapping, 4309 0, -1); 4310 rc->stage = UPDATE_DATA_PTRS; 4311 } 4312 } 4313 4314 WARN_ON(rc->block_group->pinned > 0); 4315 WARN_ON(rc->block_group->reserved > 0); 4316 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0); 4317 out: 4318 if (err && rw) 4319 btrfs_dec_block_group_ro(rc->block_group); 4320 iput(rc->data_inode); 4321 btrfs_put_block_group(rc->block_group); 4322 kfree(rc); 4323 return err; 4324 } 4325 4326 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) 4327 { 4328 struct btrfs_fs_info *fs_info = root->fs_info; 4329 struct btrfs_trans_handle *trans; 4330 int ret, err; 4331 4332 trans = btrfs_start_transaction(fs_info->tree_root, 0); 4333 if (IS_ERR(trans)) 4334 return PTR_ERR(trans); 4335 4336 memset(&root->root_item.drop_progress, 0, 4337 sizeof(root->root_item.drop_progress)); 4338 root->root_item.drop_level = 0; 4339 btrfs_set_root_refs(&root->root_item, 0); 4340 ret = btrfs_update_root(trans, fs_info->tree_root, 4341 &root->root_key, &root->root_item); 4342 4343 err = btrfs_end_transaction(trans); 4344 if (err) 4345 return err; 4346 return ret; 4347 } 4348 4349 /* 4350 * recover relocation interrupted by system crash. 4351 * 4352 * this function resumes merging reloc trees with corresponding fs trees. 

static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret, err;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
	       sizeof(root->root_item.drop_progress));
	root->root_item.drop_level = 0;
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);

	err = btrfs_end_transaction(trans);
	if (err)
		return err;
	return ret;
}

/*
 * recover relocation interrupted by system crash.
 *
 * this function resumes merging reloc trees with corresponding fs trees.
 * this is important for keeping the sharing of tree blocks
 */
int btrfs_recover_relocation(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_fs_root(root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = read_fs_root(fs_info,
					       reloc_root->root_key.offset);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control();
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	rc->extent_root = fs_info->extent_root;

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		err = PTR_ERR(trans);
		goto out_free;
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			goto out_free;
		}

		err = __add_reloc_root(reloc_root);
		BUG_ON(err < 0); /* -ENOMEM or logic error */
		fs_root->reloc_root = reloc_root;
	}

	err = btrfs_commit_transaction(trans);
	if (err)
		goto out_free;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	err = btrfs_commit_transaction(trans);
out_free:
	kfree(rc);
out:
	if (!list_empty(&reloc_roots))
		free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
		if (IS_ERR(fs_root))
			err = PTR_ERR(fs_root);
		else
			err = btrfs_orphan_cleanup(fs_root);
	}
	return err;
}
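
/*
 * Note on btrfs_recover_relocation() above, summarizing the flow implemented
 * there: it is run when an interrupted relocation is detected (typically
 * during mount, once the fs roots are readable).  It walks the
 * BTRFS_TREE_RELOC_OBJECTID root items from the highest key offset
 * downwards, marks as garbage any reloc root whose fs root has disappeared,
 * re-attaches the remaining reloc roots to their fs roots, finishes the
 * merge via merge_reloc_roots(), and finally cleans up the orphan inode
 * left in the data relocation tree.
 */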

/*
 * helper to add ordered checksum for data relocation.
 *
 * cloning checksum properly handles the nodatasum extents.
 * it also saves CPU time to re-calculate the checksum.
 */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list, 0);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(inode, ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}
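
/*
 * Worked example for the csum rebase above (hypothetical numbers): if the
 * data being relocated lives at disk_bytenr = 1000M, the ordered extent for
 * the copy starts at ordered->start = 2000M, and a checksum item begins at
 * sums->bytenr = 1000M + 64K, then the item is rebased to
 * new_bytenr = 2000M + (1000M + 64K - 1000M) = 2000M + 64K, i.e. the csum
 * keeps its offset within the extent but now describes the new location.
 */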

int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct reloc_control *rc;
	struct backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (buf == root->node)
			__update_reloc_root(root, cow->start);
	}

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		drop_node_buffer(node);
		extent_buffer_get(cow);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			__mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}

/*
 * called before creating snapshot. it calculates metadata reservation
 * required for relocating tree blocks in the snapshot
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct reloc_control *rc;

	root = pending->root;
	if (!root->reloc_root)
		return;

	rc = root->fs_info->reloc_ctl;
	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * relocation is in the stage of merging trees. the space
	 * used by merging a reloc tree is twice the size of
	 * relocated tree nodes in the worst case. half for cowing
	 * the reloc tree, half for cowing the fs tree. the space
	 * used by cowing the reloc tree will be freed after the
	 * tree is dropped. if we create snapshot, cowing the fs
	 * tree may use more space than it frees. so we need
	 * reserve extra space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}

/*
 * called after snapshot is created. migrate block reservation
 * and create reloc root for the newly created snapshot
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc;
	int ret;

	if (!root->reloc_root)
		return 0;

	rc = root->fs_info->reloc_ctl;
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, 1);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	new_root->reloc_root = reloc_root;

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}
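
/*
 * Reservation bookkeeping example for the two snapshot hooks above
 * (hypothetical numbers): with rc->nodes_relocated == 1 MiB,
 * btrfs_reloc_pre_snapshot() asks the snapshot code to reserve an extra
 * 1 MiB, and btrfs_reloc_post_snapshot() migrates that 1 MiB from
 * pending->block_rsv into rc->block_rsv (and bumps rc->merging_rsv_size),
 * so that merging the reloc tree after the snapshot cannot run short of
 * metadata space.
 */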