/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"

/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * represents a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, may not be up to date */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer obtained by COWing the block */
	struct extent_buffer *eb;
	/* level of the tree block */
	unsigned int level:8;
	/* is the block in a non-reference-counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if the corresponding block has been cowed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * represents a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER	0
#define UPPER	1
#define RELOCATION_RESERVED_NODES	256

struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been cowed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been cowed in current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};

/*
 * maps address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represents a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}

static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		BUG_ON(!list_empty(&cache->pending[i]));
	BUG_ON(!list_empty(&cache->changed));
	BUG_ON(!list_empty(&cache->detached));
	BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
	BUG_ON(cache->nr_nodes);
	BUG_ON(cache->nr_edges);
}
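
/*
 * Note: backref nodes and edges are allocated with GFP_NOFS because
 * relocation runs in filesystem context; nr_nodes and nr_edges count
 * outstanding objects so backref_cache_cleanup() above can verify that
 * the cache has drained completely.
 */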
static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}
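
/*
 * tree_insert()/tree_search() below key an rb_root by bytenr.  They work
 * for backref_node, mapping_node and tree_block alike because all three
 * structures start with the same fields as struct tree_entry (an rb_node
 * followed by bytenr), so the rb_entry() cast is valid for any of them.
 */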
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}

static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
		    "found at offset %llu", bytenr);
}

/*
 * walk up backref nodes until we reach a node that represents a tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}

static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}

/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}
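
/*
 * Re-key a cached backref node after its block has moved: drop it from
 * the rb-tree and re-insert it under the new bytenr.  A collision means
 * the cache is inconsistent, which is treated as fatal.
 */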
static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing of the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}

static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}

/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
	    root_objectid == BTRFS_UUID_TREE_OBJECTID ||
	    root_objectid == BTRFS_QUOTA_TREE_OBJECTID ||
	    root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return 1;
	return 0;
}

static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_get_fs_root(fs_info, &key, false);
}
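
/*
 * The BTRFS_COMPAT_EXTENT_TREE_V0 block below handles old-format extent
 * refs, which carry only a root objectid and a generation: the owning
 * tree is located by reading that root and checking that its generation
 * still matches.
 */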
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
struct btrfs_root *find_tree_root(struct reloc_control *rc,
				  struct extent_buffer *leaf,
				  struct btrfs_extent_ref_v0 *ref0)
{
	struct btrfs_root *root;
	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
	u64 generation = btrfs_ref_generation_v0(leaf, ref0);

	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);

	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
	BUG_ON(IS_ERR(root));

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    generation != btrfs_root_generation(&root->root_item))
		return NULL;

	return root;
}
#endif

static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		return 1;
	}
#endif
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}
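
/*
 * Illustrative sketch of the graph built below: each backref_edge links a
 * child block (its LOWER end) to one parent block (its UPPER end); nodes
 * whose block is a tree root get node->root set instead of upper edges.
 *
 *        [root node]      [root node]
 *              \             /
 *          [backref_node, level n+1]
 *                    |
 *               backref_edge
 *                    |
 *          [target block, level n]   <- node->lowest == 1
 *
 * A block referenced from several trees has several upper edges, so the
 * cache forms a DAG rather than a simple tree.
 */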
/*
 * build backref tree for a given tree block. root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond
 * to roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when a tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1;
	struct btrfs_path *path2;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list);
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = READA_FORWARD;
	path2->reada = READA_FORWARD;

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	ASSERT(ret);
	ASSERT(path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * a backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to the pending list if we
		 * need to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline back ref */
			struct btrfs_extent_inline_ref *iref;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			key.type = btrfs_extent_inline_ref_type(eb, iref);
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backrefs of this type.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				ASSERT(root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to the pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/* key.type == BTRFS_TREE_BLOCK_REF_KEY */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/*
		 * searching the tree to find upper level blocks
		 * that reference the block.
		 */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
			cur->bytenr);

		lower = cur;
		need_check = true;
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				ASSERT(btrfs_root_bytenr(&root->root_item) ==
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!test_bit(BTRFS_ROOT_REF_COWS,
					      &root->state))
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to the pending list if we
				 * need to check its backrefs. we only do
				 * this once while walking up a tree as we
				 * will catch anything else later on.
				 */
				if (!upper->checked && need_check) {
					need_check = false;
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else {
					if (upper->checked)
						need_check = true;
					INIT_LIST_HEAD(&edge->list[UPPER]);
				}
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node)
				break;
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(path2);
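		/*
		 * Advance to the next backref: step past the inline ref just
		 * processed; once the inline refs of this extent item are
		 * exhausted, move path1 to the next extent tree item.
		 */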
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything goes well, connect backref nodes and insert backref
	 * nodes into the cache.
	 */
	ASSERT(node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		if (!upper->checked) {
			/*
			 * Still want to blow up for developers since this is a
			 * logic bug.
			 */
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}
		if (cowonly != upper->cowonly) {
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}

		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		ASSERT(list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
		}
		while (!list_empty(&list)) {
			edge = list_first_entry(&list, struct backref_edge,
						list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);

			/*
			 * Lower is no longer linked to any upper backref nodes
			 * and isn't in the cache, we can free it ourselves.
			 */
			if (list_empty(&lower->upper) &&
			    RB_EMPTY_NODE(&lower->rb_node))
				list_add(&lower->list, &useless);

			if (!RB_EMPTY_NODE(&upper->rb_node))
				continue;

			/* Add this guy's upper edges to the list to process */
			list_for_each_entry(edge, &upper->upper, list[LOWER])
				list_add_tail(&edge->list[UPPER], &list);
			if (list_empty(&upper->upper))
				list_add(&upper->list, &useless);
		}

		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
			free_backref_node(cache, lower);
		}
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	return node;
}
/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
			    "for start=%llu while inserting into relocation "
			    "tree", node->bytenr);
		kfree(node);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&root->fs_info->trans_lock);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}
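
/*
 * create_reloc_root() below snapshots a tree into the reloc namespace:
 * the new root item is keyed (BTRFS_TREE_RELOC_OBJECTID, ROOT_ITEM,
 * objectid of the source tree), so a reloc tree can be matched back to
 * its fs tree via root_key.offset.
 */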
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	u64 last_snap = 0;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);

		last_snap = btrfs_root_last_snapshot(&root->root_item);
		btrfs_set_root_last_snapshot(&root->root_item,
					     trans->transid - 1);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
		/*
		 * abuse rtransid, it is safe because it is impossible to
		 * receive data into a relocation tree.
		 */
		btrfs_set_root_rtransid(root_item, last_snap);
		btrfs_set_root_otransid(root_item, trans->transid);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key);
	BUG_ON(IS_ERR(reloc_root));
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}

/*
 * create reloc tree for a given fs tree. reloc tree is just a
 * snapshot of the fs tree with a special root objectid.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	if (!rc || !rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	root->reloc_root = reloc_root;
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!root->reloc_root)
		goto out;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (root->fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		root->reloc_root = NULL;
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, root->fs_info->tree_root,
				&reloc_root->root_key, root_item);
	BUG_ON(ret);

out:
	return 0;
}

/*
 * helper to find the first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(&entry->vfs_inode))
			node = node->rb_left;
		else if (objectid > btrfs_ino(&entry->vfs_inode))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(&entry->vfs_inode) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}

static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	if (bytenr >= block_group->key.objectid &&
	    bytenr < block_group->key.objectid + block_group->key.offset)
		return 1;
	return 0;
}
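
/*
 * Data extents are first copied into rc->data_inode; index_cnt on that
 * inode is expected to hold the start offset of the block group being
 * relocated (set up when the relocation inode is created), so an old
 * bytenr maps to file offset (bytenr - index_cnt) and the new location
 * is the disk_bytenr of the file extent found there.
 */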
/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
				       btrfs_ino(reloc_inode), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * readpage to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(inode) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(inode) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    root->sectorsize));
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(inode, key.offset, end,
							1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
					   num_bytes, parent,
					   btrfs_header_owner(leaf),
					   key.objectid, key.offset);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					parent, btrfs_header_owner(leaf),
					key.objectid, key.offset);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in the fs tree with the new blocks
 * in the reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = dest->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							 path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			} else if (!extent_buffer_uptodate(eb)) {
				ret = -EIO;
				free_extent_buffer(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}
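		/*
		 * The first pass runs with cow == 0 and only locates the
		 * block to replace.  Once found, the walk restarts with
		 * cow == 1 so the path from the root down to this slot is
		 * COWed before the pointers are actually swapped.
		 */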
		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
					   path->nodes[level]->start,
					   src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);
		ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
					   0, dest->root_key.objectid, level - 1,
					   0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
					path->nodes[level]->start,
					src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
					0, dest->root_key.objectid, level - 1,
					0);
		BUG_ON(ret);

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}

/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}
/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 bytenr;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
		eb = read_tree_block(root, bytenr, ptr_gen);
		if (IS_ERR(eb)) {
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}

/*
 * invalidate extent cache for file extents whose keys are in the range
 * of [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(inode);

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, root->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for readpage to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		btrfs_drop_extent_cache(inode, start, end, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}
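
/*
 * Note: merge progress is persisted in the reloc root's drop_progress /
 * drop_level fields (see below), so an interrupted merge can be resumed
 * from the last merged key.
 */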
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}

/*
 * merge the relocated tree blocks in reloc tree with corresponding
 * fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	LIST_HEAD(inode_list);
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int level;
	int max_level;
	int replaced = 0;
	int ret;
	int err = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		extent_buffer_get(reloc_root->node);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			goto out;
		}
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		root_item->drop_level = level;

		btrfs_end_transaction_throttle(trans, root);
		trans = NULL;

		btrfs_btree_balance_dirty(root);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to be
	 * relocated and that block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	if (ret < 0)
		err = ret;
out:
	btrfs_free_path(path);

	if (err == 0) {
		memset(&root_item->drop_progress, 0,
		       sizeof(root_item->drop_progress));
		root_item->drop_level = 0;
		btrfs_set_root_refs(root_item, 0);
		btrfs_update_reloc_root(trans, root);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans, root);

	btrfs_btree_balance_dirty(root);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return err;
}
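
/*
 * prepare for merging reloc trees: reserve space for the merge
 * (rc->nodes_relocated is accounted twice here), retrying if
 * merging_rsv_size grew while the transaction was being joined, and set
 * each reloc root's refcount to 1 so that btrfs_recover_relocation
 * resumes the merge after an interruption.
 */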
static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&root->fs_info->reloc_mutex);
	rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&root->fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(rc->extent_root,
						rc->block_rsv, num_bytes);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans, rc->extent_root);
			btrfs_block_rsv_release(rc->extent_root,
						rc->block_rsv, num_bytes);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = read_fs_root(reloc_root->fs_info,
				    reloc_root->root_key.offset);
		BUG_ON(IS_ERR(root));
		BUG_ON(root->reloc_root != reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		btrfs_update_reloc_root(trans, root);

		list_add(&reloc_root->root_list, &reloc_roots);
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		btrfs_commit_transaction(trans, rc->extent_root);
	else
		btrfs_end_transaction(trans, rc->extent_root);
	return err;
}

static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root;

	while (!list_empty(list)) {
		reloc_root = list_entry(list->next, struct btrfs_root,
					root_list);
		__del_reloc_root(reloc_root);
	}
}
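
/*
 * merge all reloc trees queued on rc->reloc_roots into their fs trees.
 * the list is spliced away under reloc_mutex, roots with refs > 0 are
 * merged via merge_reloc_root(), and every reloc tree is then dropped
 * with btrfs_drop_snapshot().  the outer loop repeats until no new
 * reloc roots show up on the list.
 */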
static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	u64 last_snap;
	u64 otransid;
	u64 objectid;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
	mutex_lock(&root->fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&root->fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			root = read_fs_root(reloc_root->fs_info,
					    reloc_root->root_key.offset);
			BUG_ON(IS_ERR(root));
			BUG_ON(root->reloc_root != reloc_root);

			ret = merge_reloc_root(rc, root);
			if (ret) {
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			list_del_init(&reloc_root->root_list);
		}

		/*
		 * we kept the old last snapshot transid in rtransid when we
		 * created the relocation tree.
		 */
		last_snap = btrfs_root_rtransid(&reloc_root->root_item);
		otransid = btrfs_root_otransid(&reloc_root->root_item);
		objectid = reloc_root->root_key.offset;

		ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
		if (ret < 0) {
			if (list_empty(&reloc_root->root_list))
				list_add_tail(&reloc_root->root_list,
					      &reloc_roots);
			goto out;
		}
	}

	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&root->fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&root->fs_info->reloc_mutex);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);
	}

	BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
}

static void free_block_list(struct rb_root *blocks)
{
	struct tree_block *block;
	struct rb_node *rb_node;
	while ((rb_node = rb_first(blocks))) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		rb_erase(rb_node, blocks);
		kfree(block);
	}
}

static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_root *root;

	if (reloc_root->last_trans == trans->transid)
		return 0;

	root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
	BUG_ON(IS_ERR(root));
	BUG_ON(root->reloc_root != reloc_root);

	return btrfs_record_root_in_trans(trans, root);
}
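
/*
 * walk up the backref tree from @node to the tree root and return the
 * reloc root that should hold the relocated copy.  as a side effect the
 * traversed nodes are recorded in rc->backref_cache.path[] so that
 * btrfs_reloc_cow_block() can find them when the blocks are COWed.
 */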
static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct backref_node *node,
				     struct backref_edge *edges[])
{
	struct backref_node *next;
	struct btrfs_root *root;
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);
		BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));

		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
			record_reloc_root_in_trans(trans, root);
			break;
		}

		btrfs_record_root_in_trans(trans, root);
		root = root->reloc_root;

		if (next->new_bytenr != root->node->start) {
			BUG_ON(next->new_bytenr);
			BUG_ON(!list_empty(&next->list));
			next->new_bytenr = root->node->start;
			next->root = root;
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			__mark_block_processed(rc, next);
			break;
		}

		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root)
		return NULL;

	next = node;
	/* setup backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}

/*
 * select a tree root for relocation. return NULL if the block
 * is reference counted. we should use do_relocation() in this
 * case. return a tree root pointer if the block isn't reference
 * counted. return -ENOENT if the block is root of reloc tree.
 */
static noinline_for_stack
struct btrfs_root *select_one_root(struct backref_node *node)
{
	struct backref_node *next;
	struct btrfs_root *root;
	struct btrfs_root *fs_root = NULL;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);

		/* no other choice for a non-reference counted tree */
		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			return root;

		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
			fs_root = root;

		if (next != node)
			return NULL;

		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}

	if (!fs_root)
		return ERR_PTR(-ENOENT);
	return fs_root;
}

static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct backref_node *node, int reserve)
{
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(reserve && node->processed);

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed && (reserve || next != node))
				break;

			num_bytes += rc->extent_root->nodesize;

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}
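
/*
 * reserve metadata space for relocating @node and its unprocessed upper
 * blocks: calcu_metadata_size() charges one nodesize per block and the
 * result is doubled.  on failure the reservation target grows to the
 * smallest doubling of nodesize * RELOCATION_RESERVED_NODES that exceeds
 * rc->reserved_bytes, plus one more base unit (e.g. with a common 16K
 * nodesize the base unit is 16K * 256 = 4MiB), and -EAGAIN tells the
 * caller to drop the transaction and retry.
 */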
static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	u64 num_bytes;
	int ret;
	u64 tmp;

	num_bytes = calcu_metadata_size(rc, node, 1) * 2;

	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;

	/*
	 * We are under a transaction here so we can only do limited flushing.
	 * If we get an enospc just kick back -EAGAIN so we know to drop the
	 * transaction and try to refill when we can flush all the things.
	 */
	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret) {
		tmp = rc->extent_root->nodesize * RELOCATION_RESERVED_NODES;
		while (tmp <= rc->reserved_bytes)
			tmp <<= 1;
		/*
		 * only one thread can access block_rsv at this point,
		 * so we don't need to hold a lock to protect block_rsv.
		 * we expand the reservation size here to allow enough
		 * space for relocation and we will return earlier in
		 * the enospc case.
		 */
		rc->block_rsv->size = tmp + rc->extent_root->nodesize *
				      RELOCATION_RESERVED_NODES;
		return -EAGAIN;
	}

	return 0;
}

/*
 * relocate a block tree, and then update pointers in upper level
 * blocks that reference the block to point to the new location.
 *
 * if called by link_to_upper, the block has already been relocated.
 * in that case this function just updates pointers.
 */
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct backref_node *upper;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	u64 generation;
	int slot;
	int ret;
	int err = 0;

	BUG_ON(lowest && node->eb);

	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		cond_resched();

		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		BUG_ON(!root);

		if (upper->eb && !upper->locked) {
			if (!lowest) {
				ret = btrfs_bin_search(upper->eb, key,
						       upper->level, &slot);
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			drop_node_buffer(upper);
		}

		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret < 0) {
				err = ret;
				break;
			}
			BUG_ON(ret > 0);

			if (!upper->eb) {
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}

			upper->locked = 1;
			path->locks[upper->level] = 0;

			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, key, upper->level,
					       &slot);
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			BUG_ON(bytenr != node->bytenr);
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}

		blocksize = root->nodesize;
		generation = btrfs_node_ptr_generation(upper->eb, slot);
		eb = read_tree_block(root, bytenr, generation);
		if (IS_ERR(eb)) {
			err = PTR_ERR(eb);
			goto next;
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			err = -EIO;
			goto next;
		}
		btrfs_tree_lock(eb);
		btrfs_set_lock_blocking(eb);

		if (!node->eb) {
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(node->eb != eb);
		} else {
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(upper->eb);

			ret = btrfs_inc_extent_ref(trans, root,
						   node->eb->start, blocksize,
						   upper->eb->start,
						   btrfs_header_owner(upper->eb),
						   node->level, 0);
			BUG_ON(ret);

			ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
			BUG_ON(ret);
		}
next:
		if (!upper->pending)
			drop_node_buffer(upper);
		else
			unlock_node_buffer(upper);
		if (err)
			break;
	}

	if (!err && node->pending) {
		drop_node_buffer(node);
		list_move_tail(&node->list, &rc->backref_cache.changed);
		node->pending = 0;
	}

	path->lowest_level = 0;
	BUG_ON(err == -ENOSPC);
	return err;
}
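
/*
 * relink a block that has already been relocated: take the first key of
 * the new extent buffer and let do_relocation() (with lowest == 0)
 * update the pointers in the upper level blocks only.
 */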
static int link_to_upper(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_path *path)
{
	struct btrfs_key key;

	btrfs_node_key_to_cpu(node->eb, &key, 0);
	return do_relocation(trans, rc, node, &key, path, 0);
}

static int finish_pending_nodes(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_path *path, int err)
{
	LIST_HEAD(list);
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node;
	int level;
	int ret;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		while (!list_empty(&cache->pending[level])) {
			node = list_entry(cache->pending[level].next,
					  struct backref_node, list);
			list_move_tail(&node->list, &list);
			BUG_ON(!node->pending);

			if (!err) {
				ret = link_to_upper(trans, rc, node, path);
				if (ret < 0)
					err = ret;
			}
		}
		list_splice_init(&list, &cache->pending[level]);
	}
	return err;
}
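
/*
 * the processed_blocks io tree tracks, via EXTENT_DIRTY bits, which tree
 * blocks have already been handled.  leaves are always recorded; higher
 * level blocks are recorded only when they sit inside the block group
 * being relocated.
 */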
static void mark_block_processed(struct reloc_control *rc,
				 u64 bytenr, u32 blocksize)
{
	set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
			EXTENT_DIRTY);
}

static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node)
{
	u32 blocksize;
	if (node->level == 0 ||
	    in_block_group(node->bytenr, rc->block_group)) {
		blocksize = rc->extent_root->nodesize;
		mark_block_processed(rc, node->bytenr, blocksize);
	}
	node->processed = 1;
}

/*
 * mark a block and all blocks that directly/indirectly reference the
 * block as processed.
 */
static void update_processed_blocks(struct reloc_control *rc,
				    struct backref_node *node)
{
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed)
				break;

			__mark_block_processed(rc, next);

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
}

static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
	u32 blocksize = rc->extent_root->nodesize;

	if (test_range_bit(&rc->processed_blocks, bytenr,
			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
		return 1;
	return 0;
}
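
/*
 * read a queued tree block and fill in its real key.  until this point
 * block->key only carries the data stashed by add_tree_block(): the
 * block's generation in key.offset (used for the read below) and the
 * nodesize in key.objectid.
 */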
static int get_tree_block_key(struct reloc_control *rc,
			      struct tree_block *block)
{
	struct extent_buffer *eb;

	BUG_ON(block->key_ready);
	eb = read_tree_block(rc->extent_root, block->bytenr,
			     block->key.offset);
	if (IS_ERR(eb)) {
		return PTR_ERR(eb);
	} else if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return -EIO;
	}
	WARN_ON(btrfs_header_level(eb) != block->level);
	if (block->level == 0)
		btrfs_item_key_to_cpu(eb, &block->key, 0);
	else
		btrfs_node_key_to_cpu(eb, &block->key, 0);
	free_extent_buffer(eb);
	block->key_ready = 1;
	return 0;
}

/*
 * helper function to relocate a tree block
 */
static int relocate_tree_block(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct backref_node *node,
			       struct btrfs_key *key,
			       struct btrfs_path *path)
{
	struct btrfs_root *root;
	int ret = 0;

	if (!node)
		return 0;

	BUG_ON(node->processed);
	root = select_one_root(node);
	if (root == ERR_PTR(-ENOENT)) {
		update_processed_blocks(rc, node);
		goto out;
	}

	if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = reserve_metadata_space(trans, rc, node);
		if (ret)
			goto out;
	}

	if (root) {
		if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
			BUG_ON(node->new_bytenr);
			BUG_ON(!list_empty(&node->list));
			btrfs_record_root_in_trans(trans, root);
			root = root->reloc_root;
			node->new_bytenr = root->node->start;
			node->root = root;
			list_add_tail(&node->list, &rc->backref_cache.changed);
		} else {
			path->lowest_level = node->level;
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			btrfs_release_path(path);
			if (ret > 0)
				ret = 0;
		}
		if (!ret)
			update_processed_blocks(rc, node);
	} else {
		ret = do_relocation(trans, rc, node, key, path, 1);
	}
out:
	if (ret || node->level == 0 || node->cowonly)
		remove_backref_node(&rc->backref_cache, node);
	return ret;
}

/*
 * relocate a list of blocks
 */
static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc, struct rb_root *blocks)
{
	struct backref_node *node;
	struct btrfs_path *path;
	struct tree_block *block;
	struct rb_node *rb_node;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_free_blocks;
	}

	rb_node = rb_first(blocks);
	while (rb_node) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		if (!block->key_ready)
			readahead_tree_block(rc->extent_root, block->bytenr);
		rb_node = rb_next(rb_node);
	}

	rb_node = rb_first(blocks);
	while (rb_node) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		if (!block->key_ready) {
			err = get_tree_block_key(rc, block);
			if (err)
				goto out_free_path;
		}
		rb_node = rb_next(rb_node);
	}

	rb_node = rb_first(blocks);
	while (rb_node) {
		block = rb_entry(rb_node, struct tree_block, rb_node);

		node = build_backref_tree(rc, &block->key,
					  block->level, block->bytenr);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto out;
		}

		ret = relocate_tree_block(trans, rc, node, &block->key,
					  path);
		if (ret < 0) {
			if (ret != -EAGAIN || rb_node == rb_first(blocks))
				err = ret;
			goto out;
		}
		rb_node = rb_next(rb_node);
	}
out:
	err = finish_pending_nodes(trans, rc, path, err);

out_free_path:
	btrfs_free_path(path);
out_free_blocks:
	free_block_list(blocks);
	return err;
}

static noinline_for_stack
int prealloc_file_extent_cluster(struct inode *inode,
				 struct file_extent_cluster *cluster)
{
	u64 alloc_hint = 0;
	u64 start;
	u64 end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	u64 num_bytes;
	int nr = 0;
	int ret = 0;

	BUG_ON(cluster->start != cluster->boundary[0]);
	inode_lock(inode);

	ret = btrfs_check_data_free_space(inode, cluster->start,
					  cluster->end + 1 - cluster->start);
	if (ret)
		goto out;

	while (nr < cluster->nr) {
		start = cluster->boundary[nr] - offset;
		if (nr + 1 < cluster->nr)
			end = cluster->boundary[nr + 1] - 1 - offset;
		else
			end = cluster->end - offset;

		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		num_bytes = end + 1 - start;
		ret = btrfs_prealloc_file_range(inode, 0, start,
						num_bytes, num_bytes,
						end + 1, &alloc_hint);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
		if (ret)
			break;
		nr++;
	}
	btrfs_free_reserved_data_space(inode, cluster->start,
				       cluster->end + 1 - cluster->start);
out:
	inode_unlock(inode);
	return ret;
}

static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
			 u64 block_start)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret = 0;

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;

	em->start = start;
	em->len = end + 1 - start;
	em->block_len = em->len;
	em->block_start = block_start;
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 0);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(inode, start, end, 0);
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	return ret;
}
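
/*
 * copy the data of a cluster to its preallocated new location: the
 * extents are preallocated, a pinned extent map from the old file
 * offset range to the new disk start is inserted, and then every page
 * is read, marked delalloc and dirtied so that writeback lands the data
 * at the new location.  EXTENT_BOUNDARY bits mark the original extent
 * boundaries within the cluster.
 */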
static int relocate_file_extent_cluster(struct inode *inode,
					struct file_extent_cluster *cluster)
{
	u64 page_start;
	u64 page_end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	unsigned long index;
	unsigned long last_index;
	struct page *page;
	struct file_ra_state *ra;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int nr = 0;
	int ret = 0;

	if (!cluster->nr)
		return 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	ret = prealloc_file_extent_cluster(inode, cluster);
	if (ret)
		goto out;

	file_ra_state_init(ra, inode->i_mapping);

	ret = setup_extent_mapping(inode, cluster->start - offset,
				   cluster->end - offset, cluster->start);
	if (ret)
		goto out;

	index = (cluster->start - offset) >> PAGE_SHIFT;
	last_index = (cluster->end - offset) >> PAGE_SHIFT;
	while (index <= last_index) {
		ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE);
		if (ret)
			goto out;

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping,
						  ra, NULL, index,
						  last_index + 1 - index);
			page = find_or_create_page(inode->i_mapping, index,
						   mask);
			if (!page) {
				btrfs_delalloc_release_metadata(inode,
								PAGE_SIZE);
				ret = -ENOMEM;
				goto out;
			}
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping,
						   ra, NULL, page, index,
						   last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				btrfs_delalloc_release_metadata(inode,
								PAGE_SIZE);
				ret = -EIO;
				goto out;
			}
		}

		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;

		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);

		set_page_extent_mapped(page);

		if (nr < cluster->nr &&
		    page_start + offset == cluster->boundary[nr]) {
			set_extent_bits(&BTRFS_I(inode)->io_tree,
					page_start, page_end,
					EXTENT_BOUNDARY);
			nr++;
		}

		btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
		set_page_dirty(page);

		unlock_extent(&BTRFS_I(inode)->io_tree,
			      page_start, page_end);
		unlock_page(page);
		put_page(page);

		index++;
		balance_dirty_pages_ratelimited(inode->i_mapping);
		btrfs_throttle(BTRFS_I(inode)->root);
	}
	WARN_ON(nr != cluster->nr);
out:
	kfree(ra);
	return ret;
}
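
/*
 * queue a data extent for relocation.  contiguous extents are batched
 * in rc->cluster and written out together: the cluster is flushed when
 * the next extent is not adjacent (objectid != cluster->end + 1) or
 * when MAX_EXTENTS boundaries have accumulated.
 */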
static noinline_for_stack
int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
			 struct file_extent_cluster *cluster)
{
	int ret;

	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}

	if (!cluster->nr)
		cluster->start = extent_key->objectid;
	else
		BUG_ON(cluster->nr >= MAX_EXTENTS);
	cluster->end = extent_key->objectid + extent_key->offset - 1;
	cluster->boundary[cluster->nr] = extent_key->objectid;
	cluster->nr++;

	if (cluster->nr >= MAX_EXTENTS) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}
	return 0;
}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int get_ref_objectid_v0(struct reloc_control *rc,
			       struct btrfs_path *path,
			       struct btrfs_key *extent_key,
			       u64 *ref_objectid, int *path_change)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref_v0 *ref0;
	int ret;
	int slot;

	leaf = path->nodes[0];
	slot = path->slots[0];
	while (1) {
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret < 0)
				return ret;
			BUG_ON(ret > 0);
			leaf = path->nodes[0];
			slot = path->slots[0];
			if (path_change)
				*path_change = 1;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != extent_key->objectid)
			return -ENOENT;

		if (key.type != BTRFS_EXTENT_REF_V0_KEY) {
			slot++;
			continue;
		}
		ref0 = btrfs_item_ptr(leaf, slot,
				      struct btrfs_extent_ref_v0);
		*ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
		break;
	}
	return 0;
}
#endif

/*
 * helper to add a tree block to the list.
 * the major work is getting the generation and level of the block
 */
static int add_tree_block(struct reloc_control *rc,
			  struct btrfs_key *extent_key,
			  struct btrfs_path *path,
			  struct rb_root *blocks)
{
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	struct tree_block *block;
	struct rb_node *rb_node;
	u32 item_size;
	int level = -1;
	u64 generation;

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
	    item_size >= sizeof(*ei) + sizeof(*bi)) {
		ei = btrfs_item_ptr(eb, path->slots[0],
				    struct btrfs_extent_item);
		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
			bi = (struct btrfs_tree_block_info *)(ei + 1);
			level = btrfs_tree_block_level(eb, bi);
		} else {
			level = (int)extent_key->offset;
		}
		generation = btrfs_extent_generation(eb, ei);
	} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		u64 ref_owner;
		int ret;

		BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		ret = get_ref_objectid_v0(rc, path, extent_key,
					  &ref_owner, NULL);
		if (ret < 0)
			return ret;
		BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
		level = (int)ref_owner;
		/* FIXME: get real generation */
		generation = 0;
#else
		BUG();
#endif
	}

	btrfs_release_path(path);

	BUG_ON(level == -1);

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block)
		return -ENOMEM;

	block->bytenr = extent_key->objectid;
	block->key.objectid = rc->extent_root->nodesize;
	block->key.offset = generation;
	block->level = level;
	block->key_ready = 0;

	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, block->bytenr);

	return 0;
}

/*
 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
 */
static int __add_tree_block(struct reloc_control *rc,
			    u64 bytenr, u32 blocksize,
			    struct rb_root *blocks)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
					SKINNY_METADATA);

	if (tree_block_processed(bytenr, rc))
		return 0;

	if (tree_search(blocks, bytenr))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	key.objectid = bytenr;
	if (skinny) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = (u64)-1;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = blocksize;
	}

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret > 0 && skinny) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    (key.type == BTRFS_METADATA_ITEM_KEY ||
			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
			      key.offset == blocksize)))
				ret = 0;
		}

		if (ret) {
			skinny = false;
			btrfs_release_path(path);
			goto again;
		}
	}
	BUG_ON(ret);

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to check if the block uses full backrefs for the pointers in it
 */
static int block_use_full_backref(struct reloc_control *rc,
				  struct extent_buffer *eb)
{
	u64 flags;
	int ret;

	if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
	    btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
		return 1;

	ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
				       eb->start, btrfs_header_level(eb), 1,
				       NULL, &flags);
	BUG_ON(ret);

	if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
		ret = 1;
	else
		ret = 0;
	return ret;
}
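
/*
 * drop the free space cache inode that belongs to the block group being
 * relocated.  the inode is either passed in by the caller or looked up
 * by inode number in the root tree, and its content is truncated in a
 * joined transaction.
 */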
static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode,
				    u64 ino)
{
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (inode)
		goto truncate;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode) || is_bad_inode(inode)) {
		if (!IS_ERR(inode))
			iput(inode);
		return -ENOENT;
	}

truncate:
	ret = btrfs_check_trunc_cache_free_space(root,
						 &fs_info->global_block_rsv);
	if (ret)
		goto out;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_truncate_free_space_cache(root, trans, block_group, inode);

	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
out:
	iput(inode);
	return ret;
}
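
/*
 * note for the function below: @counted is cleared while walking leaves
 * that use full backrefs, because their file extent items do not
 * contribute to the data ref count being resolved; @added is set once
 * the current leaf is already queued in @blocks so duplicates are
 * skipped.
 */
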
/*
 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
 * this function scans the fs tree to find blocks that reference the
 * data extent
 */
static int find_data_references(struct reloc_control *rc,
				struct btrfs_key *extent_key,
				struct extent_buffer *leaf,
				struct btrfs_extent_data_ref *ref,
				struct rb_root *blocks)
{
	struct btrfs_path *path;
	struct tree_block *block;
	struct btrfs_root *root;
	struct btrfs_file_extent_item *fi;
	struct rb_node *rb_node;
	struct btrfs_key key;
	u64 ref_root;
	u64 ref_objectid;
	u64 ref_offset;
	u32 ref_count;
	u32 nritems;
	int err = 0;
	int added = 0;
	int counted;
	int ret;

	ref_root = btrfs_extent_data_ref_root(leaf, ref);
	ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
	ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
	ref_count = btrfs_extent_data_ref_count(leaf, ref);

	/*
	 * This is an extent belonging to the free space cache, let's just
	 * delete it and redo the search.
	 */
	if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
		ret = delete_block_group_cache(rc->extent_root->fs_info,
					       rc->block_group,
					       NULL, ref_objectid);
		if (ret != -ENOENT)
			return ret;
		ret = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	root = read_fs_root(rc->extent_root->fs_info, ref_root);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto out;
	}

	key.objectid = ref_objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (ref_offset > ((u64)-1 << 32))
		key.offset = 0;
	else
		key.offset = ref_offset;

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	/*
	 * references in tree blocks that use full backrefs
	 * are not counted in
	 */
	if (block_use_full_backref(rc, leaf))
		counted = 0;
	else
		counted = 1;
	rb_node = tree_search(blocks, leaf->start);
	if (rb_node) {
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
	}

	while (ref_count > 0) {
		while (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (WARN_ON(ret > 0))
				goto out;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			added = 0;

			if (block_use_full_backref(rc, leaf))
				counted = 0;
			else
				counted = 1;
			rb_node = tree_search(blocks, leaf->start);
			if (rb_node) {
				if (counted)
					added = 1;
				else
					path->slots[0] = nritems;
			}
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (WARN_ON(key.objectid != ref_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY))
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto next;

		if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		    extent_key->objectid)
			goto next;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		if (key.offset != ref_offset)
			goto next;

		if (counted)
			ref_count--;
		if (added)
			goto next;

		if (!tree_block_processed(leaf->start, rc)) {
			block = kmalloc(sizeof(*block), GFP_NOFS);
			if (!block) {
				err = -ENOMEM;
				break;
			}
			block->bytenr = leaf->start;
			btrfs_item_key_to_cpu(leaf, &block->key, 0);
			block->level = 0;
			block->key_ready = 1;
			rb_node = tree_insert(blocks, block->bytenr,
					      &block->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   block->bytenr);
		}
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
next:
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return err;
}

/*
 * helper to find all tree blocks that reference a given data extent
 */
static noinline_for_stack
int add_data_references(struct reloc_control *rc,
			struct btrfs_key *extent_key,
			struct btrfs_path *path,
			struct rb_root *blocks)
{
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_extent_inline_ref *iref;
	unsigned long ptr;
	unsigned long end;
	u32 blocksize = rc->extent_root->nodesize;
	int ret = 0;
	int err = 0;

	eb = path->nodes[0];
	ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
		ptr = end;
	else
#endif
		ptr += sizeof(struct btrfs_extent_item);

	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		key.type = btrfs_extent_inline_ref_type(eb, iref);
		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			ret = __add_tree_block(rc, key.offset, blocksize,
					       blocks);
		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = find_data_references(rc, extent_key,
						   eb, dref, blocks);
		} else {
			BUG();
		}
		if (ret) {
			err = ret;
			goto out;
		}
		ptr += btrfs_extent_inline_ref_size(key.type);
	}
	WARN_ON(ptr > end);

	while (1) {
		cond_resched();
		eb = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret < 0) {
				err = ret;
				break;
			}
			if (ret > 0)
				break;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
		if (key.objectid != extent_key->objectid)
			break;

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
#else
		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
#endif
			ret = __add_tree_block(rc, key.offset, blocksize,
					       blocks);
		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = btrfs_item_ptr(eb, path->slots[0],
					      struct btrfs_extent_data_ref);
			ret = find_data_references(rc, extent_key,
						   eb, dref, blocks);
		} else {
			ret = 0;
		}
		if (ret) {
			err = ret;
			break;
		}
		path->slots[0]++;
	}
out:
	btrfs_release_path(path);
	if (err)
		free_block_list(blocks);
	return err;
}

/*
 * helper to find next unprocessed extent
 */
static noinline_for_stack
int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
		     struct btrfs_key *extent_key)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u64 start, end, last;
	int ret;

	last = rc->block_group->key.objectid + rc->block_group->key.offset;
	while (1) {
		cond_resched();
		if (rc->search_start >= last) {
			ret = 1;
			break;
		}

		key.objectid = rc->search_start;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = 0;

		path->search_commit_root = 1;
		path->skip_locking = 1;
		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
					0, 0);
		if (ret < 0)
			break;
next:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret != 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid >= last) {
			ret = 1;
			break;
		}

		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
		    key.type != BTRFS_METADATA_ITEM_KEY) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
		    key.objectid + key.offset <= rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_METADATA_ITEM_KEY &&
		    key.objectid + rc->extent_root->nodesize <=
		    rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		ret = find_first_extent_bit(&rc->processed_blocks,
					    key.objectid, &start, &end,
					    EXTENT_DIRTY, NULL);

		if (ret == 0 && start <= key.objectid) {
			btrfs_release_path(path);
			rc->search_start = end + 1;
		} else {
			if (key.type == BTRFS_EXTENT_ITEM_KEY)
				rc->search_start = key.objectid + key.offset;
			else
				rc->search_start = key.objectid +
					rc->extent_root->nodesize;
			memcpy(extent_key, &key, sizeof(key));
			return 0;
		}
	}
	btrfs_release_path(path);
	return ret;
}

static void set_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = rc;
	mutex_unlock(&fs_info->reloc_mutex);
}

static void unset_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = NULL;
	mutex_unlock(&fs_info->reloc_mutex);
}
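
/*
 * sanity check the flags of an extent item: DATA and TREE_BLOCK are
 * mutually exclusive, at least one of them must be present, and
 * FULL_BACKREF is only valid on tree blocks.  returns 1 on an invalid
 * combination.
 */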
static int check_extent_flags(u64 flags)
{
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
	    !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
		return 1;
	return 0;
}

static noinline_for_stack
int prepare_to_relocate(struct reloc_control *rc)
{
	struct btrfs_trans_handle *trans;
	int ret;

	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
					      BTRFS_BLOCK_RSV_TEMP);
	if (!rc->block_rsv)
		return -ENOMEM;

	memset(&rc->cluster, 0, sizeof(rc->cluster));
	rc->search_start = rc->block_group->key.objectid;
	rc->extents_found = 0;
	rc->nodes_relocated = 0;
	rc->merging_rsv_size = 0;
	rc->reserved_bytes = 0;
	rc->block_rsv->size = rc->extent_root->nodesize *
			      RELOCATION_RESERVED_NODES;
	ret = btrfs_block_rsv_refill(rc->extent_root,
				     rc->block_rsv, rc->block_rsv->size,
				     BTRFS_RESERVE_FLUSH_ALL);
	if (ret)
		return ret;

	rc->create_reloc_tree = 1;
	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		/*
		 * the extent tree is not a ref_cow tree and has no reloc_root
		 * to clean up.  and callers are responsible for freeing the
		 * above block rsv.
		 */
		return PTR_ERR(trans);
	}
	btrfs_commit_transaction(trans, rc->extent_root);
	return 0;
}
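
/*
 * core loop of relocation: iterate over the unprocessed extents of the
 * block group, queue tree blocks (and, in the UPDATE_DATA_PTRS stage,
 * the tree blocks referencing data extents) for relocation, and batch
 * data extents into clusters.  an ENOSPC after some progress triggers a
 * forced chunk allocation and a restart.
 */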
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
	struct rb_root blocks = RB_ROOT;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	u64 flags;
	u32 item_size;
	int ret;
	int err = 0;
	int progress = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	ret = prepare_to_relocate(rc);
	if (ret) {
		err = ret;
		goto out_free;
	}

	while (1) {
		rc->reserved_bytes = 0;
		ret = btrfs_block_rsv_refill(rc->extent_root,
					     rc->block_rsv, rc->block_rsv->size,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			break;
		}
		progress++;
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			break;
		}
restart:
		if (update_backref_cache(trans, &rc->backref_cache)) {
			btrfs_end_transaction(trans, rc->extent_root);
			continue;
		}

		ret = find_next_extent(rc, path, &key);
		if (ret < 0)
			err = ret;
		if (ret != 0)
			break;

		rc->extents_found++;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_extent_item);
		item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			flags = btrfs_extent_flags(path->nodes[0], ei);
			ret = check_extent_flags(flags);
			BUG_ON(ret);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			u64 ref_owner;
			int path_change = 0;

			BUG_ON(item_size !=
			       sizeof(struct btrfs_extent_item_v0));
			ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
						  &path_change);
			if (ret < 0) {
				err = ret;
				break;
			}
			if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
				flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
			else
				flags = BTRFS_EXTENT_FLAG_DATA;

			if (path_change) {
				btrfs_release_path(path);

				path->search_commit_root = 1;
				path->skip_locking = 1;
				ret = btrfs_search_slot(NULL, rc->extent_root,
							&key, path, 0, 0);
				if (ret < 0) {
					err = ret;
					break;
				}
				BUG_ON(ret > 0);
			}
#else
			BUG();
#endif
		}

		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
			ret = add_data_references(rc, &key, path, &blocks);
		} else {
			btrfs_release_path(path);
			ret = 0;
		}
		if (ret < 0) {
			err = ret;
			break;
		}

		if (!RB_EMPTY_ROOT(&blocks)) {
			ret = relocate_tree_blocks(trans, rc, &blocks);
			if (ret < 0) {
				/*
				 * if we fail to relocate tree blocks, force
				 * to update the backref cache when committing
				 * the transaction.
				 */
				rc->backref_cache.last_trans = trans->transid - 1;

				if (ret != -EAGAIN) {
					err = ret;
					break;
				}
				rc->extents_found--;
				rc->search_start = key.objectid;
			}
		}

		btrfs_end_transaction_throttle(trans, rc->extent_root);
		btrfs_btree_balance_dirty(rc->extent_root);
		trans = NULL;

		if (rc->stage == MOVE_DATA_EXTENTS &&
		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
			rc->found_file_extent = 1;
			ret = relocate_data_extent(rc->data_inode,
						   &key, &rc->cluster);
			if (ret < 0) {
				err = ret;
				break;
			}
		}
	}
	if (trans && progress && err == -ENOSPC) {
		ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
					      rc->block_group->flags);
		if (ret == 1) {
			err = 0;
			progress = 0;
			goto restart;
		}
	}

	btrfs_release_path(path);
	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);

	if (trans) {
		btrfs_end_transaction_throttle(trans, rc->extent_root);
		btrfs_btree_balance_dirty(rc->extent_root);
	}

	if (!err) {
		ret = relocate_file_extent_cluster(rc->data_inode,
						   &rc->cluster);
		if (ret < 0)
			err = ret;
	}

	rc->create_reloc_tree = 0;
	set_reloc_control(rc);

	backref_cache_cleanup(&rc->backref_cache);
	btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);

	err = prepare_to_merge(rc, err);

	merge_reloc_roots(rc);

	rc->merge_reloc_tree = 0;
	unset_reloc_control(rc);
	btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);

	/* get rid of pinned extents */
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans))
		err = PTR_ERR(trans);
	else
		btrfs_commit_transaction(trans, rc->extent_root);
out_free:
	btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
	btrfs_free_path(path);
	return err;
}
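
/*
 * insert a minimal inode item for the data relocation inode: a regular
 * file with size 0, mode 0600 and the NOCOMPRESS/PREALLOC flags set.
 * no directory entry is created, so the link count stays 0.
 */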
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, 0);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
					  BTRFS_INODE_PREALLOC);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to create inode for data relocation.
 * the inode is in data relocation tree and its link count is 0
 */
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 objectid;
	int err = 0;

	root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
	if (IS_ERR(root))
		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	err = btrfs_find_free_objectid(root, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid);
	BUG_ON(err);

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
	BTRFS_I(inode)->index_cnt = group->key.objectid;

	err = btrfs_orphan_add(trans, inode);
out:
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
	if (err) {
		if (inode)
			iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}
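
/*
 * allocate and initialize a reloc_control: empty reloc root list,
 * backref cache, reloc root mapping tree and the extent io tree used to
 * track processed blocks.
 */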
static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
{
	struct reloc_control *rc;

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return NULL;

	INIT_LIST_HEAD(&rc->reloc_roots);
	backref_cache_init(&rc->backref_cache);
	mapping_tree_init(&rc->reloc_root_tree);
	extent_io_tree_init(&rc->processed_blocks,
			    fs_info->btree_inode->i_mapping);
	return rc;
}

/*
 * function to relocate all extents in a block group.
 */
int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	struct reloc_control *rc;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	int rw = 0;
	int err = 0;

	rc = alloc_reloc_control(fs_info);
	if (!rc)
		return -ENOMEM;

	rc->extent_root = extent_root;

	rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!rc->block_group);

	ret = btrfs_inc_block_group_ro(extent_root, rc->block_group);
	if (ret) {
		err = ret;
		goto out;
	}
	rw = 1;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group,
					path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
	else
		ret = PTR_ERR(inode);

	if (ret && ret != -ENOENT) {
		err = ret;
		goto out;
	}

	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

	btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu",
		   rc->block_group->key.objectid, rc->block_group->flags);

	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
	btrfs_wait_ordered_roots(fs_info, -1,
				 rc->block_group->key.objectid,
				 rc->block_group->key.offset);

	while (1) {
		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (rc->extents_found == 0)
			break;

		btrfs_info(extent_root->fs_info, "found %llu extents",
			   rc->extents_found);

		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
						       (u64)-1);
			if (ret) {
				err = ret;
				goto out;
			}
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
out:
	if (err && rw)
		btrfs_dec_block_group_ro(extent_root, rc->block_group);
	iput(rc->data_inode);
	btrfs_put_block_group(rc->block_group);
	kfree(rc);
	return err;
}
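
/*
 * a reloc root found at mount time without a matching fs root is stale.
 * reset its drop_progress and set its refs to 0 so that it is simply
 * deleted instead of being merged.
 */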
/*
 * recover relocation interrupted by system crash.
 *
 * this function resumes merging reloc trees with corresponding fs trees.
 * this is important for keeping the sharing of tree blocks.
 */
int btrfs_recover_relocation(struct btrfs_root *root)
{
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_fs_root(root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = read_fs_root(root->fs_info,
					       reloc_root->root_key.offset);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control(root->fs_info);
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	rc->extent_root = root->fs_info->extent_root;

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		err = PTR_ERR(trans);
		goto out_free;
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = read_fs_root(root->fs_info,
				       reloc_root->root_key.offset);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			goto out_free;
		}

		err = __add_reloc_root(reloc_root);
		BUG_ON(err < 0); /* -ENOMEM or logic error */
		fs_root->reloc_root = reloc_root;
	}

	err = btrfs_commit_transaction(trans, rc->extent_root);
	if (err)
		goto out_free;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans))
		err = PTR_ERR(trans);
	else
		err = btrfs_commit_transaction(trans, rc->extent_root);
out_free:
	kfree(rc);
out:
	if (!list_empty(&reloc_roots))
		free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = read_fs_root(root->fs_info,
				       BTRFS_DATA_RELOC_TREE_OBJECTID);
		if (IS_ERR(fs_root))
			err = PTR_ERR(fs_root);
		else
			err = btrfs_orphan_cleanup(fs_root);
	}
	return err;
}
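/*
 * Editor's note: an illustrative sketch, not in the original file.
 * Reloc roots live in the tree of tree roots under objectid
 * BTRFS_TREE_RELOC_OBJECTID with key.offset set to the objectid of the
 * fs root they shadow, which is why btrfs_recover_relocation() walks
 * key.offset downward from (u64)-1 and finds the owning fs root via
 * reloc_root->root_key.offset. A hypothetical helper building such a
 * key, not called anywhere in this file:
 */
static void __maybe_unused reloc_root_key(u64 fs_root_objectid,
					  struct btrfs_key *key)
{
	key->objectid = BTRFS_TREE_RELOC_OBJECTID;
	key->type = BTRFS_ROOT_ITEM_KEY;
	key->offset = fs_root_objectid; /* fs root this reloc root shadows */
}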
/*
 * helper to add ordered checksums for data relocation.
 *
 * cloning the existing checksums properly handles nodatasum extents
 * and also saves the CPU time of recalculating them.
 */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list, 0);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum
		 * is. We need to do this because we will read in entire
		 * prealloc extents but we may have written to, say, the
		 * middle of the prealloc extent, so we need to make sure the
		 * csum goes with the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on-disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(inode, ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}

int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct reloc_control *rc;
	struct backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = root->fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (buf == root->node)
			__update_reloc_root(root, cow->start);
	}

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		drop_node_buffer(node);
		extent_buffer_get(cow);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			__mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}
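/*
 * Editor's note: a worked example, with made-up numbers, for the csum
 * rebasing in btrfs_reloc_clone_csums() above. Suppose the old extent
 * starts at disk_bytenr 1M, the relocated ordered extent starts at
 * ordered->start 9M, and a checksum was recorded for old bytenr
 * 1M + 64K. Then:
 *
 *	new_bytenr = ordered->start + (sums->bytenr - disk_bytenr)
 *		   = 9M + ((1M + 64K) - 1M)
 *		   = 9M + 64K
 *
 * i.e. each checksum keeps its relative offset within the extent while
 * the extent as a whole moves to the new location.
 */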
/*
 * called before creating a snapshot. it calculates the metadata
 * reservation required for relocating tree blocks in the snapshot
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct reloc_control *rc;

	root = pending->root;
	if (!root->reloc_root)
		return;

	rc = root->fs_info->reloc_ctl;
	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * relocation is in the stage of merging trees. the space
	 * used by merging a reloc tree is twice the size of
	 * relocated tree nodes in the worst case. half for cowing
	 * the reloc tree, half for cowing the fs tree. the space
	 * used by cowing the reloc tree will be freed after the
	 * tree is dropped. if we create a snapshot, cowing the fs
	 * tree may use more space than it frees. so we need to
	 * reserve extra space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}

/*
 * called after a snapshot is created. migrate the block reservation
 * and create a reloc root for the newly created snapshot
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc;
	int ret;

	if (!root->reloc_root)
		return 0;

	rc = root->fs_info->reloc_ctl;
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, 1);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	new_root->reloc_root = reloc_root;

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}
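/*
 * Editor's note: a worked example, with made-up numbers, for the
 * reservation math in btrfs_reloc_pre_snapshot() and
 * btrfs_reloc_post_snapshot() above. With nodes_relocated = 8M,
 * merging a reloc tree is budgeted at 16M in the worst case: 8M for
 * cowing the reloc tree (freed again once the reloc tree is dropped)
 * plus 8M for cowing the fs tree. Creating a snapshot during the merge
 * means cowing the fs tree may use more space than it frees, so
 * pre-snapshot adds another nodes_relocated (8M) to bytes_to_reserve,
 * and post-snapshot then migrates exactly that amount from the pending
 * snapshot's block_rsv into rc->block_rsv.
 */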