/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"

/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * represent a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, may not be up to date */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer obtained by COWing the block */
	struct extent_buffer *eb;
	/* level of tree block */
	unsigned int level:8;
	/* is the block in a non-reference counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if the corresponding block has been COWed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * represent a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER	0
#define UPPER	1
#define RELOCATION_RESERVED_NODES	256

struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been COWed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been COWed in current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};
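/*
 * The cache ties nodes together with backref_edge objects: each edge
 * links one LOWER (child) node to one UPPER (parent) node and sits on
 * two lists at once, edge->list[LOWER] on the child's 'upper' list and
 * edge->list[UPPER] on the parent's 'lower' list.  A rough sketch:
 *
 *		upper node
 *	       /    |     \
 *	    edge   edge   edge		<- upper->lower walks these
 *	       \    |     /
 *		lower nodes		<- lower->upper walks back up
 */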
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represent a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}

static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		BUG_ON(!list_empty(&cache->pending[i]));
	BUG_ON(!list_empty(&cache->changed));
	BUG_ON(!list_empty(&cache->detached));
	BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
	BUG_ON(cache->nr_nodes);
	BUG_ON(cache->nr_edges);
}
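/*
 * nr_nodes and nr_edges track outstanding allocations so that
 * backref_cache_cleanup() can BUG_ON() a leaked node or edge; always
 * pair the allocators below with free_backref_node()/free_backref_edge()
 * rather than a bare kfree().
 */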
static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}

static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
		    "found at offset %llu", bytenr);
}

/*
 * walk up backref nodes until reaching the node that represents the
 * tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}

static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}
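/*
 * free a node whose upper edges are already gone; remove_backref_node()
 * below is the higher level helper that also unhooks the edges and, if
 * a parent loses its last cached child, moves it to the leaf list.
 */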
static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}

/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}

static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookups.  a transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}
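/*
 * A note on the return value: 1 means the cache was flushed for a new
 * transaction and any cached lookups must be redone, 0 means the cache
 * is still valid.  A minimal sketch of the expected calling pattern
 * (the real loop is in relocate_block_group(), later in this file):
 *
 *	if (update_backref_cache(trans, &rc->backref_cache)) {
 *		btrfs_end_transaction(trans, rc->extent_root);
 *		continue;
 *	}
 */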
512 */ 513 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 514 list_for_each_entry(node, &cache->pending[level], list) { 515 BUG_ON(!node->pending); 516 if (node->bytenr == node->new_bytenr) 517 continue; 518 update_backref_node(cache, node, node->new_bytenr); 519 } 520 } 521 522 cache->last_trans = 0; 523 return 1; 524 } 525 526 527 static int should_ignore_root(struct btrfs_root *root) 528 { 529 struct btrfs_root *reloc_root; 530 531 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 532 return 0; 533 534 reloc_root = root->reloc_root; 535 if (!reloc_root) 536 return 0; 537 538 if (btrfs_root_last_snapshot(&reloc_root->root_item) == 539 root->fs_info->running_transaction->transid - 1) 540 return 0; 541 /* 542 * if there is reloc tree and it was created in previous 543 * transaction backref lookup can find the reloc tree, 544 * so backref node for the fs tree root is useless for 545 * relocation. 546 */ 547 return 1; 548 } 549 /* 550 * find reloc tree by address of tree root 551 */ 552 static struct btrfs_root *find_reloc_root(struct reloc_control *rc, 553 u64 bytenr) 554 { 555 struct rb_node *rb_node; 556 struct mapping_node *node; 557 struct btrfs_root *root = NULL; 558 559 spin_lock(&rc->reloc_root_tree.lock); 560 rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr); 561 if (rb_node) { 562 node = rb_entry(rb_node, struct mapping_node, rb_node); 563 root = (struct btrfs_root *)node->data; 564 } 565 spin_unlock(&rc->reloc_root_tree.lock); 566 return root; 567 } 568 569 static int is_cowonly_root(u64 root_objectid) 570 { 571 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID || 572 root_objectid == BTRFS_EXTENT_TREE_OBJECTID || 573 root_objectid == BTRFS_CHUNK_TREE_OBJECTID || 574 root_objectid == BTRFS_DEV_TREE_OBJECTID || 575 root_objectid == BTRFS_TREE_LOG_OBJECTID || 576 root_objectid == BTRFS_CSUM_TREE_OBJECTID || 577 root_objectid == BTRFS_UUID_TREE_OBJECTID || 578 root_objectid == BTRFS_QUOTA_TREE_OBJECTID) 579 return 1; 580 return 0; 581 } 582 583 static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info, 584 u64 root_objectid) 585 { 586 struct btrfs_key key; 587 588 key.objectid = root_objectid; 589 key.type = BTRFS_ROOT_ITEM_KEY; 590 if (is_cowonly_root(root_objectid)) 591 key.offset = 0; 592 else 593 key.offset = (u64)-1; 594 595 return btrfs_get_fs_root(fs_info, &key, false); 596 } 597 598 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 599 static noinline_for_stack 600 struct btrfs_root *find_tree_root(struct reloc_control *rc, 601 struct extent_buffer *leaf, 602 struct btrfs_extent_ref_v0 *ref0) 603 { 604 struct btrfs_root *root; 605 u64 root_objectid = btrfs_ref_root_v0(leaf, ref0); 606 u64 generation = btrfs_ref_generation_v0(leaf, ref0); 607 608 BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID); 609 610 root = read_fs_root(rc->extent_root->fs_info, root_objectid); 611 BUG_ON(IS_ERR(root)); 612 613 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) && 614 generation != btrfs_root_generation(&root->root_item)) 615 return NULL; 616 617 return root; 618 } 619 #endif 620 621 static noinline_for_stack 622 int find_inline_backref(struct extent_buffer *leaf, int slot, 623 unsigned long *ptr, unsigned long *end) 624 { 625 struct btrfs_key key; 626 struct btrfs_extent_item *ei; 627 struct btrfs_tree_block_info *bi; 628 u32 item_size; 629 630 btrfs_item_key_to_cpu(leaf, &key, slot); 631 632 item_size = btrfs_item_size_nr(leaf, slot); 633 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 634 if (item_size < sizeof(*ei)) { 635 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0)); 636 return 1; 
637 } 638 #endif 639 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); 640 WARN_ON(!(btrfs_extent_flags(leaf, ei) & 641 BTRFS_EXTENT_FLAG_TREE_BLOCK)); 642 643 if (key.type == BTRFS_EXTENT_ITEM_KEY && 644 item_size <= sizeof(*ei) + sizeof(*bi)) { 645 WARN_ON(item_size < sizeof(*ei) + sizeof(*bi)); 646 return 1; 647 } 648 if (key.type == BTRFS_METADATA_ITEM_KEY && 649 item_size <= sizeof(*ei)) { 650 WARN_ON(item_size < sizeof(*ei)); 651 return 1; 652 } 653 654 if (key.type == BTRFS_EXTENT_ITEM_KEY) { 655 bi = (struct btrfs_tree_block_info *)(ei + 1); 656 *ptr = (unsigned long)(bi + 1); 657 } else { 658 *ptr = (unsigned long)(ei + 1); 659 } 660 *end = (unsigned long)ei + item_size; 661 return 0; 662 } 663 664 /* 665 * build backref tree for a given tree block. root of the backref tree 666 * corresponds the tree block, leaves of the backref tree correspond 667 * roots of b-trees that reference the tree block. 668 * 669 * the basic idea of this function is check backrefs of a given block 670 * to find upper level blocks that refernece the block, and then check 671 * bakcrefs of these upper level blocks recursively. the recursion stop 672 * when tree root is reached or backrefs for the block is cached. 673 * 674 * NOTE: if we find backrefs for a block are cached, we know backrefs 675 * for all upper level blocks that directly/indirectly reference the 676 * block are also cached. 677 */ 678 static noinline_for_stack 679 struct backref_node *build_backref_tree(struct reloc_control *rc, 680 struct btrfs_key *node_key, 681 int level, u64 bytenr) 682 { 683 struct backref_cache *cache = &rc->backref_cache; 684 struct btrfs_path *path1; 685 struct btrfs_path *path2; 686 struct extent_buffer *eb; 687 struct btrfs_root *root; 688 struct backref_node *cur; 689 struct backref_node *upper; 690 struct backref_node *lower; 691 struct backref_node *node = NULL; 692 struct backref_node *exist = NULL; 693 struct backref_edge *edge; 694 struct rb_node *rb_node; 695 struct btrfs_key key; 696 unsigned long end; 697 unsigned long ptr; 698 LIST_HEAD(list); 699 LIST_HEAD(useless); 700 int cowonly; 701 int ret; 702 int err = 0; 703 bool need_check = true; 704 705 path1 = btrfs_alloc_path(); 706 path2 = btrfs_alloc_path(); 707 if (!path1 || !path2) { 708 err = -ENOMEM; 709 goto out; 710 } 711 path1->reada = 1; 712 path2->reada = 2; 713 714 node = alloc_backref_node(cache); 715 if (!node) { 716 err = -ENOMEM; 717 goto out; 718 } 719 720 node->bytenr = bytenr; 721 node->level = level; 722 node->lowest = 1; 723 cur = node; 724 again: 725 end = 0; 726 ptr = 0; 727 key.objectid = cur->bytenr; 728 key.type = BTRFS_METADATA_ITEM_KEY; 729 key.offset = (u64)-1; 730 731 path1->search_commit_root = 1; 732 path1->skip_locking = 1; 733 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1, 734 0, 0); 735 if (ret < 0) { 736 err = ret; 737 goto out; 738 } 739 ASSERT(ret); 740 ASSERT(path1->slots[0]); 741 742 path1->slots[0]--; 743 744 WARN_ON(cur->checked); 745 if (!list_empty(&cur->upper)) { 746 /* 747 * the backref was added previously when processing 748 * backref of type BTRFS_TREE_BLOCK_REF_KEY 749 */ 750 ASSERT(list_is_singular(&cur->upper)); 751 edge = list_entry(cur->upper.next, struct backref_edge, 752 list[LOWER]); 753 ASSERT(list_empty(&edge->list[UPPER])); 754 exist = edge->node[UPPER]; 755 /* 756 * add the upper level block to pending list if we need 757 * check its backrefs 758 */ 759 if (!exist->checked) 760 list_add_tail(&edge->list[UPPER], &list); 761 } else { 762 exist = NULL; 763 } 764 765 while (1) { 
	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline backref */
			struct btrfs_extent_inline_ref *iref;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			key.type = btrfs_extent_inline_ref_type(eb, iref);
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backrefs of this type.
				 */
842 */ 843 root = find_reloc_root(rc, cur->bytenr); 844 ASSERT(root); 845 cur->root = root; 846 break; 847 } 848 849 edge = alloc_backref_edge(cache); 850 if (!edge) { 851 err = -ENOMEM; 852 goto out; 853 } 854 rb_node = tree_search(&cache->rb_root, key.offset); 855 if (!rb_node) { 856 upper = alloc_backref_node(cache); 857 if (!upper) { 858 free_backref_edge(cache, edge); 859 err = -ENOMEM; 860 goto out; 861 } 862 upper->bytenr = key.offset; 863 upper->level = cur->level + 1; 864 /* 865 * backrefs for the upper level block isn't 866 * cached, add the block to pending list 867 */ 868 list_add_tail(&edge->list[UPPER], &list); 869 } else { 870 upper = rb_entry(rb_node, struct backref_node, 871 rb_node); 872 ASSERT(upper->checked); 873 INIT_LIST_HEAD(&edge->list[UPPER]); 874 } 875 list_add_tail(&edge->list[LOWER], &cur->upper); 876 edge->node[LOWER] = cur; 877 edge->node[UPPER] = upper; 878 879 goto next; 880 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) { 881 goto next; 882 } 883 884 /* key.type == BTRFS_TREE_BLOCK_REF_KEY */ 885 root = read_fs_root(rc->extent_root->fs_info, key.offset); 886 if (IS_ERR(root)) { 887 err = PTR_ERR(root); 888 goto out; 889 } 890 891 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 892 cur->cowonly = 1; 893 894 if (btrfs_root_level(&root->root_item) == cur->level) { 895 /* tree root */ 896 ASSERT(btrfs_root_bytenr(&root->root_item) == 897 cur->bytenr); 898 if (should_ignore_root(root)) 899 list_add(&cur->list, &useless); 900 else 901 cur->root = root; 902 break; 903 } 904 905 level = cur->level + 1; 906 907 /* 908 * searching the tree to find upper level blocks 909 * reference the block. 910 */ 911 path2->search_commit_root = 1; 912 path2->skip_locking = 1; 913 path2->lowest_level = level; 914 ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0); 915 path2->lowest_level = 0; 916 if (ret < 0) { 917 err = ret; 918 goto out; 919 } 920 if (ret > 0 && path2->slots[level] > 0) 921 path2->slots[level]--; 922 923 eb = path2->nodes[level]; 924 WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) != 925 cur->bytenr); 926 927 lower = cur; 928 need_check = true; 929 for (; level < BTRFS_MAX_LEVEL; level++) { 930 if (!path2->nodes[level]) { 931 ASSERT(btrfs_root_bytenr(&root->root_item) == 932 lower->bytenr); 933 if (should_ignore_root(root)) 934 list_add(&lower->list, &useless); 935 else 936 lower->root = root; 937 break; 938 } 939 940 edge = alloc_backref_edge(cache); 941 if (!edge) { 942 err = -ENOMEM; 943 goto out; 944 } 945 946 eb = path2->nodes[level]; 947 rb_node = tree_search(&cache->rb_root, eb->start); 948 if (!rb_node) { 949 upper = alloc_backref_node(cache); 950 if (!upper) { 951 free_backref_edge(cache, edge); 952 err = -ENOMEM; 953 goto out; 954 } 955 upper->bytenr = eb->start; 956 upper->owner = btrfs_header_owner(eb); 957 upper->level = lower->level + 1; 958 if (!test_bit(BTRFS_ROOT_REF_COWS, 959 &root->state)) 960 upper->cowonly = 1; 961 962 /* 963 * if we know the block isn't shared 964 * we can void checking its backrefs. 965 */ 966 if (btrfs_block_can_be_shared(root, eb)) 967 upper->checked = 0; 968 else 969 upper->checked = 1; 970 971 /* 972 * add the block to pending list if we 973 * need check its backrefs, we only do this once 974 * while walking up a tree as we will catch 975 * anything else later on. 
976 */ 977 if (!upper->checked && need_check) { 978 need_check = false; 979 list_add_tail(&edge->list[UPPER], 980 &list); 981 } else { 982 if (upper->checked) 983 need_check = true; 984 INIT_LIST_HEAD(&edge->list[UPPER]); 985 } 986 } else { 987 upper = rb_entry(rb_node, struct backref_node, 988 rb_node); 989 ASSERT(upper->checked); 990 INIT_LIST_HEAD(&edge->list[UPPER]); 991 if (!upper->owner) 992 upper->owner = btrfs_header_owner(eb); 993 } 994 list_add_tail(&edge->list[LOWER], &lower->upper); 995 edge->node[LOWER] = lower; 996 edge->node[UPPER] = upper; 997 998 if (rb_node) 999 break; 1000 lower = upper; 1001 upper = NULL; 1002 } 1003 btrfs_release_path(path2); 1004 next: 1005 if (ptr < end) { 1006 ptr += btrfs_extent_inline_ref_size(key.type); 1007 if (ptr >= end) { 1008 WARN_ON(ptr > end); 1009 ptr = 0; 1010 end = 0; 1011 } 1012 } 1013 if (ptr >= end) 1014 path1->slots[0]++; 1015 } 1016 btrfs_release_path(path1); 1017 1018 cur->checked = 1; 1019 WARN_ON(exist); 1020 1021 /* the pending list isn't empty, take the first block to process */ 1022 if (!list_empty(&list)) { 1023 edge = list_entry(list.next, struct backref_edge, list[UPPER]); 1024 list_del_init(&edge->list[UPPER]); 1025 cur = edge->node[UPPER]; 1026 goto again; 1027 } 1028 1029 /* 1030 * everything goes well, connect backref nodes and insert backref nodes 1031 * into the cache. 1032 */ 1033 ASSERT(node->checked); 1034 cowonly = node->cowonly; 1035 if (!cowonly) { 1036 rb_node = tree_insert(&cache->rb_root, node->bytenr, 1037 &node->rb_node); 1038 if (rb_node) 1039 backref_tree_panic(rb_node, -EEXIST, node->bytenr); 1040 list_add_tail(&node->lower, &cache->leaves); 1041 } 1042 1043 list_for_each_entry(edge, &node->upper, list[LOWER]) 1044 list_add_tail(&edge->list[UPPER], &list); 1045 1046 while (!list_empty(&list)) { 1047 edge = list_entry(list.next, struct backref_edge, list[UPPER]); 1048 list_del_init(&edge->list[UPPER]); 1049 upper = edge->node[UPPER]; 1050 if (upper->detached) { 1051 list_del(&edge->list[LOWER]); 1052 lower = edge->node[LOWER]; 1053 free_backref_edge(cache, edge); 1054 if (list_empty(&lower->upper)) 1055 list_add(&lower->list, &useless); 1056 continue; 1057 } 1058 1059 if (!RB_EMPTY_NODE(&upper->rb_node)) { 1060 if (upper->lowest) { 1061 list_del_init(&upper->lower); 1062 upper->lowest = 0; 1063 } 1064 1065 list_add_tail(&edge->list[UPPER], &upper->lower); 1066 continue; 1067 } 1068 1069 if (!upper->checked) { 1070 /* 1071 * Still want to blow up for developers since this is a 1072 * logic bug. 1073 */ 1074 ASSERT(0); 1075 err = -EINVAL; 1076 goto out; 1077 } 1078 if (cowonly != upper->cowonly) { 1079 ASSERT(0); 1080 err = -EINVAL; 1081 goto out; 1082 } 1083 1084 if (!cowonly) { 1085 rb_node = tree_insert(&cache->rb_root, upper->bytenr, 1086 &upper->rb_node); 1087 if (rb_node) 1088 backref_tree_panic(rb_node, -EEXIST, 1089 upper->bytenr); 1090 } 1091 1092 list_add_tail(&edge->list[UPPER], &upper->lower); 1093 1094 list_for_each_entry(edge, &upper->upper, list[LOWER]) 1095 list_add_tail(&edge->list[UPPER], &list); 1096 } 1097 /* 1098 * process useless backref nodes. backref nodes for tree leaves 1099 * are deleted from the cache. backref nodes for upper level 1100 * tree blocks are left in the cache to avoid unnecessary backref 1101 * lookup. 
1102 */ 1103 while (!list_empty(&useless)) { 1104 upper = list_entry(useless.next, struct backref_node, list); 1105 list_del_init(&upper->list); 1106 ASSERT(list_empty(&upper->upper)); 1107 if (upper == node) 1108 node = NULL; 1109 if (upper->lowest) { 1110 list_del_init(&upper->lower); 1111 upper->lowest = 0; 1112 } 1113 while (!list_empty(&upper->lower)) { 1114 edge = list_entry(upper->lower.next, 1115 struct backref_edge, list[UPPER]); 1116 list_del(&edge->list[UPPER]); 1117 list_del(&edge->list[LOWER]); 1118 lower = edge->node[LOWER]; 1119 free_backref_edge(cache, edge); 1120 1121 if (list_empty(&lower->upper)) 1122 list_add(&lower->list, &useless); 1123 } 1124 __mark_block_processed(rc, upper); 1125 if (upper->level > 0) { 1126 list_add(&upper->list, &cache->detached); 1127 upper->detached = 1; 1128 } else { 1129 rb_erase(&upper->rb_node, &cache->rb_root); 1130 free_backref_node(cache, upper); 1131 } 1132 } 1133 out: 1134 btrfs_free_path(path1); 1135 btrfs_free_path(path2); 1136 if (err) { 1137 while (!list_empty(&useless)) { 1138 lower = list_entry(useless.next, 1139 struct backref_node, list); 1140 list_del_init(&lower->list); 1141 } 1142 while (!list_empty(&list)) { 1143 edge = list_first_entry(&list, struct backref_edge, 1144 list[UPPER]); 1145 list_del(&edge->list[UPPER]); 1146 list_del(&edge->list[LOWER]); 1147 lower = edge->node[LOWER]; 1148 upper = edge->node[UPPER]; 1149 free_backref_edge(cache, edge); 1150 1151 /* 1152 * Lower is no longer linked to any upper backref nodes 1153 * and isn't in the cache, we can free it ourselves. 1154 */ 1155 if (list_empty(&lower->upper) && 1156 RB_EMPTY_NODE(&lower->rb_node)) 1157 list_add(&lower->list, &useless); 1158 1159 if (!RB_EMPTY_NODE(&upper->rb_node)) 1160 continue; 1161 1162 /* Add this guy's upper edges to the list to proces */ 1163 list_for_each_entry(edge, &upper->upper, list[LOWER]) 1164 list_add_tail(&edge->list[UPPER], &list); 1165 if (list_empty(&upper->upper)) 1166 list_add(&upper->list, &useless); 1167 } 1168 1169 while (!list_empty(&useless)) { 1170 lower = list_entry(useless.next, 1171 struct backref_node, list); 1172 list_del_init(&lower->list); 1173 free_backref_node(cache, lower); 1174 } 1175 return ERR_PTR(err); 1176 } 1177 ASSERT(!node || !node->detached); 1178 return node; 1179 } 1180 1181 /* 1182 * helper to add backref node for the newly created snapshot. 
1183 * the backref node is created by cloning backref node that 1184 * corresponds to root of source tree 1185 */ 1186 static int clone_backref_node(struct btrfs_trans_handle *trans, 1187 struct reloc_control *rc, 1188 struct btrfs_root *src, 1189 struct btrfs_root *dest) 1190 { 1191 struct btrfs_root *reloc_root = src->reloc_root; 1192 struct backref_cache *cache = &rc->backref_cache; 1193 struct backref_node *node = NULL; 1194 struct backref_node *new_node; 1195 struct backref_edge *edge; 1196 struct backref_edge *new_edge; 1197 struct rb_node *rb_node; 1198 1199 if (cache->last_trans > 0) 1200 update_backref_cache(trans, cache); 1201 1202 rb_node = tree_search(&cache->rb_root, src->commit_root->start); 1203 if (rb_node) { 1204 node = rb_entry(rb_node, struct backref_node, rb_node); 1205 if (node->detached) 1206 node = NULL; 1207 else 1208 BUG_ON(node->new_bytenr != reloc_root->node->start); 1209 } 1210 1211 if (!node) { 1212 rb_node = tree_search(&cache->rb_root, 1213 reloc_root->commit_root->start); 1214 if (rb_node) { 1215 node = rb_entry(rb_node, struct backref_node, 1216 rb_node); 1217 BUG_ON(node->detached); 1218 } 1219 } 1220 1221 if (!node) 1222 return 0; 1223 1224 new_node = alloc_backref_node(cache); 1225 if (!new_node) 1226 return -ENOMEM; 1227 1228 new_node->bytenr = dest->node->start; 1229 new_node->level = node->level; 1230 new_node->lowest = node->lowest; 1231 new_node->checked = 1; 1232 new_node->root = dest; 1233 1234 if (!node->lowest) { 1235 list_for_each_entry(edge, &node->lower, list[UPPER]) { 1236 new_edge = alloc_backref_edge(cache); 1237 if (!new_edge) 1238 goto fail; 1239 1240 new_edge->node[UPPER] = new_node; 1241 new_edge->node[LOWER] = edge->node[LOWER]; 1242 list_add_tail(&new_edge->list[UPPER], 1243 &new_node->lower); 1244 } 1245 } else { 1246 list_add_tail(&new_node->lower, &cache->leaves); 1247 } 1248 1249 rb_node = tree_insert(&cache->rb_root, new_node->bytenr, 1250 &new_node->rb_node); 1251 if (rb_node) 1252 backref_tree_panic(rb_node, -EEXIST, new_node->bytenr); 1253 1254 if (!new_node->lowest) { 1255 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { 1256 list_add_tail(&new_edge->list[LOWER], 1257 &new_edge->node[LOWER]->upper); 1258 } 1259 } 1260 return 0; 1261 fail: 1262 while (!list_empty(&new_node->lower)) { 1263 new_edge = list_entry(new_node->lower.next, 1264 struct backref_edge, list[UPPER]); 1265 list_del(&new_edge->list[UPPER]); 1266 free_backref_edge(cache, new_edge); 1267 } 1268 free_backref_node(cache, new_node); 1269 return -ENOMEM; 1270 } 1271 1272 /* 1273 * helper to add 'address of tree root -> reloc tree' mapping 1274 */ 1275 static int __must_check __add_reloc_root(struct btrfs_root *root) 1276 { 1277 struct rb_node *rb_node; 1278 struct mapping_node *node; 1279 struct reloc_control *rc = root->fs_info->reloc_ctl; 1280 1281 node = kmalloc(sizeof(*node), GFP_NOFS); 1282 if (!node) 1283 return -ENOMEM; 1284 1285 node->bytenr = root->node->start; 1286 node->data = root; 1287 1288 spin_lock(&rc->reloc_root_tree.lock); 1289 rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 1290 node->bytenr, &node->rb_node); 1291 spin_unlock(&rc->reloc_root_tree.lock); 1292 if (rb_node) { 1293 btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found " 1294 "for start=%llu while inserting into relocation " 1295 "tree", node->bytenr); 1296 kfree(node); 1297 return -EEXIST; 1298 } 1299 1300 list_add_tail(&root->root_list, &rc->reloc_roots); 1301 return 0; 1302 } 1303 1304 /* 1305 * helper to delete the 'address of tree root -> reloc tree' 1306 
static void __del_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&root->fs_info->trans_lock);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}

static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	u64 last_snap = 0;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);

		last_snap = btrfs_root_last_snapshot(&root->root_item);
		btrfs_set_root_last_snapshot(&root->root_item,
					     trans->transid - 1);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have the RELOC flag
		 * set in their headers. so it's OK not to update
		 * the 'last_snapshot'.
		 */
1399 */ 1400 ret = btrfs_copy_root(trans, root, root->node, &eb, 1401 BTRFS_TREE_RELOC_OBJECTID); 1402 BUG_ON(ret); 1403 } 1404 1405 memcpy(root_item, &root->root_item, sizeof(*root_item)); 1406 btrfs_set_root_bytenr(root_item, eb->start); 1407 btrfs_set_root_level(root_item, btrfs_header_level(eb)); 1408 btrfs_set_root_generation(root_item, trans->transid); 1409 1410 if (root->root_key.objectid == objectid) { 1411 btrfs_set_root_refs(root_item, 0); 1412 memset(&root_item->drop_progress, 0, 1413 sizeof(struct btrfs_disk_key)); 1414 root_item->drop_level = 0; 1415 /* 1416 * abuse rtransid, it is safe because it is impossible to 1417 * receive data into a relocation tree. 1418 */ 1419 btrfs_set_root_rtransid(root_item, last_snap); 1420 btrfs_set_root_otransid(root_item, trans->transid); 1421 } 1422 1423 btrfs_tree_unlock(eb); 1424 free_extent_buffer(eb); 1425 1426 ret = btrfs_insert_root(trans, root->fs_info->tree_root, 1427 &root_key, root_item); 1428 BUG_ON(ret); 1429 kfree(root_item); 1430 1431 reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key); 1432 BUG_ON(IS_ERR(reloc_root)); 1433 reloc_root->last_trans = trans->transid; 1434 return reloc_root; 1435 } 1436 1437 /* 1438 * create reloc tree for a given fs tree. reloc tree is just a 1439 * snapshot of the fs tree with special root objectid. 1440 */ 1441 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, 1442 struct btrfs_root *root) 1443 { 1444 struct btrfs_root *reloc_root; 1445 struct reloc_control *rc = root->fs_info->reloc_ctl; 1446 struct btrfs_block_rsv *rsv; 1447 int clear_rsv = 0; 1448 int ret; 1449 1450 if (root->reloc_root) { 1451 reloc_root = root->reloc_root; 1452 reloc_root->last_trans = trans->transid; 1453 return 0; 1454 } 1455 1456 if (!rc || !rc->create_reloc_tree || 1457 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 1458 return 0; 1459 1460 if (!trans->reloc_reserved) { 1461 rsv = trans->block_rsv; 1462 trans->block_rsv = rc->block_rsv; 1463 clear_rsv = 1; 1464 } 1465 reloc_root = create_reloc_root(trans, root, root->root_key.objectid); 1466 if (clear_rsv) 1467 trans->block_rsv = rsv; 1468 1469 ret = __add_reloc_root(reloc_root); 1470 BUG_ON(ret < 0); 1471 root->reloc_root = reloc_root; 1472 return 0; 1473 } 1474 1475 /* 1476 * update root item of reloc tree 1477 */ 1478 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, 1479 struct btrfs_root *root) 1480 { 1481 struct btrfs_root *reloc_root; 1482 struct btrfs_root_item *root_item; 1483 int ret; 1484 1485 if (!root->reloc_root) 1486 goto out; 1487 1488 reloc_root = root->reloc_root; 1489 root_item = &reloc_root->root_item; 1490 1491 if (root->fs_info->reloc_ctl->merge_reloc_tree && 1492 btrfs_root_refs(root_item) == 0) { 1493 root->reloc_root = NULL; 1494 __del_reloc_root(reloc_root); 1495 } 1496 1497 if (reloc_root->commit_root != reloc_root->node) { 1498 btrfs_set_root_node(root_item, reloc_root->node); 1499 free_extent_buffer(reloc_root->commit_root); 1500 reloc_root->commit_root = btrfs_root_node(reloc_root); 1501 } 1502 1503 ret = btrfs_update_root(trans, root->fs_info->tree_root, 1504 &reloc_root->root_key, root_item); 1505 BUG_ON(ret); 1506 1507 out: 1508 return 0; 1509 } 1510 1511 /* 1512 * helper to find first cached inode with inode number >= objectid 1513 * in a subvolume 1514 */ 1515 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid) 1516 { 1517 struct rb_node *node; 1518 struct rb_node *prev; 1519 struct btrfs_inode *entry; 1520 struct inode *inode; 1521 1522 spin_lock(&root->inode_lock); 
/*
 * helper to find first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(&entry->vfs_inode))
			node = node->rb_left;
		else if (objectid > btrfs_ino(&entry->vfs_inode))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(&entry->vfs_inode) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}

static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	if (bytenr >= block_group->key.objectid &&
	    bytenr < block_group->key.objectid + block_group->key.offset)
		return 1;
	return 0;
}

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
				       bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
1624 */ 1625 static noinline_for_stack 1626 int replace_file_extents(struct btrfs_trans_handle *trans, 1627 struct reloc_control *rc, 1628 struct btrfs_root *root, 1629 struct extent_buffer *leaf) 1630 { 1631 struct btrfs_key key; 1632 struct btrfs_file_extent_item *fi; 1633 struct inode *inode = NULL; 1634 u64 parent; 1635 u64 bytenr; 1636 u64 new_bytenr = 0; 1637 u64 num_bytes; 1638 u64 end; 1639 u32 nritems; 1640 u32 i; 1641 int ret = 0; 1642 int first = 1; 1643 int dirty = 0; 1644 1645 if (rc->stage != UPDATE_DATA_PTRS) 1646 return 0; 1647 1648 /* reloc trees always use full backref */ 1649 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 1650 parent = leaf->start; 1651 else 1652 parent = 0; 1653 1654 nritems = btrfs_header_nritems(leaf); 1655 for (i = 0; i < nritems; i++) { 1656 cond_resched(); 1657 btrfs_item_key_to_cpu(leaf, &key, i); 1658 if (key.type != BTRFS_EXTENT_DATA_KEY) 1659 continue; 1660 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 1661 if (btrfs_file_extent_type(leaf, fi) == 1662 BTRFS_FILE_EXTENT_INLINE) 1663 continue; 1664 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1665 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1666 if (bytenr == 0) 1667 continue; 1668 if (!in_block_group(bytenr, rc->block_group)) 1669 continue; 1670 1671 /* 1672 * if we are modifying block in fs tree, wait for readpage 1673 * to complete and drop the extent cache 1674 */ 1675 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 1676 if (first) { 1677 inode = find_next_inode(root, key.objectid); 1678 first = 0; 1679 } else if (inode && btrfs_ino(inode) < key.objectid) { 1680 btrfs_add_delayed_iput(inode); 1681 inode = find_next_inode(root, key.objectid); 1682 } 1683 if (inode && btrfs_ino(inode) == key.objectid) { 1684 end = key.offset + 1685 btrfs_file_extent_num_bytes(leaf, fi); 1686 WARN_ON(!IS_ALIGNED(key.offset, 1687 root->sectorsize)); 1688 WARN_ON(!IS_ALIGNED(end, root->sectorsize)); 1689 end--; 1690 ret = try_lock_extent(&BTRFS_I(inode)->io_tree, 1691 key.offset, end); 1692 if (!ret) 1693 continue; 1694 1695 btrfs_drop_extent_cache(inode, key.offset, end, 1696 1); 1697 unlock_extent(&BTRFS_I(inode)->io_tree, 1698 key.offset, end); 1699 } 1700 } 1701 1702 ret = get_new_location(rc->data_inode, &new_bytenr, 1703 bytenr, num_bytes); 1704 if (ret) { 1705 /* 1706 * Don't have to abort since we've not changed anything 1707 * in the file extent yet. 
1708 */ 1709 break; 1710 } 1711 1712 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr); 1713 dirty = 1; 1714 1715 key.offset -= btrfs_file_extent_offset(leaf, fi); 1716 ret = btrfs_inc_extent_ref(trans, root, new_bytenr, 1717 num_bytes, parent, 1718 btrfs_header_owner(leaf), 1719 key.objectid, key.offset); 1720 if (ret) { 1721 btrfs_abort_transaction(trans, root, ret); 1722 break; 1723 } 1724 1725 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 1726 parent, btrfs_header_owner(leaf), 1727 key.objectid, key.offset); 1728 if (ret) { 1729 btrfs_abort_transaction(trans, root, ret); 1730 break; 1731 } 1732 } 1733 if (dirty) 1734 btrfs_mark_buffer_dirty(leaf); 1735 if (inode) 1736 btrfs_add_delayed_iput(inode); 1737 return ret; 1738 } 1739 1740 static noinline_for_stack 1741 int memcmp_node_keys(struct extent_buffer *eb, int slot, 1742 struct btrfs_path *path, int level) 1743 { 1744 struct btrfs_disk_key key1; 1745 struct btrfs_disk_key key2; 1746 btrfs_node_key(eb, &key1, slot); 1747 btrfs_node_key(path->nodes[level], &key2, path->slots[level]); 1748 return memcmp(&key1, &key2, sizeof(key1)); 1749 } 1750 1751 /* 1752 * try to replace tree blocks in fs tree with the new blocks 1753 * in reloc tree. tree blocks haven't been modified since the 1754 * reloc tree was create can be replaced. 1755 * 1756 * if a block was replaced, level of the block + 1 is returned. 1757 * if no block got replaced, 0 is returned. if there are other 1758 * errors, a negative error number is returned. 1759 */ 1760 static noinline_for_stack 1761 int replace_path(struct btrfs_trans_handle *trans, 1762 struct btrfs_root *dest, struct btrfs_root *src, 1763 struct btrfs_path *path, struct btrfs_key *next_key, 1764 int lowest_level, int max_level) 1765 { 1766 struct extent_buffer *eb; 1767 struct extent_buffer *parent; 1768 struct btrfs_key key; 1769 u64 old_bytenr; 1770 u64 new_bytenr; 1771 u64 old_ptr_gen; 1772 u64 new_ptr_gen; 1773 u64 last_snapshot; 1774 u32 blocksize; 1775 int cow = 0; 1776 int level; 1777 int ret; 1778 int slot; 1779 1780 BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 1781 BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); 1782 1783 last_snapshot = btrfs_root_last_snapshot(&src->root_item); 1784 again: 1785 slot = path->slots[lowest_level]; 1786 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot); 1787 1788 eb = btrfs_lock_root_node(dest); 1789 btrfs_set_lock_blocking(eb); 1790 level = btrfs_header_level(eb); 1791 1792 if (level < lowest_level) { 1793 btrfs_tree_unlock(eb); 1794 free_extent_buffer(eb); 1795 return 0; 1796 } 1797 1798 if (cow) { 1799 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb); 1800 BUG_ON(ret); 1801 } 1802 btrfs_set_lock_blocking(eb); 1803 1804 if (next_key) { 1805 next_key->objectid = (u64)-1; 1806 next_key->type = (u8)-1; 1807 next_key->offset = (u64)-1; 1808 } 1809 1810 parent = eb; 1811 while (1) { 1812 level = btrfs_header_level(parent); 1813 BUG_ON(level < lowest_level); 1814 1815 ret = btrfs_bin_search(parent, &key, level, &slot); 1816 if (ret && slot > 0) 1817 slot--; 1818 1819 if (next_key && slot + 1 < btrfs_header_nritems(parent)) 1820 btrfs_node_key_to_cpu(parent, next_key, slot + 1); 1821 1822 old_bytenr = btrfs_node_blockptr(parent, slot); 1823 blocksize = dest->nodesize; 1824 old_ptr_gen = btrfs_node_ptr_generation(parent, slot); 1825 1826 if (level <= max_level) { 1827 eb = path->nodes[level]; 1828 new_bytenr = btrfs_node_blockptr(eb, 1829 path->slots[level]); 1830 new_ptr_gen = btrfs_node_ptr_generation(eb, 1831 
/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = dest->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			} else if (!extent_buffer_uptodate(eb)) {
				ret = -EIO;
				free_extent_buffer(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
					   path->nodes[level]->start,
					   src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);
		ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
					   0, dest->root_key.objectid, level - 1,
					   0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
					path->nodes[level]->start,
					src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
					0, dest->root_key.objectid, level - 1,
					0);
		BUG_ON(ret);

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}

/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}
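/*
 * walk_up_reloc_tree() above and walk_down_reloc_tree() below both key
 * off the child pointer generation: only pointers with generation >
 * last_snapshot can lead to blocks COWed into the reloc tree after it
 * was created, so older subtrees are skipped wholesale.
 */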
/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 bytenr;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
		eb = read_tree_block(root, bytenr, ptr_gen);
		if (IS_ERR(eb)) {
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}

/*
 * invalidate extent cache for file extents whose keys are in the range
 * of [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(inode);

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, root->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for readpage to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		btrfs_drop_extent_cache(inode, start, end, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	LIST_HEAD(inode_list);
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int level;
	int max_level;
	int replaced = 0;
	int ret;
	int err = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		extent_buffer_get(reloc_root->node);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			goto out;
		}
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		root_item->drop_level = level;

		btrfs_end_transaction_throttle(trans, root);
		trans = NULL;

		btrfs_btree_balance_dirty(root);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to
	 * be relocated and that block is the tree root.
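	 * because the block group being relocated has been marked
	 * read only, cowing the root is enough to move it: the new
	 * copy is allocated outside of the block group.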
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	if (ret < 0)
		err = ret;
out:
	btrfs_free_path(path);

	if (err == 0) {
		memset(&root_item->drop_progress, 0,
		       sizeof(root_item->drop_progress));
		root_item->drop_level = 0;
		btrfs_set_root_refs(root_item, 0);
		btrfs_update_reloc_root(trans, root);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans, root);

	btrfs_btree_balance_dirty(root);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return err;
}

static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&root->fs_info->reloc_mutex);
	rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&root->fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(rc->extent_root,
						rc->block_rsv, num_bytes);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans, rc->extent_root);
			btrfs_block_rsv_release(rc->extent_root,
						rc->block_rsv, num_bytes);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = read_fs_root(reloc_root->fs_info,
				    reloc_root->root_key.offset);
		BUG_ON(IS_ERR(root));
		BUG_ON(root->reloc_root != reloc_root);

		/*
		 * set the reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		btrfs_update_reloc_root(trans, root);

		list_add(&reloc_root->root_list, &reloc_roots);
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		btrfs_commit_transaction(trans, rc->extent_root);
	else
		btrfs_end_transaction(trans, rc->extent_root);
	return err;
}

static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root;

	while (!list_empty(list)) {
		reloc_root = list_entry(list->next, struct btrfs_root,
					root_list);
		__del_reloc_root(reloc_root);
	}
}

static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	u64 last_snap;
	u64 otransid;
	u64 objectid;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction;
	 * we have to make sure nobody is in the middle of adding
	 * their roots to the list while we are doing this splice
	 */
	mutex_lock(&root->fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&root->fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			root = read_fs_root(reloc_root->fs_info,
					    reloc_root->root_key.offset);
			BUG_ON(IS_ERR(root));
			BUG_ON(root->reloc_root != reloc_root);

			ret = merge_reloc_root(rc, root);
			if (ret) {
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			list_del_init(&reloc_root->root_list);
		}

		/*
		 * we kept the old last snapshot transid in rtransid when
		 * we created the relocation tree.
		 */
		last_snap = btrfs_root_rtransid(&reloc_root->root_item);
		otransid = btrfs_root_otransid(&reloc_root->root_item);
		objectid = reloc_root->root_key.offset;

		ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
		if (ret < 0) {
			if (list_empty(&reloc_root->root_list))
				list_add_tail(&reloc_root->root_list,
					      &reloc_roots);
			goto out;
		}
	}

	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_std_error(root->fs_info, ret, NULL);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&root->fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&root->fs_info->reloc_mutex);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);
	}

	BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
}

static void free_block_list(struct rb_root *blocks)
{
	struct tree_block *block;
	struct rb_node *rb_node;

	while ((rb_node = rb_first(blocks))) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		rb_erase(rb_node, blocks);
		kfree(block);
	}
}

static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_root *root;

	if (reloc_root->last_trans == trans->transid)
		return 0;

	root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
	BUG_ON(IS_ERR(root));
	BUG_ON(root->reloc_root != reloc_root);

	return btrfs_record_root_in_trans(trans, root);
}

static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct backref_node *node,
				     struct backref_edge *edges[])
{
	struct backref_node *next;
	struct btrfs_root *root;
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);
		BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));

		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
			record_reloc_root_in_trans(trans, root);
			break;
		}

		btrfs_record_root_in_trans(trans, root);
		root = root->reloc_root;

		if (next->new_bytenr != root->node->start) {
			BUG_ON(next->new_bytenr);
			BUG_ON(!list_empty(&next->list));
			next->new_bytenr = root->node->start;
			next->root = root;
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			__mark_block_processed(rc, next);
			break;
		}

		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root)
		return NULL;

	next = node;
	/* setup backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}

/*
 * select a tree root for relocation. return NULL if the block
 * is reference counted. we should use do_relocation() in this
 * case. return a tree root pointer if the block isn't reference
 * counted. return -ENOENT if the block is the root of a reloc tree.
 */
static noinline_for_stack
struct btrfs_root *select_one_root(struct backref_node *node)
{
	struct backref_node *next;
	struct btrfs_root *root;
	struct btrfs_root *fs_root = NULL;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);

		/* no other choice for a non-reference counted tree */
		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			return root;

		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
			fs_root = root;

		if (next != node)
			return NULL;

		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}

	if (!fs_root)
		return ERR_PTR(-ENOENT);
	return fs_root;
}

static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct backref_node *node, int reserve)
{
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(reserve && node->processed);

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed && (reserve || next != node))
				break;

			num_bytes += rc->extent_root->nodesize;

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}

static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	u64 num_bytes;
	int ret;
	u64 tmp;

	num_bytes = calcu_metadata_size(rc, node, 1) * 2;

	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;
	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_ALL);
	if (ret) {
		if (ret == -EAGAIN) {
			tmp = rc->extent_root->nodesize *
				RELOCATION_RESERVED_NODES;
			while (tmp <= rc->reserved_bytes)
				tmp <<= 1;
			/*
			 * only one thread can access block_rsv at this
			 * point, so we don't need to hold a lock to
			 * protect it. we grow the reservation here to
			 * allow enough space for relocation; we will
			 * return early in the ENOSPC case.
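			 *
			 * e.g. with the default 16K nodesize the rsv
			 * starts at 256 * 16K = 4M; the first time
			 * reserved_bytes crosses 4M, tmp doubles to 8M
			 * and the rsv grows to 8M + 4M = 12M, and so on.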
			 */
			rc->block_rsv->size = tmp + rc->extent_root->nodesize *
					      RELOCATION_RESERVED_NODES;
		}
		return ret;
	}

	return 0;
}

/*
 * relocate a tree block, then update the pointers in the upper level
 * blocks that reference the block so they point to the new location.
 *
 * if called by link_to_upper, the block has already been relocated.
 * in that case this function just updates pointers.
 */
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct backref_node *upper;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	u64 generation;
	int slot;
	int ret;
	int err = 0;

	BUG_ON(lowest && node->eb);

	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		cond_resched();

		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		BUG_ON(!root);

		if (upper->eb && !upper->locked) {
			if (!lowest) {
				ret = btrfs_bin_search(upper->eb, key,
						       upper->level, &slot);
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			drop_node_buffer(upper);
		}

		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret < 0) {
				err = ret;
				break;
			}
			BUG_ON(ret > 0);

			if (!upper->eb) {
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}

			upper->locked = 1;
			path->locks[upper->level] = 0;

			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, key, upper->level,
					       &slot);
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			BUG_ON(bytenr != node->bytenr);
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}

		blocksize = root->nodesize;
		generation = btrfs_node_ptr_generation(upper->eb, slot);
		eb = read_tree_block(root, bytenr, generation);
		if (IS_ERR(eb)) {
			err = PTR_ERR(eb);
			goto next;
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			err = -EIO;
			goto next;
		}
		btrfs_tree_lock(eb);
		btrfs_set_lock_blocking(eb);

		if (!node->eb) {
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(node->eb != eb);
		} else {
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(upper->eb);

			ret = btrfs_inc_extent_ref(trans, root,
						   node->eb->start, blocksize,
						   upper->eb->start,
						   btrfs_header_owner(upper->eb),
						   node->level, 0);
			BUG_ON(ret);

			ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
			BUG_ON(ret);
		}
next:
		if (!upper->pending)
			drop_node_buffer(upper);
		else
			unlock_node_buffer(upper);
		if (err)
			break;
	}

	if (!err && node->pending) {
		drop_node_buffer(node);
		list_move_tail(&node->list, &rc->backref_cache.changed);
		node->pending = 0;
	}

	path->lowest_level = 0;
	BUG_ON(err == -ENOSPC);
	return err;
}

static int link_to_upper(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_path *path)
{
	struct btrfs_key key;

	btrfs_node_key_to_cpu(node->eb, &key, 0);
	return do_relocation(trans, rc, node, &key, path, 0);
}

static int finish_pending_nodes(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_path *path, int err)
{
	LIST_HEAD(list);
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node;
	int level;
	int ret;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		while (!list_empty(&cache->pending[level])) {
			node = list_entry(cache->pending[level].next,
					  struct backref_node, list);
			list_move_tail(&node->list, &list);
			BUG_ON(!node->pending);

			if (!err) {
				ret = link_to_upper(trans, rc, node, path);
				if (ret < 0)
					err = ret;
			}
		}
		list_splice_init(&list, &cache->pending[level]);
	}
	return err;
}

static void mark_block_processed(struct reloc_control *rc,
				 u64 bytenr, u32 blocksize)
{
	set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
			EXTENT_DIRTY, GFP_NOFS);
}

static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_block_group(node->bytenr, rc->block_group)) {
		blocksize = rc->extent_root->nodesize;
		mark_block_processed(rc, node->bytenr, blocksize);
	}
	node->processed = 1;
}

/*
 * mark a block and all blocks that directly/indirectly reference the
 * block as processed.
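 *
 * the walk below follows the first upper edge at each level, stashing
 * the remaining edges in edges[] so walk_down_backref() can backtrack
 * to them later; calcu_metadata_size() uses the same pattern.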
2833 */ 2834 static void update_processed_blocks(struct reloc_control *rc, 2835 struct backref_node *node) 2836 { 2837 struct backref_node *next = node; 2838 struct backref_edge *edge; 2839 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2840 int index = 0; 2841 2842 while (next) { 2843 cond_resched(); 2844 while (1) { 2845 if (next->processed) 2846 break; 2847 2848 __mark_block_processed(rc, next); 2849 2850 if (list_empty(&next->upper)) 2851 break; 2852 2853 edge = list_entry(next->upper.next, 2854 struct backref_edge, list[LOWER]); 2855 edges[index++] = edge; 2856 next = edge->node[UPPER]; 2857 } 2858 next = walk_down_backref(edges, &index); 2859 } 2860 } 2861 2862 static int tree_block_processed(u64 bytenr, struct reloc_control *rc) 2863 { 2864 u32 blocksize = rc->extent_root->nodesize; 2865 2866 if (test_range_bit(&rc->processed_blocks, bytenr, 2867 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2868 return 1; 2869 return 0; 2870 } 2871 2872 static int get_tree_block_key(struct reloc_control *rc, 2873 struct tree_block *block) 2874 { 2875 struct extent_buffer *eb; 2876 2877 BUG_ON(block->key_ready); 2878 eb = read_tree_block(rc->extent_root, block->bytenr, 2879 block->key.offset); 2880 if (IS_ERR(eb)) { 2881 return PTR_ERR(eb); 2882 } else if (!extent_buffer_uptodate(eb)) { 2883 free_extent_buffer(eb); 2884 return -EIO; 2885 } 2886 WARN_ON(btrfs_header_level(eb) != block->level); 2887 if (block->level == 0) 2888 btrfs_item_key_to_cpu(eb, &block->key, 0); 2889 else 2890 btrfs_node_key_to_cpu(eb, &block->key, 0); 2891 free_extent_buffer(eb); 2892 block->key_ready = 1; 2893 return 0; 2894 } 2895 2896 /* 2897 * helper function to relocate a tree block 2898 */ 2899 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2900 struct reloc_control *rc, 2901 struct backref_node *node, 2902 struct btrfs_key *key, 2903 struct btrfs_path *path) 2904 { 2905 struct btrfs_root *root; 2906 int ret = 0; 2907 2908 if (!node) 2909 return 0; 2910 2911 BUG_ON(node->processed); 2912 root = select_one_root(node); 2913 if (root == ERR_PTR(-ENOENT)) { 2914 update_processed_blocks(rc, node); 2915 goto out; 2916 } 2917 2918 if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { 2919 ret = reserve_metadata_space(trans, rc, node); 2920 if (ret) 2921 goto out; 2922 } 2923 2924 if (root) { 2925 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { 2926 BUG_ON(node->new_bytenr); 2927 BUG_ON(!list_empty(&node->list)); 2928 btrfs_record_root_in_trans(trans, root); 2929 root = root->reloc_root; 2930 node->new_bytenr = root->node->start; 2931 node->root = root; 2932 list_add_tail(&node->list, &rc->backref_cache.changed); 2933 } else { 2934 path->lowest_level = node->level; 2935 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2936 btrfs_release_path(path); 2937 if (ret > 0) 2938 ret = 0; 2939 } 2940 if (!ret) 2941 update_processed_blocks(rc, node); 2942 } else { 2943 ret = do_relocation(trans, rc, node, key, path, 1); 2944 } 2945 out: 2946 if (ret || node->level == 0 || node->cowonly) 2947 remove_backref_node(&rc->backref_cache, node); 2948 return ret; 2949 } 2950 2951 /* 2952 * relocate a list of blocks 2953 */ 2954 static noinline_for_stack 2955 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 2956 struct reloc_control *rc, struct rb_root *blocks) 2957 { 2958 struct backref_node *node; 2959 struct btrfs_path *path; 2960 struct tree_block *block; 2961 struct rb_node *rb_node; 2962 int ret; 2963 int err = 0; 2964 2965 path = btrfs_alloc_path(); 2966 if (!path) { 2967 err = -ENOMEM; 2968 
goto out_free_blocks; 2969 } 2970 2971 rb_node = rb_first(blocks); 2972 while (rb_node) { 2973 block = rb_entry(rb_node, struct tree_block, rb_node); 2974 if (!block->key_ready) 2975 readahead_tree_block(rc->extent_root, block->bytenr); 2976 rb_node = rb_next(rb_node); 2977 } 2978 2979 rb_node = rb_first(blocks); 2980 while (rb_node) { 2981 block = rb_entry(rb_node, struct tree_block, rb_node); 2982 if (!block->key_ready) { 2983 err = get_tree_block_key(rc, block); 2984 if (err) 2985 goto out_free_path; 2986 } 2987 rb_node = rb_next(rb_node); 2988 } 2989 2990 rb_node = rb_first(blocks); 2991 while (rb_node) { 2992 block = rb_entry(rb_node, struct tree_block, rb_node); 2993 2994 node = build_backref_tree(rc, &block->key, 2995 block->level, block->bytenr); 2996 if (IS_ERR(node)) { 2997 err = PTR_ERR(node); 2998 goto out; 2999 } 3000 3001 ret = relocate_tree_block(trans, rc, node, &block->key, 3002 path); 3003 if (ret < 0) { 3004 if (ret != -EAGAIN || rb_node == rb_first(blocks)) 3005 err = ret; 3006 goto out; 3007 } 3008 rb_node = rb_next(rb_node); 3009 } 3010 out: 3011 err = finish_pending_nodes(trans, rc, path, err); 3012 3013 out_free_path: 3014 btrfs_free_path(path); 3015 out_free_blocks: 3016 free_block_list(blocks); 3017 return err; 3018 } 3019 3020 static noinline_for_stack 3021 int prealloc_file_extent_cluster(struct inode *inode, 3022 struct file_extent_cluster *cluster) 3023 { 3024 u64 alloc_hint = 0; 3025 u64 start; 3026 u64 end; 3027 u64 offset = BTRFS_I(inode)->index_cnt; 3028 u64 num_bytes; 3029 int nr = 0; 3030 int ret = 0; 3031 3032 BUG_ON(cluster->start != cluster->boundary[0]); 3033 mutex_lock(&inode->i_mutex); 3034 3035 ret = btrfs_check_data_free_space(inode, cluster->start, 3036 cluster->end + 1 - cluster->start); 3037 if (ret) 3038 goto out; 3039 3040 while (nr < cluster->nr) { 3041 start = cluster->boundary[nr] - offset; 3042 if (nr + 1 < cluster->nr) 3043 end = cluster->boundary[nr + 1] - 1 - offset; 3044 else 3045 end = cluster->end - offset; 3046 3047 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3048 num_bytes = end + 1 - start; 3049 ret = btrfs_prealloc_file_range(inode, 0, start, 3050 num_bytes, num_bytes, 3051 end + 1, &alloc_hint); 3052 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3053 if (ret) 3054 break; 3055 nr++; 3056 } 3057 btrfs_free_reserved_data_space(inode, cluster->start, 3058 cluster->end + 1 - cluster->start); 3059 out: 3060 mutex_unlock(&inode->i_mutex); 3061 return ret; 3062 } 3063 3064 static noinline_for_stack 3065 int setup_extent_mapping(struct inode *inode, u64 start, u64 end, 3066 u64 block_start) 3067 { 3068 struct btrfs_root *root = BTRFS_I(inode)->root; 3069 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 3070 struct extent_map *em; 3071 int ret = 0; 3072 3073 em = alloc_extent_map(); 3074 if (!em) 3075 return -ENOMEM; 3076 3077 em->start = start; 3078 em->len = end + 1 - start; 3079 em->block_len = em->len; 3080 em->block_start = block_start; 3081 em->bdev = root->fs_info->fs_devices->latest_bdev; 3082 set_bit(EXTENT_FLAG_PINNED, &em->flags); 3083 3084 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3085 while (1) { 3086 write_lock(&em_tree->lock); 3087 ret = add_extent_mapping(em_tree, em, 0); 3088 write_unlock(&em_tree->lock); 3089 if (ret != -EEXIST) { 3090 free_extent_map(em); 3091 break; 3092 } 3093 btrfs_drop_extent_cache(inode, start, end, 0); 3094 } 3095 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3096 return ret; 3097 } 3098 3099 static int relocate_file_extent_cluster(struct inode 
*inode, 3100 struct file_extent_cluster *cluster) 3101 { 3102 u64 page_start; 3103 u64 page_end; 3104 u64 offset = BTRFS_I(inode)->index_cnt; 3105 unsigned long index; 3106 unsigned long last_index; 3107 struct page *page; 3108 struct file_ra_state *ra; 3109 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 3110 int nr = 0; 3111 int ret = 0; 3112 3113 if (!cluster->nr) 3114 return 0; 3115 3116 ra = kzalloc(sizeof(*ra), GFP_NOFS); 3117 if (!ra) 3118 return -ENOMEM; 3119 3120 ret = prealloc_file_extent_cluster(inode, cluster); 3121 if (ret) 3122 goto out; 3123 3124 file_ra_state_init(ra, inode->i_mapping); 3125 3126 ret = setup_extent_mapping(inode, cluster->start - offset, 3127 cluster->end - offset, cluster->start); 3128 if (ret) 3129 goto out; 3130 3131 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; 3132 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; 3133 while (index <= last_index) { 3134 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); 3135 if (ret) 3136 goto out; 3137 3138 page = find_lock_page(inode->i_mapping, index); 3139 if (!page) { 3140 page_cache_sync_readahead(inode->i_mapping, 3141 ra, NULL, index, 3142 last_index + 1 - index); 3143 page = find_or_create_page(inode->i_mapping, index, 3144 mask); 3145 if (!page) { 3146 btrfs_delalloc_release_metadata(inode, 3147 PAGE_CACHE_SIZE); 3148 ret = -ENOMEM; 3149 goto out; 3150 } 3151 } 3152 3153 if (PageReadahead(page)) { 3154 page_cache_async_readahead(inode->i_mapping, 3155 ra, NULL, page, index, 3156 last_index + 1 - index); 3157 } 3158 3159 if (!PageUptodate(page)) { 3160 btrfs_readpage(NULL, page); 3161 lock_page(page); 3162 if (!PageUptodate(page)) { 3163 unlock_page(page); 3164 page_cache_release(page); 3165 btrfs_delalloc_release_metadata(inode, 3166 PAGE_CACHE_SIZE); 3167 ret = -EIO; 3168 goto out; 3169 } 3170 } 3171 3172 page_start = page_offset(page); 3173 page_end = page_start + PAGE_CACHE_SIZE - 1; 3174 3175 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); 3176 3177 set_page_extent_mapped(page); 3178 3179 if (nr < cluster->nr && 3180 page_start + offset == cluster->boundary[nr]) { 3181 set_extent_bits(&BTRFS_I(inode)->io_tree, 3182 page_start, page_end, 3183 EXTENT_BOUNDARY, GFP_NOFS); 3184 nr++; 3185 } 3186 3187 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); 3188 set_page_dirty(page); 3189 3190 unlock_extent(&BTRFS_I(inode)->io_tree, 3191 page_start, page_end); 3192 unlock_page(page); 3193 page_cache_release(page); 3194 3195 index++; 3196 balance_dirty_pages_ratelimited(inode->i_mapping); 3197 btrfs_throttle(BTRFS_I(inode)->root); 3198 } 3199 WARN_ON(nr != cluster->nr); 3200 out: 3201 kfree(ra); 3202 return ret; 3203 } 3204 3205 static noinline_for_stack 3206 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key, 3207 struct file_extent_cluster *cluster) 3208 { 3209 int ret; 3210 3211 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 3212 ret = relocate_file_extent_cluster(inode, cluster); 3213 if (ret) 3214 return ret; 3215 cluster->nr = 0; 3216 } 3217 3218 if (!cluster->nr) 3219 cluster->start = extent_key->objectid; 3220 else 3221 BUG_ON(cluster->nr >= MAX_EXTENTS); 3222 cluster->end = extent_key->objectid + extent_key->offset - 1; 3223 cluster->boundary[cluster->nr] = extent_key->objectid; 3224 cluster->nr++; 3225 3226 if (cluster->nr >= MAX_EXTENTS) { 3227 ret = relocate_file_extent_cluster(inode, cluster); 3228 if (ret) 3229 return ret; 3230 cluster->nr = 0; 3231 } 3232 return 0; 3233 } 3234 3235 #ifdef 
BTRFS_COMPAT_EXTENT_TREE_V0 3236 static int get_ref_objectid_v0(struct reloc_control *rc, 3237 struct btrfs_path *path, 3238 struct btrfs_key *extent_key, 3239 u64 *ref_objectid, int *path_change) 3240 { 3241 struct btrfs_key key; 3242 struct extent_buffer *leaf; 3243 struct btrfs_extent_ref_v0 *ref0; 3244 int ret; 3245 int slot; 3246 3247 leaf = path->nodes[0]; 3248 slot = path->slots[0]; 3249 while (1) { 3250 if (slot >= btrfs_header_nritems(leaf)) { 3251 ret = btrfs_next_leaf(rc->extent_root, path); 3252 if (ret < 0) 3253 return ret; 3254 BUG_ON(ret > 0); 3255 leaf = path->nodes[0]; 3256 slot = path->slots[0]; 3257 if (path_change) 3258 *path_change = 1; 3259 } 3260 btrfs_item_key_to_cpu(leaf, &key, slot); 3261 if (key.objectid != extent_key->objectid) 3262 return -ENOENT; 3263 3264 if (key.type != BTRFS_EXTENT_REF_V0_KEY) { 3265 slot++; 3266 continue; 3267 } 3268 ref0 = btrfs_item_ptr(leaf, slot, 3269 struct btrfs_extent_ref_v0); 3270 *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0); 3271 break; 3272 } 3273 return 0; 3274 } 3275 #endif 3276 3277 /* 3278 * helper to add a tree block to the list. 3279 * the major work is getting the generation and level of the block 3280 */ 3281 static int add_tree_block(struct reloc_control *rc, 3282 struct btrfs_key *extent_key, 3283 struct btrfs_path *path, 3284 struct rb_root *blocks) 3285 { 3286 struct extent_buffer *eb; 3287 struct btrfs_extent_item *ei; 3288 struct btrfs_tree_block_info *bi; 3289 struct tree_block *block; 3290 struct rb_node *rb_node; 3291 u32 item_size; 3292 int level = -1; 3293 u64 generation; 3294 3295 eb = path->nodes[0]; 3296 item_size = btrfs_item_size_nr(eb, path->slots[0]); 3297 3298 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || 3299 item_size >= sizeof(*ei) + sizeof(*bi)) { 3300 ei = btrfs_item_ptr(eb, path->slots[0], 3301 struct btrfs_extent_item); 3302 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { 3303 bi = (struct btrfs_tree_block_info *)(ei + 1); 3304 level = btrfs_tree_block_level(eb, bi); 3305 } else { 3306 level = (int)extent_key->offset; 3307 } 3308 generation = btrfs_extent_generation(eb, ei); 3309 } else { 3310 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3311 u64 ref_owner; 3312 int ret; 3313 3314 BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0)); 3315 ret = get_ref_objectid_v0(rc, path, extent_key, 3316 &ref_owner, NULL); 3317 if (ret < 0) 3318 return ret; 3319 BUG_ON(ref_owner >= BTRFS_MAX_LEVEL); 3320 level = (int)ref_owner; 3321 /* FIXME: get real generation */ 3322 generation = 0; 3323 #else 3324 BUG(); 3325 #endif 3326 } 3327 3328 btrfs_release_path(path); 3329 3330 BUG_ON(level == -1); 3331 3332 block = kmalloc(sizeof(*block), GFP_NOFS); 3333 if (!block) 3334 return -ENOMEM; 3335 3336 block->bytenr = extent_key->objectid; 3337 block->key.objectid = rc->extent_root->nodesize; 3338 block->key.offset = generation; 3339 block->level = level; 3340 block->key_ready = 0; 3341 3342 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); 3343 if (rb_node) 3344 backref_tree_panic(rb_node, -EEXIST, block->bytenr); 3345 3346 return 0; 3347 } 3348 3349 /* 3350 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY 3351 */ 3352 static int __add_tree_block(struct reloc_control *rc, 3353 u64 bytenr, u32 blocksize, 3354 struct rb_root *blocks) 3355 { 3356 struct btrfs_path *path; 3357 struct btrfs_key key; 3358 int ret; 3359 bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info, 3360 SKINNY_METADATA); 3361 3362 if (tree_block_processed(bytenr, rc)) 3363 return 0; 3364 3365 if 
(tree_search(blocks, bytenr))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	key.objectid = bytenr;
	if (skinny) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = (u64)-1;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = blocksize;
	}

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret > 0 && skinny) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    (key.type == BTRFS_METADATA_ITEM_KEY ||
			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
			      key.offset == blocksize)))
				ret = 0;
		}

		if (ret) {
			skinny = false;
			btrfs_release_path(path);
			goto again;
		}
	}
	BUG_ON(ret);

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to check if the block uses full backrefs for the pointers in it
 */
static int block_use_full_backref(struct reloc_control *rc,
				  struct extent_buffer *eb)
{
	u64 flags;
	int ret;

	if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
	    btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
		return 1;

	ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
				       eb->start, btrfs_header_level(eb), 1,
				       NULL, &flags);
	BUG_ON(ret);

	if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
		ret = 1;
	else
		ret = 0;
	return ret;
}

static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode,
				    u64 ino)
{
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (inode)
		goto truncate;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode) || is_bad_inode(inode)) {
		if (!IS_ERR(inode))
			iput(inode);
		return -ENOENT;
	}

truncate:
	ret = btrfs_check_trunc_cache_free_space(root,
						 &fs_info->global_block_rsv);
	if (ret)
		goto out;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_truncate_free_space_cache(root, trans, block_group, inode);

	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
out:
	iput(inode);
	return ret;
}

/*
 * helper to add tree blocks for backrefs of type BTRFS_EXTENT_DATA_REF_KEY.
 * this function scans the fs tree to find blocks that reference the data
 * extent.
 */
static int find_data_references(struct reloc_control *rc,
				struct btrfs_key *extent_key,
				struct extent_buffer *leaf,
				struct btrfs_extent_data_ref *ref,
				struct rb_root *blocks)
{
	struct btrfs_path *path;
	struct tree_block *block;
	struct btrfs_root *root;
	struct btrfs_file_extent_item *fi;
	struct rb_node *rb_node;
	struct btrfs_key key;
	u64 ref_root;
	u64 ref_objectid;
	u64 ref_offset;
	u32 ref_count;
	u32 nritems;
	int err = 0;
	int added = 0;
	int counted;
	int ret;

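	/*
	 * bookkeeping for the scan below: "counted" is cleared when the
	 * current leaf uses full backrefs (its references are not part of
	 * ref_count), and "added" remembers that the leaf has already been
	 * inserted into "blocks" so it is not queued twice.
	 */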
	ref_root = btrfs_extent_data_ref_root(leaf, ref);
	ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
	ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
	ref_count = btrfs_extent_data_ref_count(leaf, ref);

	/*
	 * This is an extent belonging to the free space cache, let's just
	 * delete it and redo the search.
	 */
	if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
		ret = delete_block_group_cache(rc->extent_root->fs_info,
					       rc->block_group,
					       NULL, ref_objectid);
		if (ret != -ENOENT)
			return ret;
		ret = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	root = read_fs_root(rc->extent_root->fs_info, ref_root);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto out;
	}

	key.objectid = ref_objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (ref_offset > ((u64)-1 << 32))
		key.offset = 0;
	else
		key.offset = ref_offset;

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	/*
	 * the references in tree blocks that use full backrefs
	 * are not counted in ref_count
	 */
	if (block_use_full_backref(rc, leaf))
		counted = 0;
	else
		counted = 1;
	rb_node = tree_search(blocks, leaf->start);
	if (rb_node) {
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
	}

	while (ref_count > 0) {
		while (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (WARN_ON(ret > 0))
				goto out;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			added = 0;

			if (block_use_full_backref(rc, leaf))
				counted = 0;
			else
				counted = 1;
			rb_node = tree_search(blocks, leaf->start);
			if (rb_node) {
				if (counted)
					added = 1;
				else
					path->slots[0] = nritems;
			}
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (WARN_ON(key.objectid != ref_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY))
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto next;

		if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		    extent_key->objectid)
			goto next;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		if (key.offset != ref_offset)
			goto next;

		if (counted)
			ref_count--;
		if (added)
			goto next;

		if (!tree_block_processed(leaf->start, rc)) {
			block = kmalloc(sizeof(*block), GFP_NOFS);
			if (!block) {
				err = -ENOMEM;
				break;
			}
			block->bytenr = leaf->start;
			btrfs_item_key_to_cpu(leaf, &block->key, 0);
			block->level = 0;
			block->key_ready = 1;
			rb_node = tree_insert(blocks, block->bytenr,
					      &block->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   block->bytenr);
		}
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
next:
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return err;
}

/*
 * helper to find all tree blocks that reference a given data extent
 */
static
noinline_for_stack 3656 int add_data_references(struct reloc_control *rc, 3657 struct btrfs_key *extent_key, 3658 struct btrfs_path *path, 3659 struct rb_root *blocks) 3660 { 3661 struct btrfs_key key; 3662 struct extent_buffer *eb; 3663 struct btrfs_extent_data_ref *dref; 3664 struct btrfs_extent_inline_ref *iref; 3665 unsigned long ptr; 3666 unsigned long end; 3667 u32 blocksize = rc->extent_root->nodesize; 3668 int ret = 0; 3669 int err = 0; 3670 3671 eb = path->nodes[0]; 3672 ptr = btrfs_item_ptr_offset(eb, path->slots[0]); 3673 end = ptr + btrfs_item_size_nr(eb, path->slots[0]); 3674 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3675 if (ptr + sizeof(struct btrfs_extent_item_v0) == end) 3676 ptr = end; 3677 else 3678 #endif 3679 ptr += sizeof(struct btrfs_extent_item); 3680 3681 while (ptr < end) { 3682 iref = (struct btrfs_extent_inline_ref *)ptr; 3683 key.type = btrfs_extent_inline_ref_type(eb, iref); 3684 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3685 key.offset = btrfs_extent_inline_ref_offset(eb, iref); 3686 ret = __add_tree_block(rc, key.offset, blocksize, 3687 blocks); 3688 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3689 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 3690 ret = find_data_references(rc, extent_key, 3691 eb, dref, blocks); 3692 } else { 3693 BUG(); 3694 } 3695 if (ret) { 3696 err = ret; 3697 goto out; 3698 } 3699 ptr += btrfs_extent_inline_ref_size(key.type); 3700 } 3701 WARN_ON(ptr > end); 3702 3703 while (1) { 3704 cond_resched(); 3705 eb = path->nodes[0]; 3706 if (path->slots[0] >= btrfs_header_nritems(eb)) { 3707 ret = btrfs_next_leaf(rc->extent_root, path); 3708 if (ret < 0) { 3709 err = ret; 3710 break; 3711 } 3712 if (ret > 0) 3713 break; 3714 eb = path->nodes[0]; 3715 } 3716 3717 btrfs_item_key_to_cpu(eb, &key, path->slots[0]); 3718 if (key.objectid != extent_key->objectid) 3719 break; 3720 3721 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3722 if (key.type == BTRFS_SHARED_DATA_REF_KEY || 3723 key.type == BTRFS_EXTENT_REF_V0_KEY) { 3724 #else 3725 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY); 3726 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3727 #endif 3728 ret = __add_tree_block(rc, key.offset, blocksize, 3729 blocks); 3730 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3731 dref = btrfs_item_ptr(eb, path->slots[0], 3732 struct btrfs_extent_data_ref); 3733 ret = find_data_references(rc, extent_key, 3734 eb, dref, blocks); 3735 } else { 3736 ret = 0; 3737 } 3738 if (ret) { 3739 err = ret; 3740 break; 3741 } 3742 path->slots[0]++; 3743 } 3744 out: 3745 btrfs_release_path(path); 3746 if (err) 3747 free_block_list(blocks); 3748 return err; 3749 } 3750 3751 /* 3752 * helper to find next unprocessed extent 3753 */ 3754 static noinline_for_stack 3755 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, 3756 struct btrfs_key *extent_key) 3757 { 3758 struct btrfs_key key; 3759 struct extent_buffer *leaf; 3760 u64 start, end, last; 3761 int ret; 3762 3763 last = rc->block_group->key.objectid + rc->block_group->key.offset; 3764 while (1) { 3765 cond_resched(); 3766 if (rc->search_start >= last) { 3767 ret = 1; 3768 break; 3769 } 3770 3771 key.objectid = rc->search_start; 3772 key.type = BTRFS_EXTENT_ITEM_KEY; 3773 key.offset = 0; 3774 3775 path->search_commit_root = 1; 3776 path->skip_locking = 1; 3777 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 3778 0, 0); 3779 if (ret < 0) 3780 break; 3781 next: 3782 leaf = path->nodes[0]; 3783 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3784 ret = btrfs_next_leaf(rc->extent_root, 
path);
			if (ret != 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid >= last) {
			ret = 1;
			break;
		}

		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
		    key.type != BTRFS_METADATA_ITEM_KEY) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
		    key.objectid + key.offset <= rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_METADATA_ITEM_KEY &&
		    key.objectid + rc->extent_root->nodesize <=
		    rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		ret = find_first_extent_bit(&rc->processed_blocks,
					    key.objectid, &start, &end,
					    EXTENT_DIRTY, NULL);

		if (ret == 0 && start <= key.objectid) {
			btrfs_release_path(path);
			rc->search_start = end + 1;
		} else {
			if (key.type == BTRFS_EXTENT_ITEM_KEY)
				rc->search_start = key.objectid + key.offset;
			else
				rc->search_start = key.objectid +
					rc->extent_root->nodesize;
			memcpy(extent_key, &key, sizeof(key));
			return 0;
		}
	}
	btrfs_release_path(path);
	return ret;
}

static void set_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = rc;
	mutex_unlock(&fs_info->reloc_mutex);
}

static void unset_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = NULL;
	mutex_unlock(&fs_info->reloc_mutex);
}

static int check_extent_flags(u64 flags)
{
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
	    !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
		return 1;
	return 0;
}

static noinline_for_stack
int prepare_to_relocate(struct reloc_control *rc)
{
	struct btrfs_trans_handle *trans;

	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
					      BTRFS_BLOCK_RSV_TEMP);
	if (!rc->block_rsv)
		return -ENOMEM;

	memset(&rc->cluster, 0, sizeof(rc->cluster));
	rc->search_start = rc->block_group->key.objectid;
	rc->extents_found = 0;
	rc->nodes_relocated = 0;
	rc->merging_rsv_size = 0;
	rc->reserved_bytes = 0;
	rc->block_rsv->size = rc->extent_root->nodesize *
			      RELOCATION_RESERVED_NODES;

	rc->create_reloc_tree = 1;
	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		/*
		 * the extent tree is not a ref_cow tree and has no
		 * reloc_root to clean up. callers are responsible for
		 * freeing the block rsv allocated above.
		 */
		return PTR_ERR(trans);
	}
	btrfs_commit_transaction(trans, rc->extent_root);
	return 0;
}

static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
	struct rb_root blocks = RB_ROOT;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	u64 flags;
	u32 item_size;
	int ret;
	int err = 0;
	int progress = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	ret = prepare_to_relocate(rc);
	if (ret) {
		err = ret;
		goto out_free;
	}

	while (1) {
		rc->reserved_bytes = 0;
		ret = btrfs_block_rsv_refill(rc->extent_root,
					     rc->block_rsv, rc->block_rsv->size,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			break;
		}
		progress++;
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			break;
		}
restart:
		if (update_backref_cache(trans, &rc->backref_cache)) {
			btrfs_end_transaction(trans, rc->extent_root);
			continue;
		}

		ret = find_next_extent(rc, path, &key);
		if (ret < 0)
			err = ret;
		if (ret != 0)
			break;

		rc->extents_found++;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_extent_item);
		item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			flags = btrfs_extent_flags(path->nodes[0], ei);
			ret = check_extent_flags(flags);
			BUG_ON(ret);

		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			u64 ref_owner;
			int path_change = 0;

			BUG_ON(item_size !=
			       sizeof(struct btrfs_extent_item_v0));
			ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
						  &path_change);
			if (ret < 0) {
				err = ret;
				break;
			}
			if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
				flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
			else
				flags = BTRFS_EXTENT_FLAG_DATA;

			if (path_change) {
				btrfs_release_path(path);

				path->search_commit_root = 1;
				path->skip_locking = 1;
				ret = btrfs_search_slot(NULL, rc->extent_root,
							&key, path, 0, 0);
				if (ret < 0) {
					err = ret;
					break;
				}
				BUG_ON(ret > 0);
			}
#else
			BUG();
#endif
		}

		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
			ret = add_data_references(rc, &key, path, &blocks);
		} else {
			btrfs_release_path(path);
			ret = 0;
		}
		if (ret < 0) {
			err = ret;
			break;
		}

		if (!RB_EMPTY_ROOT(&blocks)) {
			ret = relocate_tree_blocks(trans, rc, &blocks);
			if (ret < 0) {
				/*
				 * if we fail to relocate tree blocks,
				 * force the backref cache to be updated
				 * when the transaction commits.
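				 *
				 * (backdating last_trans makes the next
				 * update_backref_cache() call treat the
				 * cache as stale and refresh it before
				 * any new block is processed.)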
4022 */ 4023 rc->backref_cache.last_trans = trans->transid - 1; 4024 4025 if (ret != -EAGAIN) { 4026 err = ret; 4027 break; 4028 } 4029 rc->extents_found--; 4030 rc->search_start = key.objectid; 4031 } 4032 } 4033 4034 btrfs_end_transaction_throttle(trans, rc->extent_root); 4035 btrfs_btree_balance_dirty(rc->extent_root); 4036 trans = NULL; 4037 4038 if (rc->stage == MOVE_DATA_EXTENTS && 4039 (flags & BTRFS_EXTENT_FLAG_DATA)) { 4040 rc->found_file_extent = 1; 4041 ret = relocate_data_extent(rc->data_inode, 4042 &key, &rc->cluster); 4043 if (ret < 0) { 4044 err = ret; 4045 break; 4046 } 4047 } 4048 } 4049 if (trans && progress && err == -ENOSPC) { 4050 ret = btrfs_force_chunk_alloc(trans, rc->extent_root, 4051 rc->block_group->flags); 4052 if (ret == 1) { 4053 err = 0; 4054 progress = 0; 4055 goto restart; 4056 } 4057 } 4058 4059 btrfs_release_path(path); 4060 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, 4061 GFP_NOFS); 4062 4063 if (trans) { 4064 btrfs_end_transaction_throttle(trans, rc->extent_root); 4065 btrfs_btree_balance_dirty(rc->extent_root); 4066 } 4067 4068 if (!err) { 4069 ret = relocate_file_extent_cluster(rc->data_inode, 4070 &rc->cluster); 4071 if (ret < 0) 4072 err = ret; 4073 } 4074 4075 rc->create_reloc_tree = 0; 4076 set_reloc_control(rc); 4077 4078 backref_cache_cleanup(&rc->backref_cache); 4079 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 4080 4081 err = prepare_to_merge(rc, err); 4082 4083 merge_reloc_roots(rc); 4084 4085 rc->merge_reloc_tree = 0; 4086 unset_reloc_control(rc); 4087 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 4088 4089 /* get rid of pinned extents */ 4090 trans = btrfs_join_transaction(rc->extent_root); 4091 if (IS_ERR(trans)) 4092 err = PTR_ERR(trans); 4093 else 4094 btrfs_commit_transaction(trans, rc->extent_root); 4095 out_free: 4096 btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); 4097 btrfs_free_path(path); 4098 return err; 4099 } 4100 4101 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 4102 struct btrfs_root *root, u64 objectid) 4103 { 4104 struct btrfs_path *path; 4105 struct btrfs_inode_item *item; 4106 struct extent_buffer *leaf; 4107 int ret; 4108 4109 path = btrfs_alloc_path(); 4110 if (!path) 4111 return -ENOMEM; 4112 4113 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 4114 if (ret) 4115 goto out; 4116 4117 leaf = path->nodes[0]; 4118 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 4119 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item)); 4120 btrfs_set_inode_generation(leaf, item, 1); 4121 btrfs_set_inode_size(leaf, item, 0); 4122 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 4123 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 4124 BTRFS_INODE_PREALLOC); 4125 btrfs_mark_buffer_dirty(leaf); 4126 out: 4127 btrfs_free_path(path); 4128 return ret; 4129 } 4130 4131 /* 4132 * helper to create inode for data relocation. 
4133 * the inode is in data relocation tree and its link count is 0 4134 */ 4135 static noinline_for_stack 4136 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, 4137 struct btrfs_block_group_cache *group) 4138 { 4139 struct inode *inode = NULL; 4140 struct btrfs_trans_handle *trans; 4141 struct btrfs_root *root; 4142 struct btrfs_key key; 4143 u64 objectid; 4144 int err = 0; 4145 4146 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID); 4147 if (IS_ERR(root)) 4148 return ERR_CAST(root); 4149 4150 trans = btrfs_start_transaction(root, 6); 4151 if (IS_ERR(trans)) 4152 return ERR_CAST(trans); 4153 4154 err = btrfs_find_free_objectid(root, &objectid); 4155 if (err) 4156 goto out; 4157 4158 err = __insert_orphan_inode(trans, root, objectid); 4159 BUG_ON(err); 4160 4161 key.objectid = objectid; 4162 key.type = BTRFS_INODE_ITEM_KEY; 4163 key.offset = 0; 4164 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); 4165 BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); 4166 BTRFS_I(inode)->index_cnt = group->key.objectid; 4167 4168 err = btrfs_orphan_add(trans, inode); 4169 out: 4170 btrfs_end_transaction(trans, root); 4171 btrfs_btree_balance_dirty(root); 4172 if (err) { 4173 if (inode) 4174 iput(inode); 4175 inode = ERR_PTR(err); 4176 } 4177 return inode; 4178 } 4179 4180 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) 4181 { 4182 struct reloc_control *rc; 4183 4184 rc = kzalloc(sizeof(*rc), GFP_NOFS); 4185 if (!rc) 4186 return NULL; 4187 4188 INIT_LIST_HEAD(&rc->reloc_roots); 4189 backref_cache_init(&rc->backref_cache); 4190 mapping_tree_init(&rc->reloc_root_tree); 4191 extent_io_tree_init(&rc->processed_blocks, 4192 fs_info->btree_inode->i_mapping); 4193 return rc; 4194 } 4195 4196 /* 4197 * function to relocate all extents in a block group. 
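 *
 * relocation is done in (up to) two stages: the MOVE_DATA_EXTENTS
 * stage copies the data and relocates tree blocks; if any file extent
 * was found, the UPDATE_DATA_PTRS stage then rewrites the file extent
 * items that still point to the old location.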
4198 */ 4199 int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) 4200 { 4201 struct btrfs_fs_info *fs_info = extent_root->fs_info; 4202 struct reloc_control *rc; 4203 struct inode *inode; 4204 struct btrfs_path *path; 4205 int ret; 4206 int rw = 0; 4207 int err = 0; 4208 4209 rc = alloc_reloc_control(fs_info); 4210 if (!rc) 4211 return -ENOMEM; 4212 4213 rc->extent_root = extent_root; 4214 4215 rc->block_group = btrfs_lookup_block_group(fs_info, group_start); 4216 BUG_ON(!rc->block_group); 4217 4218 ret = btrfs_inc_block_group_ro(extent_root, rc->block_group); 4219 if (ret) { 4220 err = ret; 4221 goto out; 4222 } 4223 rw = 1; 4224 4225 path = btrfs_alloc_path(); 4226 if (!path) { 4227 err = -ENOMEM; 4228 goto out; 4229 } 4230 4231 inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group, 4232 path); 4233 btrfs_free_path(path); 4234 4235 if (!IS_ERR(inode)) 4236 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0); 4237 else 4238 ret = PTR_ERR(inode); 4239 4240 if (ret && ret != -ENOENT) { 4241 err = ret; 4242 goto out; 4243 } 4244 4245 rc->data_inode = create_reloc_inode(fs_info, rc->block_group); 4246 if (IS_ERR(rc->data_inode)) { 4247 err = PTR_ERR(rc->data_inode); 4248 rc->data_inode = NULL; 4249 goto out; 4250 } 4251 4252 btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu", 4253 rc->block_group->key.objectid, rc->block_group->flags); 4254 4255 ret = btrfs_start_delalloc_roots(fs_info, 0, -1); 4256 if (ret < 0) { 4257 err = ret; 4258 goto out; 4259 } 4260 btrfs_wait_ordered_roots(fs_info, -1); 4261 4262 while (1) { 4263 mutex_lock(&fs_info->cleaner_mutex); 4264 ret = relocate_block_group(rc); 4265 mutex_unlock(&fs_info->cleaner_mutex); 4266 if (ret < 0) { 4267 err = ret; 4268 goto out; 4269 } 4270 4271 if (rc->extents_found == 0) 4272 break; 4273 4274 btrfs_info(extent_root->fs_info, "found %llu extents", 4275 rc->extents_found); 4276 4277 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { 4278 ret = btrfs_wait_ordered_range(rc->data_inode, 0, 4279 (u64)-1); 4280 if (ret) { 4281 err = ret; 4282 goto out; 4283 } 4284 invalidate_mapping_pages(rc->data_inode->i_mapping, 4285 0, -1); 4286 rc->stage = UPDATE_DATA_PTRS; 4287 } 4288 } 4289 4290 WARN_ON(rc->block_group->pinned > 0); 4291 WARN_ON(rc->block_group->reserved > 0); 4292 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0); 4293 out: 4294 if (err && rw) 4295 btrfs_dec_block_group_ro(extent_root, rc->block_group); 4296 iput(rc->data_inode); 4297 btrfs_put_block_group(rc->block_group); 4298 kfree(rc); 4299 return err; 4300 } 4301 4302 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) 4303 { 4304 struct btrfs_trans_handle *trans; 4305 int ret, err; 4306 4307 trans = btrfs_start_transaction(root->fs_info->tree_root, 0); 4308 if (IS_ERR(trans)) 4309 return PTR_ERR(trans); 4310 4311 memset(&root->root_item.drop_progress, 0, 4312 sizeof(root->root_item.drop_progress)); 4313 root->root_item.drop_level = 0; 4314 btrfs_set_root_refs(&root->root_item, 0); 4315 ret = btrfs_update_root(trans, root->fs_info->tree_root, 4316 &root->root_key, &root->root_item); 4317 4318 err = btrfs_end_transaction(trans, root->fs_info->tree_root); 4319 if (err) 4320 return err; 4321 return ret; 4322 } 4323 4324 /* 4325 * recover relocation interrupted by system crash. 4326 * 4327 * this function resumes merging reloc trees with corresponding fs trees. 
/*
 * recover relocation interrupted by system crash.
 *
 * this function resumes merging reloc trees with corresponding fs trees.
 * this is important for keeping the sharing of tree blocks.
 */
int btrfs_recover_relocation(struct btrfs_root *root)
{
        LIST_HEAD(reloc_roots);
        struct btrfs_key key;
        struct btrfs_root *fs_root;
        struct btrfs_root *reloc_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct reloc_control *rc = NULL;
        struct btrfs_trans_handle *trans;
        int ret;
        int err = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->reada = -1;

        key.objectid = BTRFS_TREE_RELOC_OBJECTID;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        while (1) {
                ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
                                        path, 0, 0);
                if (ret < 0) {
                        err = ret;
                        goto out;
                }
                if (ret > 0) {
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
                }
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                btrfs_release_path(path);

                if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
                    key.type != BTRFS_ROOT_ITEM_KEY)
                        break;

                reloc_root = btrfs_read_fs_root(root, &key);
                if (IS_ERR(reloc_root)) {
                        err = PTR_ERR(reloc_root);
                        goto out;
                }

                list_add(&reloc_root->root_list, &reloc_roots);

                if (btrfs_root_refs(&reloc_root->root_item) > 0) {
                        fs_root = read_fs_root(root->fs_info,
                                               reloc_root->root_key.offset);
                        if (IS_ERR(fs_root)) {
                                ret = PTR_ERR(fs_root);
                                if (ret != -ENOENT) {
                                        err = ret;
                                        goto out;
                                }
                                /*
                                 * the fs tree is gone, mark the reloc root
                                 * for deletion
                                 */
                                ret = mark_garbage_root(reloc_root);
                                if (ret < 0) {
                                        err = ret;
                                        goto out;
                                }
                        }
                }

                if (key.offset == 0)
                        break;

                key.offset--;
        }
        btrfs_release_path(path);

        if (list_empty(&reloc_roots))
                goto out;

        rc = alloc_reloc_control(root->fs_info);
        if (!rc) {
                err = -ENOMEM;
                goto out;
        }

        rc->extent_root = root->fs_info->extent_root;

        set_reloc_control(rc);

        trans = btrfs_join_transaction(rc->extent_root);
        if (IS_ERR(trans)) {
                unset_reloc_control(rc);
                err = PTR_ERR(trans);
                goto out_free;
        }

        rc->merge_reloc_tree = 1;

        while (!list_empty(&reloc_roots)) {
                reloc_root = list_entry(reloc_roots.next,
                                        struct btrfs_root, root_list);
                list_del(&reloc_root->root_list);

                if (btrfs_root_refs(&reloc_root->root_item) == 0) {
                        /* dead reloc root, let merge_reloc_roots drop it */
                        list_add_tail(&reloc_root->root_list,
                                      &rc->reloc_roots);
                        continue;
                }

                fs_root = read_fs_root(root->fs_info,
                                       reloc_root->root_key.offset);
                if (IS_ERR(fs_root)) {
                        err = PTR_ERR(fs_root);
                        goto out_free;
                }

                err = __add_reloc_root(reloc_root);
                BUG_ON(err < 0); /* -ENOMEM or logic error */
                fs_root->reloc_root = reloc_root;
        }

        err = btrfs_commit_transaction(trans, rc->extent_root);
        if (err)
                goto out_free;

        merge_reloc_roots(rc);

        unset_reloc_control(rc);

        trans = btrfs_join_transaction(rc->extent_root);
        if (IS_ERR(trans))
                err = PTR_ERR(trans);
        else
                err = btrfs_commit_transaction(trans, rc->extent_root);
out_free:
        kfree(rc);
out:
        if (!list_empty(&reloc_roots))
                free_reloc_roots(&reloc_roots);

        btrfs_free_path(path);

        if (err == 0) {
                /* cleanup orphan inode in data relocation tree */
                fs_root = read_fs_root(root->fs_info,
                                       BTRFS_DATA_RELOC_TREE_OBJECTID);
                if (IS_ERR(fs_root))
                        err = PTR_ERR(fs_root);
                else
                        err = btrfs_orphan_cleanup(fs_root);
        }
        return err;
}
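/*
 * The root tree walk in btrfs_recover_relocation() above uses a common
 * btrfs idiom worth spelling out; this is a distilled sketch under a
 * hypothetical name, with the per-item processing elided.  Searching for
 * offset (u64)-1 positions the path just past the last matching item, so
 * a positive return means "step one slot back" to reach the highest
 * existing key; decrementing key.offset then restarts the search below
 * the item that was just handled.
 */
static int __maybe_unused walk_reloc_root_items_sketch(struct btrfs_root *tree_root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_TREE_RELOC_OBJECTID;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        while (1) {
                ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
                if (ret < 0)
                        break;
                if (ret > 0) {
                        if (path->slots[0] == 0)
                                break;          /* no key at or below ours */
                        path->slots[0]--;       /* step back to the last item */
                }
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                btrfs_release_path(path);

                if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
                    key.type != BTRFS_ROOT_ITEM_KEY)
                        break;                  /* walked past the reloc roots */

                /* ... a real walker would process the root item here ... */

                if (key.offset == 0)
                        break;
                key.offset--;                   /* continue below this item */
        }
        btrfs_free_path(path);
        return ret < 0 ? ret : 0;
}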
/*
 * helper to add ordered checksums for data relocation.
 *
 * cloning checksums properly handles the nodatasum extents.
 * it also saves the CPU time needed to recalculate the checksums.
 */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
        struct btrfs_ordered_sum *sums;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;
        u64 disk_bytenr;
        u64 new_bytenr;
        LIST_HEAD(list);

        ordered = btrfs_lookup_ordered_extent(inode, file_pos);
        BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

        /* file offsets in the data reloc inode equal the old disk bytenrs */
        disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
                                       disk_bytenr + len - 1, &list, 0);
        if (ret)
                goto out;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del_init(&sums->list);

                /*
                 * We need to offset the new_bytenr based on where the csum is.
                 * We need to do this because we will read in entire prealloc
                 * extents but we may have written to, say, the middle of the
                 * prealloc extent, so we need to make sure the csum goes with
                 * the right disk offset (e.g. an extent that started at disk
                 * bytenr 1M and is rewritten at 8M maps a csum at 1M + 64K to
                 * 8M + 64K).
                 *
                 * We can do this because the data reloc inode refers strictly
                 * to the on-disk bytes, so we don't have to worry about
                 * disk_len vs real len like with real inodes since it's all
                 * disk length.
                 */
                new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
                sums->bytenr = new_bytenr;

                btrfs_add_ordered_sum(inode, ordered, sums);
        }
out:
        btrfs_put_ordered_extent(ordered);
        return ret;
}

/*
 * hook called when a tree block is cowed. it keeps the backref cache in
 * sync with the new block location and, in the last relocation stage,
 * rewrites the file extent pointers in the cowed leaf
 */
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct extent_buffer *buf,
                          struct extent_buffer *cow)
{
        struct reloc_control *rc;
        struct backref_node *node;
        int first_cow = 0;
        int level;
        int ret = 0;

        rc = root->fs_info->reloc_ctl;
        if (!rc)
                return 0;

        BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
               root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);

        if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
                if (buf == root->node)
                        __update_reloc_root(root, cow->start);
        }

        level = btrfs_header_level(buf);
        if (btrfs_header_generation(buf) <=
            btrfs_root_last_snapshot(&root->root_item))
                first_cow = 1;

        if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
            rc->create_reloc_tree) {
                WARN_ON(!first_cow && level == 0);

                node = rc->backref_cache.path[level];
                BUG_ON(node->bytenr != buf->start &&
                       node->new_bytenr != buf->start);

                drop_node_buffer(node);
                extent_buffer_get(cow);
                node->eb = cow;
                node->new_bytenr = cow->start;

                if (!node->pending) {
                        list_move_tail(&node->list,
                                       &rc->backref_cache.pending[level]);
                        node->pending = 1;
                }

                if (first_cow)
                        __mark_block_processed(rc, node);

                if (first_cow && level > 0)
                        rc->nodes_relocated += buf->len;
        }

        if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
                ret = replace_file_extents(trans, rc, root, cow);
        return ret;
}
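/*
 * Illustrative sketch (hypothetical helper) of the first_cow test in
 * btrfs_reloc_cow_block() above: a block whose generation is not newer
 * than the root's last snapshot generation has not been cowed since
 * that snapshot was taken, so the COW in progress is its first.
 */
static inline int __maybe_unused
sketch_is_first_cow(struct btrfs_root *root, struct extent_buffer *buf)
{
        return btrfs_header_generation(buf) <=
               btrfs_root_last_snapshot(&root->root_item);
}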
/*
 * called before creating a snapshot. it calculates the metadata reservation
 * required for relocating tree blocks in the snapshot
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
                              u64 *bytes_to_reserve)
{
        struct btrfs_root *root;
        struct reloc_control *rc;

        root = pending->root;
        if (!root->reloc_root)
                return;

        rc = root->fs_info->reloc_ctl;
        if (!rc->merge_reloc_tree)
                return;

        root = root->reloc_root;
        BUG_ON(btrfs_root_refs(&root->root_item) == 0);
        /*
         * relocation is in the stage of merging trees. the space
         * used by merging a reloc tree is twice the size of
         * relocated tree nodes in the worst case. half for cowing
         * the reloc tree, half for cowing the fs tree. the space
         * used by cowing the reloc tree will be freed after the
         * tree is dropped. if we create a snapshot, cowing the fs
         * tree may use more space than it frees. so we need to
         * reserve extra space.
         */
        *bytes_to_reserve += rc->nodes_relocated;
}

/*
 * called after the snapshot is created. migrate block reservation
 * and create reloc root for the newly created snapshot
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
                              struct btrfs_pending_snapshot *pending)
{
        struct btrfs_root *root = pending->root;
        struct btrfs_root *reloc_root;
        struct btrfs_root *new_root;
        struct reloc_control *rc;
        int ret;

        if (!root->reloc_root)
                return 0;

        rc = root->fs_info->reloc_ctl;
        rc->merging_rsv_size += rc->nodes_relocated;

        if (rc->merge_reloc_tree) {
                ret = btrfs_block_rsv_migrate(&pending->block_rsv,
                                              rc->block_rsv,
                                              rc->nodes_relocated);
                if (ret)
                        return ret;
        }

        new_root = pending->snap;
        reloc_root = create_reloc_root(trans, root->reloc_root,
                                       new_root->root_key.objectid);
        if (IS_ERR(reloc_root))
                return PTR_ERR(reloc_root);

        ret = __add_reloc_root(reloc_root);
        BUG_ON(ret < 0); /* -ENOMEM or logic error */
        new_root->reloc_root = reloc_root;

        if (rc->create_reloc_tree)
                ret = clone_backref_node(trans, rc, root, reloc_root);
        return ret;
}
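/*
 * A simplified sketch (hypothetical function; the real callers live in
 * the snapshot creation path in transaction.c) of how the two hooks
 * above pair up: btrfs_reloc_pre_snapshot() sizes the extra metadata
 * reservation while reloc trees are being merged, the caller reserves
 * that much space before committing the snapshot, and
 * btrfs_reloc_post_snapshot() then migrates the reservation and gives
 * the new snapshot its own reloc root.
 */
static int __maybe_unused snapshot_hooks_sketch(struct btrfs_trans_handle *trans,
                                                struct btrfs_pending_snapshot *pending)
{
        u64 to_reserve = 0;

        /* before the snapshot is committed: ask relocation for extra space */
        btrfs_reloc_pre_snapshot(pending, &to_reserve);

        /* ... the caller reserves to_reserve bytes and creates the snapshot ... */

        /* after the snapshot exists: migrate reservation, clone reloc root */
        return btrfs_reloc_post_snapshot(trans, pending);
}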