/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"

/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * represent a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, may not be up to date */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer obtained by COWing the block */
	struct extent_buffer *eb;
	/* level of the tree block */
	unsigned int level:8;
	/* is the block in a non-reference-counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if the corresponding block has been cowed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * represent a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER	0
#define UPPER	1
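
/*
 * an edge connects two backref nodes: node[LOWER] is the child block
 * and node[UPPER] its parent.  list[LOWER] links the edge into the
 * child's ->upper list, list[UPPER] into the parent's ->lower list.
 */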

struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been cowed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been cowed in the current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};

/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represent a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
	unsigned int commit_transaction:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}

static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		BUG_ON(!list_empty(&cache->pending[i]));
	BUG_ON(!list_empty(&cache->changed));
	BUG_ON(!list_empty(&cache->detached));
	BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
	BUG_ON(cache->nr_nodes);
	BUG_ON(cache->nr_edges);
}
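
/*
 * nodes and edges are allocated with GFP_NOFS: relocation runs with a
 * transaction held, so letting the allocation recurse into filesystem
 * reclaim here could deadlock.
 */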

static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}

static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
		    "found at offset %llu\n", bytenr);
}

/*
 * walk up backref nodes until reaching the node that represents the
 * tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}

static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}
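
/*
 * drop a backref node that has no remaining upper edges: unhook it
 * from whatever list and rb tree it is on, then free it
 */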
static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}

/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf node list if no other
		 * child blocks are cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}

static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree.
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors while processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}


static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!root->ref_cows)
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so
	 * the backref node for the fs tree root is useless for
	 * relocation.
	 */
	return 1;
}
/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
	    root_objectid == BTRFS_UUID_TREE_OBJECTID ||
	    root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return 1;
	return 0;
}

static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
				       u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_get_fs_root(fs_info, &key, false);
}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
struct btrfs_root *find_tree_root(struct reloc_control *rc,
				  struct extent_buffer *leaf,
				  struct btrfs_extent_ref_v0 *ref0)
{
	struct btrfs_root *root;
	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
	u64 generation = btrfs_ref_generation_v0(leaf, ref0);

	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);

	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
	BUG_ON(IS_ERR(root));

	if (root->ref_cows &&
	    generation != btrfs_root_generation(&root->root_item))
		return NULL;

	return root;
}
#endif

static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		return 1;
	}
#endif
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}

/*
 * build backref tree for a given tree block. root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond
 * to roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when the tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1;
	struct btrfs_path *path2;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list);
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = 1;
	path2->reada = 2;

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(!ret || !path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		BUG_ON(!list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		BUG_ON(!list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to the pending list if we
		 * need to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}
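
	/*
	 * walk every backref item of the current block: [ptr, end)
	 * covers the inline refs inside the current extent item, and
	 * path1 steps through the keyed ref items that follow it.
	 */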
	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline backref */
			struct btrfs_extent_inline_ref *iref;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			key.type = btrfs_extent_inline_ref_type(eb, iref);
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backref of this type.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				BUG_ON(!root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to the pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/* key.type == BTRFS_TREE_BLOCK_REF_KEY */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!root->ref_cows)
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			BUG_ON(btrfs_root_bytenr(&root->root_item) !=
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/*
		 * search the tree to find upper level blocks that
		 * reference the block.
		 */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
			cur->bytenr);

		lower = cur;
		need_check = true;
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				BUG_ON(btrfs_root_bytenr(&root->root_item) !=
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!root->ref_cows)
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to the pending list if we
				 * need to check its backrefs, we only do
				 * this once while walking up a tree as we
				 * will catch anything else later on.
				 */
972 */ 973 if (!upper->checked && need_check) { 974 need_check = false; 975 list_add_tail(&edge->list[UPPER], 976 &list); 977 } else 978 INIT_LIST_HEAD(&edge->list[UPPER]); 979 } else { 980 upper = rb_entry(rb_node, struct backref_node, 981 rb_node); 982 BUG_ON(!upper->checked); 983 INIT_LIST_HEAD(&edge->list[UPPER]); 984 if (!upper->owner) 985 upper->owner = btrfs_header_owner(eb); 986 } 987 list_add_tail(&edge->list[LOWER], &lower->upper); 988 edge->node[LOWER] = lower; 989 edge->node[UPPER] = upper; 990 991 if (rb_node) 992 break; 993 lower = upper; 994 upper = NULL; 995 } 996 btrfs_release_path(path2); 997 next: 998 if (ptr < end) { 999 ptr += btrfs_extent_inline_ref_size(key.type); 1000 if (ptr >= end) { 1001 WARN_ON(ptr > end); 1002 ptr = 0; 1003 end = 0; 1004 } 1005 } 1006 if (ptr >= end) 1007 path1->slots[0]++; 1008 } 1009 btrfs_release_path(path1); 1010 1011 cur->checked = 1; 1012 WARN_ON(exist); 1013 1014 /* the pending list isn't empty, take the first block to process */ 1015 if (!list_empty(&list)) { 1016 edge = list_entry(list.next, struct backref_edge, list[UPPER]); 1017 list_del_init(&edge->list[UPPER]); 1018 cur = edge->node[UPPER]; 1019 goto again; 1020 } 1021 1022 /* 1023 * everything goes well, connect backref nodes and insert backref nodes 1024 * into the cache. 1025 */ 1026 BUG_ON(!node->checked); 1027 cowonly = node->cowonly; 1028 if (!cowonly) { 1029 rb_node = tree_insert(&cache->rb_root, node->bytenr, 1030 &node->rb_node); 1031 if (rb_node) 1032 backref_tree_panic(rb_node, -EEXIST, node->bytenr); 1033 list_add_tail(&node->lower, &cache->leaves); 1034 } 1035 1036 list_for_each_entry(edge, &node->upper, list[LOWER]) 1037 list_add_tail(&edge->list[UPPER], &list); 1038 1039 while (!list_empty(&list)) { 1040 edge = list_entry(list.next, struct backref_edge, list[UPPER]); 1041 list_del_init(&edge->list[UPPER]); 1042 upper = edge->node[UPPER]; 1043 if (upper->detached) { 1044 list_del(&edge->list[LOWER]); 1045 lower = edge->node[LOWER]; 1046 free_backref_edge(cache, edge); 1047 if (list_empty(&lower->upper)) 1048 list_add(&lower->list, &useless); 1049 continue; 1050 } 1051 1052 if (!RB_EMPTY_NODE(&upper->rb_node)) { 1053 if (upper->lowest) { 1054 list_del_init(&upper->lower); 1055 upper->lowest = 0; 1056 } 1057 1058 list_add_tail(&edge->list[UPPER], &upper->lower); 1059 continue; 1060 } 1061 1062 BUG_ON(!upper->checked); 1063 BUG_ON(cowonly != upper->cowonly); 1064 if (!cowonly) { 1065 rb_node = tree_insert(&cache->rb_root, upper->bytenr, 1066 &upper->rb_node); 1067 if (rb_node) 1068 backref_tree_panic(rb_node, -EEXIST, 1069 upper->bytenr); 1070 } 1071 1072 list_add_tail(&edge->list[UPPER], &upper->lower); 1073 1074 list_for_each_entry(edge, &upper->upper, list[LOWER]) 1075 list_add_tail(&edge->list[UPPER], &list); 1076 } 1077 /* 1078 * process useless backref nodes. backref nodes for tree leaves 1079 * are deleted from the cache. backref nodes for upper level 1080 * tree blocks are left in the cache to avoid unnecessary backref 1081 * lookup. 
1082 */ 1083 while (!list_empty(&useless)) { 1084 upper = list_entry(useless.next, struct backref_node, list); 1085 list_del_init(&upper->list); 1086 BUG_ON(!list_empty(&upper->upper)); 1087 if (upper == node) 1088 node = NULL; 1089 if (upper->lowest) { 1090 list_del_init(&upper->lower); 1091 upper->lowest = 0; 1092 } 1093 while (!list_empty(&upper->lower)) { 1094 edge = list_entry(upper->lower.next, 1095 struct backref_edge, list[UPPER]); 1096 list_del(&edge->list[UPPER]); 1097 list_del(&edge->list[LOWER]); 1098 lower = edge->node[LOWER]; 1099 free_backref_edge(cache, edge); 1100 1101 if (list_empty(&lower->upper)) 1102 list_add(&lower->list, &useless); 1103 } 1104 __mark_block_processed(rc, upper); 1105 if (upper->level > 0) { 1106 list_add(&upper->list, &cache->detached); 1107 upper->detached = 1; 1108 } else { 1109 rb_erase(&upper->rb_node, &cache->rb_root); 1110 free_backref_node(cache, upper); 1111 } 1112 } 1113 out: 1114 btrfs_free_path(path1); 1115 btrfs_free_path(path2); 1116 if (err) { 1117 while (!list_empty(&useless)) { 1118 lower = list_entry(useless.next, 1119 struct backref_node, upper); 1120 list_del_init(&lower->upper); 1121 } 1122 upper = node; 1123 INIT_LIST_HEAD(&list); 1124 while (upper) { 1125 if (RB_EMPTY_NODE(&upper->rb_node)) { 1126 list_splice_tail(&upper->upper, &list); 1127 free_backref_node(cache, upper); 1128 } 1129 1130 if (list_empty(&list)) 1131 break; 1132 1133 edge = list_entry(list.next, struct backref_edge, 1134 list[LOWER]); 1135 list_del(&edge->list[LOWER]); 1136 upper = edge->node[UPPER]; 1137 free_backref_edge(cache, edge); 1138 } 1139 return ERR_PTR(err); 1140 } 1141 BUG_ON(node && node->detached); 1142 return node; 1143 } 1144 1145 /* 1146 * helper to add backref node for the newly created snapshot. 1147 * the backref node is created by cloning backref node that 1148 * corresponds to root of source tree 1149 */ 1150 static int clone_backref_node(struct btrfs_trans_handle *trans, 1151 struct reloc_control *rc, 1152 struct btrfs_root *src, 1153 struct btrfs_root *dest) 1154 { 1155 struct btrfs_root *reloc_root = src->reloc_root; 1156 struct backref_cache *cache = &rc->backref_cache; 1157 struct backref_node *node = NULL; 1158 struct backref_node *new_node; 1159 struct backref_edge *edge; 1160 struct backref_edge *new_edge; 1161 struct rb_node *rb_node; 1162 1163 if (cache->last_trans > 0) 1164 update_backref_cache(trans, cache); 1165 1166 rb_node = tree_search(&cache->rb_root, src->commit_root->start); 1167 if (rb_node) { 1168 node = rb_entry(rb_node, struct backref_node, rb_node); 1169 if (node->detached) 1170 node = NULL; 1171 else 1172 BUG_ON(node->new_bytenr != reloc_root->node->start); 1173 } 1174 1175 if (!node) { 1176 rb_node = tree_search(&cache->rb_root, 1177 reloc_root->commit_root->start); 1178 if (rb_node) { 1179 node = rb_entry(rb_node, struct backref_node, 1180 rb_node); 1181 BUG_ON(node->detached); 1182 } 1183 } 1184 1185 if (!node) 1186 return 0; 1187 1188 new_node = alloc_backref_node(cache); 1189 if (!new_node) 1190 return -ENOMEM; 1191 1192 new_node->bytenr = dest->node->start; 1193 new_node->level = node->level; 1194 new_node->lowest = node->lowest; 1195 new_node->checked = 1; 1196 new_node->root = dest; 1197 1198 if (!node->lowest) { 1199 list_for_each_entry(edge, &node->lower, list[UPPER]) { 1200 new_edge = alloc_backref_edge(cache); 1201 if (!new_edge) 1202 goto fail; 1203 1204 new_edge->node[UPPER] = new_node; 1205 new_edge->node[LOWER] = edge->node[LOWER]; 1206 list_add_tail(&new_edge->list[UPPER], 1207 &new_node->lower); 

/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
			    "for start=%llu while inserting into relocation "
			    "tree\n", node->bytenr);
		kfree(node);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}

/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&root->fs_info->trans_lock);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}
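
/*
 * create the root item and tree for a reloc root: a snapshot of either
 * the fs tree root (when called from btrfs_init_reloc_root) or a reloc
 * tree root (when called from btrfs_reloc_post_snapshot_hook).
 */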
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root, u64 objectid)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	u64 last_snap = 0;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);

		last_snap = btrfs_root_last_snapshot(&root->root_item);
		btrfs_set_root_last_snapshot(&root->root_item,
					     trans->transid - 1);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
		/*
		 * abuse rtransid, it is safe because it is impossible to
		 * receive data into a relocation tree.
		 */
		btrfs_set_root_rtransid(root_item, last_snap);
		btrfs_set_root_otransid(root_item, trans->transid);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key);
	BUG_ON(IS_ERR(reloc_root));
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}

/*
 * create reloc tree for a given fs tree. the reloc tree is just a
 * snapshot of the fs tree with a special root objectid.
 */
1404 */ 1405 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, 1406 struct btrfs_root *root) 1407 { 1408 struct btrfs_root *reloc_root; 1409 struct reloc_control *rc = root->fs_info->reloc_ctl; 1410 struct btrfs_block_rsv *rsv; 1411 int clear_rsv = 0; 1412 int ret; 1413 1414 if (root->reloc_root) { 1415 reloc_root = root->reloc_root; 1416 reloc_root->last_trans = trans->transid; 1417 return 0; 1418 } 1419 1420 if (!rc || !rc->create_reloc_tree || 1421 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) 1422 return 0; 1423 1424 if (!trans->reloc_reserved) { 1425 rsv = trans->block_rsv; 1426 trans->block_rsv = rc->block_rsv; 1427 clear_rsv = 1; 1428 } 1429 reloc_root = create_reloc_root(trans, root, root->root_key.objectid); 1430 if (clear_rsv) 1431 trans->block_rsv = rsv; 1432 1433 ret = __add_reloc_root(reloc_root); 1434 BUG_ON(ret < 0); 1435 root->reloc_root = reloc_root; 1436 return 0; 1437 } 1438 1439 /* 1440 * update root item of reloc tree 1441 */ 1442 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, 1443 struct btrfs_root *root) 1444 { 1445 struct btrfs_root *reloc_root; 1446 struct btrfs_root_item *root_item; 1447 int ret; 1448 1449 if (!root->reloc_root) 1450 goto out; 1451 1452 reloc_root = root->reloc_root; 1453 root_item = &reloc_root->root_item; 1454 1455 if (root->fs_info->reloc_ctl->merge_reloc_tree && 1456 btrfs_root_refs(root_item) == 0) { 1457 root->reloc_root = NULL; 1458 __del_reloc_root(reloc_root); 1459 } 1460 1461 if (reloc_root->commit_root != reloc_root->node) { 1462 btrfs_set_root_node(root_item, reloc_root->node); 1463 free_extent_buffer(reloc_root->commit_root); 1464 reloc_root->commit_root = btrfs_root_node(reloc_root); 1465 } 1466 1467 ret = btrfs_update_root(trans, root->fs_info->tree_root, 1468 &reloc_root->root_key, root_item); 1469 BUG_ON(ret); 1470 1471 out: 1472 return 0; 1473 } 1474 1475 /* 1476 * helper to find first cached inode with inode number >= objectid 1477 * in a subvolume 1478 */ 1479 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid) 1480 { 1481 struct rb_node *node; 1482 struct rb_node *prev; 1483 struct btrfs_inode *entry; 1484 struct inode *inode; 1485 1486 spin_lock(&root->inode_lock); 1487 again: 1488 node = root->inode_tree.rb_node; 1489 prev = NULL; 1490 while (node) { 1491 prev = node; 1492 entry = rb_entry(node, struct btrfs_inode, rb_node); 1493 1494 if (objectid < btrfs_ino(&entry->vfs_inode)) 1495 node = node->rb_left; 1496 else if (objectid > btrfs_ino(&entry->vfs_inode)) 1497 node = node->rb_right; 1498 else 1499 break; 1500 } 1501 if (!node) { 1502 while (prev) { 1503 entry = rb_entry(prev, struct btrfs_inode, rb_node); 1504 if (objectid <= btrfs_ino(&entry->vfs_inode)) { 1505 node = prev; 1506 break; 1507 } 1508 prev = rb_next(prev); 1509 } 1510 } 1511 while (node) { 1512 entry = rb_entry(node, struct btrfs_inode, rb_node); 1513 inode = igrab(&entry->vfs_inode); 1514 if (inode) { 1515 spin_unlock(&root->inode_lock); 1516 return inode; 1517 } 1518 1519 objectid = btrfs_ino(&entry->vfs_inode) + 1; 1520 if (cond_resched_lock(&root->inode_lock)) 1521 goto again; 1522 1523 node = rb_next(node); 1524 } 1525 spin_unlock(&root->inode_lock); 1526 return NULL; 1527 } 1528 1529 static int in_block_group(u64 bytenr, 1530 struct btrfs_block_group_cache *block_group) 1531 { 1532 if (bytenr >= block_group->key.objectid && 1533 bytenr < block_group->key.objectid + block_group->key.offset) 1534 return 1; 1535 return 0; 1536 } 1537 1538 /* 1539 * get new location of data 1540 */ 1541 static int 
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
				       bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * readpage to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(inode) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(inode) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    root->sectorsize));
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(inode, key.offset, end,
							1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
					   num_bytes, parent,
					   btrfs_header_owner(leaf),
					   key.objectid, key.offset, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					parent, btrfs_header_owner(leaf),
					key.objectid, key.offset, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = btrfs_level_size(dest, level - 1);
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

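		/*
		 * path tracks our position in the reloc tree (src) while
		 * parent walks the matching blocks in the fs tree (dest);
		 * compare the block pointers at this level to see whether
		 * the two trees still share the child block.
		 */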
		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							 path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(dest, old_bytenr, blocksize,
					     old_ptr_gen);
			if (!eb || !extent_buffer_uptodate(eb)) {
				ret = (!eb) ? -ENOMEM : -EIO;
				free_extent_buffer(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
					   path->nodes[level]->start,
					   src->root_key.objectid, level - 1, 0,
					   1);
		BUG_ON(ret);
		ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
					   0, dest->root_key.objectid, level - 1,
					   0, 1);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
					path->nodes[level]->start,
					src->root_key.objectid, level - 1, 0,
					1);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
					0, dest->root_key.objectid, level - 1,
					0, 1);
		BUG_ON(ret);

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}

/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}

/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 bytenr;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 blocksize;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
		blocksize = btrfs_level_size(root, i - 1);
		eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
		if (!eb || !extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}

/*
 * invalidate extent cache for file extents whose keys are in the
 * range [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(inode);

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, root->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for readpage to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		btrfs_drop_extent_cache(inode, start, end, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}

/*
 * merge the relocated tree blocks in a reloc tree with the
 * corresponding fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	LIST_HEAD(inode_list);
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int level;
	int max_level;
	int replaced = 0;
	int ret;
	int err = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		extent_buffer_get(reloc_root->node);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			goto out;
		}
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		root_item->drop_level = level;

		btrfs_end_transaction_throttle(trans, root);
		trans = NULL;

		btrfs_btree_balance_dirty(root);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to
	 * be relocated and that block is the tree root.
	 */
2201 */ 2202 leaf = btrfs_lock_root_node(root); 2203 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf); 2204 btrfs_tree_unlock(leaf); 2205 free_extent_buffer(leaf); 2206 if (ret < 0) 2207 err = ret; 2208 out: 2209 btrfs_free_path(path); 2210 2211 if (err == 0) { 2212 memset(&root_item->drop_progress, 0, 2213 sizeof(root_item->drop_progress)); 2214 root_item->drop_level = 0; 2215 btrfs_set_root_refs(root_item, 0); 2216 btrfs_update_reloc_root(trans, root); 2217 } 2218 2219 if (trans) 2220 btrfs_end_transaction_throttle(trans, root); 2221 2222 btrfs_btree_balance_dirty(root); 2223 2224 if (replaced && rc->stage == UPDATE_DATA_PTRS) 2225 invalidate_extent_cache(root, &key, &next_key); 2226 2227 return err; 2228 } 2229 2230 static noinline_for_stack 2231 int prepare_to_merge(struct reloc_control *rc, int err) 2232 { 2233 struct btrfs_root *root = rc->extent_root; 2234 struct btrfs_root *reloc_root; 2235 struct btrfs_trans_handle *trans; 2236 LIST_HEAD(reloc_roots); 2237 u64 num_bytes = 0; 2238 int ret; 2239 2240 mutex_lock(&root->fs_info->reloc_mutex); 2241 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2242 rc->merging_rsv_size += rc->nodes_relocated * 2; 2243 mutex_unlock(&root->fs_info->reloc_mutex); 2244 2245 again: 2246 if (!err) { 2247 num_bytes = rc->merging_rsv_size; 2248 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes, 2249 BTRFS_RESERVE_FLUSH_ALL); 2250 if (ret) 2251 err = ret; 2252 } 2253 2254 trans = btrfs_join_transaction(rc->extent_root); 2255 if (IS_ERR(trans)) { 2256 if (!err) 2257 btrfs_block_rsv_release(rc->extent_root, 2258 rc->block_rsv, num_bytes); 2259 return PTR_ERR(trans); 2260 } 2261 2262 if (!err) { 2263 if (num_bytes != rc->merging_rsv_size) { 2264 btrfs_end_transaction(trans, rc->extent_root); 2265 btrfs_block_rsv_release(rc->extent_root, 2266 rc->block_rsv, num_bytes); 2267 goto again; 2268 } 2269 } 2270 2271 rc->merge_reloc_tree = 1; 2272 2273 while (!list_empty(&rc->reloc_roots)) { 2274 reloc_root = list_entry(rc->reloc_roots.next, 2275 struct btrfs_root, root_list); 2276 list_del_init(&reloc_root->root_list); 2277 2278 root = read_fs_root(reloc_root->fs_info, 2279 reloc_root->root_key.offset); 2280 BUG_ON(IS_ERR(root)); 2281 BUG_ON(root->reloc_root != reloc_root); 2282 2283 /* 2284 * set the reference count to 1, so btrfs_recover_relocation 2285 * knows it should resume merging 2286 */ 2287 if (!err) 2288 btrfs_set_root_refs(&reloc_root->root_item, 1); 2289 btrfs_update_reloc_root(trans, root); 2290 2291 list_add(&reloc_root->root_list, &reloc_roots); 2292 } 2293 2294 list_splice(&reloc_roots, &rc->reloc_roots); 2295 2296 if (!err) 2297 btrfs_commit_transaction(trans, rc->extent_root); 2298 else 2299 btrfs_end_transaction(trans, rc->extent_root); 2300 return err; 2301 } 2302 2303 static noinline_for_stack 2304 void free_reloc_roots(struct list_head *list) 2305 { 2306 struct btrfs_root *reloc_root; 2307 2308 while (!list_empty(list)) { 2309 reloc_root = list_entry(list->next, struct btrfs_root, 2310 root_list); 2311 __del_reloc_root(reloc_root); 2312 free_extent_buffer(reloc_root->node); 2313 free_extent_buffer(reloc_root->commit_root); 2314 kfree(reloc_root); 2315 } 2316 } 2317 2318 static noinline_for_stack 2319 int merge_reloc_roots(struct reloc_control *rc) 2320 { 2321 struct btrfs_trans_handle *trans; 2322 struct btrfs_root *root; 2323 struct btrfs_root *reloc_root; 2324 u64 last_snap; 2325 u64 otransid; 2326 u64 objectid; 2327 LIST_HEAD(reloc_roots); 2328 int found = 0; 2329 int ret = 0; 2330 again: 2331 root =
rc->extent_root; 2332 2333 /* 2334 * this serializes us with btrfs_record_root_in_transaction; 2335 * we have to make sure nobody is in the middle of 2336 * adding their roots to the list while we are 2337 * doing this splice 2338 */ 2339 mutex_lock(&root->fs_info->reloc_mutex); 2340 list_splice_init(&rc->reloc_roots, &reloc_roots); 2341 mutex_unlock(&root->fs_info->reloc_mutex); 2342 2343 while (!list_empty(&reloc_roots)) { 2344 found = 1; 2345 reloc_root = list_entry(reloc_roots.next, 2346 struct btrfs_root, root_list); 2347 2348 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 2349 root = read_fs_root(reloc_root->fs_info, 2350 reloc_root->root_key.offset); 2351 BUG_ON(IS_ERR(root)); 2352 BUG_ON(root->reloc_root != reloc_root); 2353 2354 ret = merge_reloc_root(rc, root); 2355 if (ret) { 2356 __del_reloc_root(reloc_root); 2357 free_extent_buffer(reloc_root->node); 2358 free_extent_buffer(reloc_root->commit_root); 2359 kfree(reloc_root); 2360 goto out; 2361 } 2362 } else { 2363 list_del_init(&reloc_root->root_list); 2364 } 2365 2366 /* 2367 * we keep the old last snapshot transid in rtransid when we 2368 * created the relocation tree. 2369 */ 2370 last_snap = btrfs_root_rtransid(&reloc_root->root_item); 2371 otransid = btrfs_root_otransid(&reloc_root->root_item); 2372 objectid = reloc_root->root_key.offset; 2373 2374 ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1); 2375 if (ret < 0) { 2376 if (list_empty(&reloc_root->root_list)) 2377 list_add_tail(&reloc_root->root_list, 2378 &reloc_roots); 2379 goto out; 2380 } else if (!ret) { 2381 /* 2382 * recover the last snapshot transid to avoid 2383 * space balance breaking NOCOW. 2384 */ 2385 root = read_fs_root(rc->extent_root->fs_info, 2386 objectid); 2387 if (IS_ERR(root)) 2388 continue; 2389 2390 trans = btrfs_join_transaction(root); 2391 BUG_ON(IS_ERR(trans)); 2392 2393 /* Check if the fs/file tree was snapshotted or not.
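 * if a new snapshot was created after the reloc tree, the old last_snapshot value must be kept as is.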
*/ 2394 if (btrfs_root_last_snapshot(&root->root_item) == 2395 otransid - 1) 2396 btrfs_set_root_last_snapshot(&root->root_item, 2397 last_snap); 2398 2399 btrfs_end_transaction(trans, root); 2400 } 2401 } 2402 2403 if (found) { 2404 found = 0; 2405 goto again; 2406 } 2407 out: 2408 if (ret) { 2409 btrfs_std_error(root->fs_info, ret); 2410 if (!list_empty(&reloc_roots)) 2411 free_reloc_roots(&reloc_roots); 2412 2413 /* new reloc root may be added */ 2414 mutex_lock(&root->fs_info->reloc_mutex); 2415 list_splice_init(&rc->reloc_roots, &reloc_roots); 2416 mutex_unlock(&root->fs_info->reloc_mutex); 2417 if (!list_empty(&reloc_roots)) 2418 free_reloc_roots(&reloc_roots); 2419 } 2420 2421 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2422 return ret; 2423 } 2424 2425 static void free_block_list(struct rb_root *blocks) 2426 { 2427 struct tree_block *block; 2428 struct rb_node *rb_node; 2429 while ((rb_node = rb_first(blocks))) { 2430 block = rb_entry(rb_node, struct tree_block, rb_node); 2431 rb_erase(rb_node, blocks); 2432 kfree(block); 2433 } 2434 } 2435 2436 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, 2437 struct btrfs_root *reloc_root) 2438 { 2439 struct btrfs_root *root; 2440 2441 if (reloc_root->last_trans == trans->transid) 2442 return 0; 2443 2444 root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset); 2445 BUG_ON(IS_ERR(root)); 2446 BUG_ON(root->reloc_root != reloc_root); 2447 2448 return btrfs_record_root_in_trans(trans, root); 2449 } 2450 2451 static noinline_for_stack 2452 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2453 struct reloc_control *rc, 2454 struct backref_node *node, 2455 struct backref_edge *edges[], int *nr) 2456 { 2457 struct backref_node *next; 2458 struct btrfs_root *root; 2459 int index = 0; 2460 2461 next = node; 2462 while (1) { 2463 cond_resched(); 2464 next = walk_up_backref(next, edges, &index); 2465 root = next->root; 2466 BUG_ON(!root); 2467 BUG_ON(!root->ref_cows); 2468 2469 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2470 record_reloc_root_in_trans(trans, root); 2471 break; 2472 } 2473 2474 btrfs_record_root_in_trans(trans, root); 2475 root = root->reloc_root; 2476 2477 if (next->new_bytenr != root->node->start) { 2478 BUG_ON(next->new_bytenr); 2479 BUG_ON(!list_empty(&next->list)); 2480 next->new_bytenr = root->node->start; 2481 next->root = root; 2482 list_add_tail(&next->list, 2483 &rc->backref_cache.changed); 2484 __mark_block_processed(rc, next); 2485 break; 2486 } 2487 2488 WARN_ON(1); 2489 root = NULL; 2490 next = walk_down_backref(edges, &index); 2491 if (!next || next->level <= node->level) 2492 break; 2493 } 2494 if (!root) 2495 return NULL; 2496 2497 *nr = index; 2498 next = node; 2499 /* setup backref node path for btrfs_reloc_cow_block */ 2500 while (1) { 2501 rc->backref_cache.path[next->level] = next; 2502 if (--index < 0) 2503 break; 2504 next = edges[index]->node[UPPER]; 2505 } 2506 return root; 2507 } 2508 2509 /* 2510 * select a tree root for relocation. return NULL if the block 2511 * is reference counted. we should use do_relocation() in this 2512 * case. return a tree root pointer if the block isn't reference 2513 * counted. return -ENOENT if the block is root of reloc tree. 
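 * (the caller then just marks the block processed and skips it)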
2514 */ 2515 static noinline_for_stack 2516 struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans, 2517 struct backref_node *node) 2518 { 2519 struct backref_node *next; 2520 struct btrfs_root *root; 2521 struct btrfs_root *fs_root = NULL; 2522 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2523 int index = 0; 2524 2525 next = node; 2526 while (1) { 2527 cond_resched(); 2528 next = walk_up_backref(next, edges, &index); 2529 root = next->root; 2530 BUG_ON(!root); 2531 2532 /* no other choice for a non-reference counted tree */ 2533 if (!root->ref_cows) 2534 return root; 2535 2536 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) 2537 fs_root = root; 2538 2539 if (next != node) 2540 return NULL; 2541 2542 next = walk_down_backref(edges, &index); 2543 if (!next || next->level <= node->level) 2544 break; 2545 } 2546 2547 if (!fs_root) 2548 return ERR_PTR(-ENOENT); 2549 return fs_root; 2550 } 2551 2552 static noinline_for_stack 2553 u64 calcu_metadata_size(struct reloc_control *rc, 2554 struct backref_node *node, int reserve) 2555 { 2556 struct backref_node *next = node; 2557 struct backref_edge *edge; 2558 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2559 u64 num_bytes = 0; 2560 int index = 0; 2561 2562 BUG_ON(reserve && node->processed); 2563 2564 while (next) { 2565 cond_resched(); 2566 while (1) { 2567 if (next->processed && (reserve || next != node)) 2568 break; 2569 2570 num_bytes += btrfs_level_size(rc->extent_root, 2571 next->level); 2572 2573 if (list_empty(&next->upper)) 2574 break; 2575 2576 edge = list_entry(next->upper.next, 2577 struct backref_edge, list[LOWER]); 2578 edges[index++] = edge; 2579 next = edge->node[UPPER]; 2580 } 2581 next = walk_down_backref(edges, &index); 2582 } 2583 return num_bytes; 2584 } 2585 2586 static int reserve_metadata_space(struct btrfs_trans_handle *trans, 2587 struct reloc_control *rc, 2588 struct backref_node *node) 2589 { 2590 struct btrfs_root *root = rc->extent_root; 2591 u64 num_bytes; 2592 int ret; 2593 2594 num_bytes = calcu_metadata_size(rc, node, 1) * 2; 2595 2596 trans->block_rsv = rc->block_rsv; 2597 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes, 2598 BTRFS_RESERVE_FLUSH_ALL); 2599 if (ret) { 2600 if (ret == -EAGAIN) 2601 rc->commit_transaction = 1; 2602 return ret; 2603 } 2604 2605 return 0; 2606 } 2607 2608 static void release_metadata_space(struct reloc_control *rc, 2609 struct backref_node *node) 2610 { 2611 u64 num_bytes = calcu_metadata_size(rc, node, 0) * 2; 2612 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, num_bytes); 2613 } 2614 2615 /* 2616 * relocate a tree block, and then update pointers in upper level 2617 * blocks that reference the block to point to the new location. 2618 * 2619 * if called by link_to_upper, the block has already been relocated. 2620 * in that case this function just updates pointers.
2621 */ 2622 static int do_relocation(struct btrfs_trans_handle *trans, 2623 struct reloc_control *rc, 2624 struct backref_node *node, 2625 struct btrfs_key *key, 2626 struct btrfs_path *path, int lowest) 2627 { 2628 struct backref_node *upper; 2629 struct backref_edge *edge; 2630 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2631 struct btrfs_root *root; 2632 struct extent_buffer *eb; 2633 u32 blocksize; 2634 u64 bytenr; 2635 u64 generation; 2636 int nr; 2637 int slot; 2638 int ret; 2639 int err = 0; 2640 2641 BUG_ON(lowest && node->eb); 2642 2643 path->lowest_level = node->level + 1; 2644 rc->backref_cache.path[node->level] = node; 2645 list_for_each_entry(edge, &node->upper, list[LOWER]) { 2646 cond_resched(); 2647 2648 upper = edge->node[UPPER]; 2649 root = select_reloc_root(trans, rc, upper, edges, &nr); 2650 BUG_ON(!root); 2651 2652 if (upper->eb && !upper->locked) { 2653 if (!lowest) { 2654 ret = btrfs_bin_search(upper->eb, key, 2655 upper->level, &slot); 2656 BUG_ON(ret); 2657 bytenr = btrfs_node_blockptr(upper->eb, slot); 2658 if (node->eb->start == bytenr) 2659 goto next; 2660 } 2661 drop_node_buffer(upper); 2662 } 2663 2664 if (!upper->eb) { 2665 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2666 if (ret < 0) { 2667 err = ret; 2668 break; 2669 } 2670 BUG_ON(ret > 0); 2671 2672 if (!upper->eb) { 2673 upper->eb = path->nodes[upper->level]; 2674 path->nodes[upper->level] = NULL; 2675 } else { 2676 BUG_ON(upper->eb != path->nodes[upper->level]); 2677 } 2678 2679 upper->locked = 1; 2680 path->locks[upper->level] = 0; 2681 2682 slot = path->slots[upper->level]; 2683 btrfs_release_path(path); 2684 } else { 2685 ret = btrfs_bin_search(upper->eb, key, upper->level, 2686 &slot); 2687 BUG_ON(ret); 2688 } 2689 2690 bytenr = btrfs_node_blockptr(upper->eb, slot); 2691 if (lowest) { 2692 BUG_ON(bytenr != node->bytenr); 2693 } else { 2694 if (node->eb->start == bytenr) 2695 goto next; 2696 } 2697 2698 blocksize = btrfs_level_size(root, node->level); 2699 generation = btrfs_node_ptr_generation(upper->eb, slot); 2700 eb = read_tree_block(root, bytenr, blocksize, generation); 2701 if (!eb || !extent_buffer_uptodate(eb)) { 2702 free_extent_buffer(eb); 2703 err = -EIO; 2704 goto next; 2705 } 2706 btrfs_tree_lock(eb); 2707 btrfs_set_lock_blocking(eb); 2708 2709 if (!node->eb) { 2710 ret = btrfs_cow_block(trans, root, eb, upper->eb, 2711 slot, &eb); 2712 btrfs_tree_unlock(eb); 2713 free_extent_buffer(eb); 2714 if (ret < 0) { 2715 err = ret; 2716 goto next; 2717 } 2718 BUG_ON(node->eb != eb); 2719 } else { 2720 btrfs_set_node_blockptr(upper->eb, slot, 2721 node->eb->start); 2722 btrfs_set_node_ptr_generation(upper->eb, slot, 2723 trans->transid); 2724 btrfs_mark_buffer_dirty(upper->eb); 2725 2726 ret = btrfs_inc_extent_ref(trans, root, 2727 node->eb->start, blocksize, 2728 upper->eb->start, 2729 btrfs_header_owner(upper->eb), 2730 node->level, 0, 1); 2731 BUG_ON(ret); 2732 2733 ret = btrfs_drop_subtree(trans, root, eb, upper->eb); 2734 BUG_ON(ret); 2735 } 2736 next: 2737 if (!upper->pending) 2738 drop_node_buffer(upper); 2739 else 2740 unlock_node_buffer(upper); 2741 if (err) 2742 break; 2743 } 2744 2745 if (!err && node->pending) { 2746 drop_node_buffer(node); 2747 list_move_tail(&node->list, &rc->backref_cache.changed); 2748 node->pending = 0; 2749 } 2750 2751 path->lowest_level = 0; 2752 BUG_ON(err == -ENOSPC); 2753 return err; 2754 } 2755 2756 static int link_to_upper(struct btrfs_trans_handle *trans, 2757 struct reloc_control *rc, 2758 struct backref_node *node, 2759 struct btrfs_path 
*path) 2760 { 2761 struct btrfs_key key; 2762 2763 btrfs_node_key_to_cpu(node->eb, &key, 0); 2764 return do_relocation(trans, rc, node, &key, path, 0); 2765 } 2766 2767 static int finish_pending_nodes(struct btrfs_trans_handle *trans, 2768 struct reloc_control *rc, 2769 struct btrfs_path *path, int err) 2770 { 2771 LIST_HEAD(list); 2772 struct backref_cache *cache = &rc->backref_cache; 2773 struct backref_node *node; 2774 int level; 2775 int ret; 2776 2777 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2778 while (!list_empty(&cache->pending[level])) { 2779 node = list_entry(cache->pending[level].next, 2780 struct backref_node, list); 2781 list_move_tail(&node->list, &list); 2782 BUG_ON(!node->pending); 2783 2784 if (!err) { 2785 ret = link_to_upper(trans, rc, node, path); 2786 if (ret < 0) 2787 err = ret; 2788 } 2789 } 2790 list_splice_init(&list, &cache->pending[level]); 2791 } 2792 return err; 2793 } 2794 2795 static void mark_block_processed(struct reloc_control *rc, 2796 u64 bytenr, u32 blocksize) 2797 { 2798 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, 2799 EXTENT_DIRTY, GFP_NOFS); 2800 } 2801 2802 static void __mark_block_processed(struct reloc_control *rc, 2803 struct backref_node *node) 2804 { 2805 u32 blocksize; 2806 if (node->level == 0 || 2807 in_block_group(node->bytenr, rc->block_group)) { 2808 blocksize = btrfs_level_size(rc->extent_root, node->level); 2809 mark_block_processed(rc, node->bytenr, blocksize); 2810 } 2811 node->processed = 1; 2812 } 2813 2814 /* 2815 * mark a block and all blocks directly/indirectly reference the block 2816 * as processed. 2817 */ 2818 static void update_processed_blocks(struct reloc_control *rc, 2819 struct backref_node *node) 2820 { 2821 struct backref_node *next = node; 2822 struct backref_edge *edge; 2823 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2824 int index = 0; 2825 2826 while (next) { 2827 cond_resched(); 2828 while (1) { 2829 if (next->processed) 2830 break; 2831 2832 __mark_block_processed(rc, next); 2833 2834 if (list_empty(&next->upper)) 2835 break; 2836 2837 edge = list_entry(next->upper.next, 2838 struct backref_edge, list[LOWER]); 2839 edges[index++] = edge; 2840 next = edge->node[UPPER]; 2841 } 2842 next = walk_down_backref(edges, &index); 2843 } 2844 } 2845 2846 static int tree_block_processed(u64 bytenr, u32 blocksize, 2847 struct reloc_control *rc) 2848 { 2849 if (test_range_bit(&rc->processed_blocks, bytenr, 2850 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2851 return 1; 2852 return 0; 2853 } 2854 2855 static int get_tree_block_key(struct reloc_control *rc, 2856 struct tree_block *block) 2857 { 2858 struct extent_buffer *eb; 2859 2860 BUG_ON(block->key_ready); 2861 eb = read_tree_block(rc->extent_root, block->bytenr, 2862 block->key.objectid, block->key.offset); 2863 if (!eb || !extent_buffer_uptodate(eb)) { 2864 free_extent_buffer(eb); 2865 return -EIO; 2866 } 2867 WARN_ON(btrfs_header_level(eb) != block->level); 2868 if (block->level == 0) 2869 btrfs_item_key_to_cpu(eb, &block->key, 0); 2870 else 2871 btrfs_node_key_to_cpu(eb, &block->key, 0); 2872 free_extent_buffer(eb); 2873 block->key_ready = 1; 2874 return 0; 2875 } 2876 2877 static int reada_tree_block(struct reloc_control *rc, 2878 struct tree_block *block) 2879 { 2880 BUG_ON(block->key_ready); 2881 if (block->key.type == BTRFS_METADATA_ITEM_KEY) 2882 readahead_tree_block(rc->extent_root, block->bytenr, 2883 block->key.objectid, 2884 rc->extent_root->leafsize); 2885 else 2886 readahead_tree_block(rc->extent_root, 
block->bytenr, 2887 block->key.objectid, block->key.offset); 2888 return 0; 2889 } 2890 2891 /* 2892 * helper function to relocate a tree block 2893 */ 2894 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2895 struct reloc_control *rc, 2896 struct backref_node *node, 2897 struct btrfs_key *key, 2898 struct btrfs_path *path) 2899 { 2900 struct btrfs_root *root; 2901 int release = 0; 2902 int ret = 0; 2903 2904 if (!node) 2905 return 0; 2906 2907 BUG_ON(node->processed); 2908 root = select_one_root(trans, node); 2909 if (root == ERR_PTR(-ENOENT)) { 2910 update_processed_blocks(rc, node); 2911 goto out; 2912 } 2913 2914 if (!root || root->ref_cows) { 2915 ret = reserve_metadata_space(trans, rc, node); 2916 if (ret) 2917 goto out; 2918 release = 1; 2919 } 2920 2921 if (root) { 2922 if (root->ref_cows) { 2923 BUG_ON(node->new_bytenr); 2924 BUG_ON(!list_empty(&node->list)); 2925 btrfs_record_root_in_trans(trans, root); 2926 root = root->reloc_root; 2927 node->new_bytenr = root->node->start; 2928 node->root = root; 2929 list_add_tail(&node->list, &rc->backref_cache.changed); 2930 } else { 2931 path->lowest_level = node->level; 2932 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2933 btrfs_release_path(path); 2934 if (ret > 0) 2935 ret = 0; 2936 } 2937 if (!ret) 2938 update_processed_blocks(rc, node); 2939 } else { 2940 ret = do_relocation(trans, rc, node, key, path, 1); 2941 } 2942 out: 2943 if (ret || node->level == 0 || node->cowonly) { 2944 if (release) 2945 release_metadata_space(rc, node); 2946 remove_backref_node(&rc->backref_cache, node); 2947 } 2948 return ret; 2949 } 2950 2951 /* 2952 * relocate a list of blocks 2953 */ 2954 static noinline_for_stack 2955 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 2956 struct reloc_control *rc, struct rb_root *blocks) 2957 { 2958 struct backref_node *node; 2959 struct btrfs_path *path; 2960 struct tree_block *block; 2961 struct rb_node *rb_node; 2962 int ret; 2963 int err = 0; 2964 2965 path = btrfs_alloc_path(); 2966 if (!path) { 2967 err = -ENOMEM; 2968 goto out_free_blocks; 2969 } 2970 2971 rb_node = rb_first(blocks); 2972 while (rb_node) { 2973 block = rb_entry(rb_node, struct tree_block, rb_node); 2974 if (!block->key_ready) 2975 reada_tree_block(rc, block); 2976 rb_node = rb_next(rb_node); 2977 } 2978 2979 rb_node = rb_first(blocks); 2980 while (rb_node) { 2981 block = rb_entry(rb_node, struct tree_block, rb_node); 2982 if (!block->key_ready) { 2983 err = get_tree_block_key(rc, block); 2984 if (err) 2985 goto out_free_path; 2986 } 2987 rb_node = rb_next(rb_node); 2988 } 2989 2990 rb_node = rb_first(blocks); 2991 while (rb_node) { 2992 block = rb_entry(rb_node, struct tree_block, rb_node); 2993 2994 node = build_backref_tree(rc, &block->key, 2995 block->level, block->bytenr); 2996 if (IS_ERR(node)) { 2997 err = PTR_ERR(node); 2998 goto out; 2999 } 3000 3001 ret = relocate_tree_block(trans, rc, node, &block->key, 3002 path); 3003 if (ret < 0) { 3004 if (ret != -EAGAIN || rb_node == rb_first(blocks)) 3005 err = ret; 3006 goto out; 3007 } 3008 rb_node = rb_next(rb_node); 3009 } 3010 out: 3011 err = finish_pending_nodes(trans, rc, path, err); 3012 3013 out_free_path: 3014 btrfs_free_path(path); 3015 out_free_blocks: 3016 free_block_list(blocks); 3017 return err; 3018 } 3019 3020 static noinline_for_stack 3021 int prealloc_file_extent_cluster(struct inode *inode, 3022 struct file_extent_cluster *cluster) 3023 { 3024 u64 alloc_hint = 0; 3025 u64 start; 3026 u64 end; 3027 u64 offset = BTRFS_I(inode)->index_cnt; 3028 
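/* index_cnt of the data reloc inode holds the start bytenr of the block group */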
u64 num_bytes; 3029 int nr = 0; 3030 int ret = 0; 3031 3032 BUG_ON(cluster->start != cluster->boundary[0]); 3033 mutex_lock(&inode->i_mutex); 3034 3035 ret = btrfs_check_data_free_space(inode, cluster->end + 3036 1 - cluster->start); 3037 if (ret) 3038 goto out; 3039 3040 while (nr < cluster->nr) { 3041 start = cluster->boundary[nr] - offset; 3042 if (nr + 1 < cluster->nr) 3043 end = cluster->boundary[nr + 1] - 1 - offset; 3044 else 3045 end = cluster->end - offset; 3046 3047 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3048 num_bytes = end + 1 - start; 3049 ret = btrfs_prealloc_file_range(inode, 0, start, 3050 num_bytes, num_bytes, 3051 end + 1, &alloc_hint); 3052 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3053 if (ret) 3054 break; 3055 nr++; 3056 } 3057 btrfs_free_reserved_data_space(inode, cluster->end + 3058 1 - cluster->start); 3059 out: 3060 mutex_unlock(&inode->i_mutex); 3061 return ret; 3062 } 3063 3064 static noinline_for_stack 3065 int setup_extent_mapping(struct inode *inode, u64 start, u64 end, 3066 u64 block_start) 3067 { 3068 struct btrfs_root *root = BTRFS_I(inode)->root; 3069 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 3070 struct extent_map *em; 3071 int ret = 0; 3072 3073 em = alloc_extent_map(); 3074 if (!em) 3075 return -ENOMEM; 3076 3077 em->start = start; 3078 em->len = end + 1 - start; 3079 em->block_len = em->len; 3080 em->block_start = block_start; 3081 em->bdev = root->fs_info->fs_devices->latest_bdev; 3082 set_bit(EXTENT_FLAG_PINNED, &em->flags); 3083 3084 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3085 while (1) { 3086 write_lock(&em_tree->lock); 3087 ret = add_extent_mapping(em_tree, em, 0); 3088 write_unlock(&em_tree->lock); 3089 if (ret != -EEXIST) { 3090 free_extent_map(em); 3091 break; 3092 } 3093 btrfs_drop_extent_cache(inode, start, end, 0); 3094 } 3095 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3096 return ret; 3097 } 3098 3099 static int relocate_file_extent_cluster(struct inode *inode, 3100 struct file_extent_cluster *cluster) 3101 { 3102 u64 page_start; 3103 u64 page_end; 3104 u64 offset = BTRFS_I(inode)->index_cnt; 3105 unsigned long index; 3106 unsigned long last_index; 3107 struct page *page; 3108 struct file_ra_state *ra; 3109 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 3110 int nr = 0; 3111 int ret = 0; 3112 3113 if (!cluster->nr) 3114 return 0; 3115 3116 ra = kzalloc(sizeof(*ra), GFP_NOFS); 3117 if (!ra) 3118 return -ENOMEM; 3119 3120 ret = prealloc_file_extent_cluster(inode, cluster); 3121 if (ret) 3122 goto out; 3123 3124 file_ra_state_init(ra, inode->i_mapping); 3125 3126 ret = setup_extent_mapping(inode, cluster->start - offset, 3127 cluster->end - offset, cluster->start); 3128 if (ret) 3129 goto out; 3130 3131 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; 3132 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; 3133 while (index <= last_index) { 3134 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); 3135 if (ret) 3136 goto out; 3137 3138 page = find_lock_page(inode->i_mapping, index); 3139 if (!page) { 3140 page_cache_sync_readahead(inode->i_mapping, 3141 ra, NULL, index, 3142 last_index + 1 - index); 3143 page = find_or_create_page(inode->i_mapping, index, 3144 mask); 3145 if (!page) { 3146 btrfs_delalloc_release_metadata(inode, 3147 PAGE_CACHE_SIZE); 3148 ret = -ENOMEM; 3149 goto out; 3150 } 3151 } 3152 3153 if (PageReadahead(page)) { 3154 page_cache_async_readahead(inode->i_mapping, 3155 ra, NULL, page, index, 3156 last_index + 1 - index); 3157 } 
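/* make sure the page is up to date; kick off a read if it is not */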
3158 3159 if (!PageUptodate(page)) { 3160 btrfs_readpage(NULL, page); 3161 lock_page(page); 3162 if (!PageUptodate(page)) { 3163 unlock_page(page); 3164 page_cache_release(page); 3165 btrfs_delalloc_release_metadata(inode, 3166 PAGE_CACHE_SIZE); 3167 ret = -EIO; 3168 goto out; 3169 } 3170 } 3171 3172 page_start = page_offset(page); 3173 page_end = page_start + PAGE_CACHE_SIZE - 1; 3174 3175 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); 3176 3177 set_page_extent_mapped(page); 3178 3179 if (nr < cluster->nr && 3180 page_start + offset == cluster->boundary[nr]) { 3181 set_extent_bits(&BTRFS_I(inode)->io_tree, 3182 page_start, page_end, 3183 EXTENT_BOUNDARY, GFP_NOFS); 3184 nr++; 3185 } 3186 3187 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); 3188 set_page_dirty(page); 3189 3190 unlock_extent(&BTRFS_I(inode)->io_tree, 3191 page_start, page_end); 3192 unlock_page(page); 3193 page_cache_release(page); 3194 3195 index++; 3196 balance_dirty_pages_ratelimited(inode->i_mapping); 3197 btrfs_throttle(BTRFS_I(inode)->root); 3198 } 3199 WARN_ON(nr != cluster->nr); 3200 out: 3201 kfree(ra); 3202 return ret; 3203 } 3204 3205 static noinline_for_stack 3206 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key, 3207 struct file_extent_cluster *cluster) 3208 { 3209 int ret; 3210 3211 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 3212 ret = relocate_file_extent_cluster(inode, cluster); 3213 if (ret) 3214 return ret; 3215 cluster->nr = 0; 3216 } 3217 3218 if (!cluster->nr) 3219 cluster->start = extent_key->objectid; 3220 else 3221 BUG_ON(cluster->nr >= MAX_EXTENTS); 3222 cluster->end = extent_key->objectid + extent_key->offset - 1; 3223 cluster->boundary[cluster->nr] = extent_key->objectid; 3224 cluster->nr++; 3225 3226 if (cluster->nr >= MAX_EXTENTS) { 3227 ret = relocate_file_extent_cluster(inode, cluster); 3228 if (ret) 3229 return ret; 3230 cluster->nr = 0; 3231 } 3232 return 0; 3233 } 3234 3235 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3236 static int get_ref_objectid_v0(struct reloc_control *rc, 3237 struct btrfs_path *path, 3238 struct btrfs_key *extent_key, 3239 u64 *ref_objectid, int *path_change) 3240 { 3241 struct btrfs_key key; 3242 struct extent_buffer *leaf; 3243 struct btrfs_extent_ref_v0 *ref0; 3244 int ret; 3245 int slot; 3246 3247 leaf = path->nodes[0]; 3248 slot = path->slots[0]; 3249 while (1) { 3250 if (slot >= btrfs_header_nritems(leaf)) { 3251 ret = btrfs_next_leaf(rc->extent_root, path); 3252 if (ret < 0) 3253 return ret; 3254 BUG_ON(ret > 0); 3255 leaf = path->nodes[0]; 3256 slot = path->slots[0]; 3257 if (path_change) 3258 *path_change = 1; 3259 } 3260 btrfs_item_key_to_cpu(leaf, &key, slot); 3261 if (key.objectid != extent_key->objectid) 3262 return -ENOENT; 3263 3264 if (key.type != BTRFS_EXTENT_REF_V0_KEY) { 3265 slot++; 3266 continue; 3267 } 3268 ref0 = btrfs_item_ptr(leaf, slot, 3269 struct btrfs_extent_ref_v0); 3270 *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0); 3271 break; 3272 } 3273 return 0; 3274 } 3275 #endif 3276 3277 /* 3278 * helper to add a tree block to the list. 
3279 * the major work is getting the generation and level of the block 3280 */ 3281 static int add_tree_block(struct reloc_control *rc, 3282 struct btrfs_key *extent_key, 3283 struct btrfs_path *path, 3284 struct rb_root *blocks) 3285 { 3286 struct extent_buffer *eb; 3287 struct btrfs_extent_item *ei; 3288 struct btrfs_tree_block_info *bi; 3289 struct tree_block *block; 3290 struct rb_node *rb_node; 3291 u32 item_size; 3292 int level = -1; 3293 u64 generation; 3294 3295 eb = path->nodes[0]; 3296 item_size = btrfs_item_size_nr(eb, path->slots[0]); 3297 3298 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || 3299 item_size >= sizeof(*ei) + sizeof(*bi)) { 3300 ei = btrfs_item_ptr(eb, path->slots[0], 3301 struct btrfs_extent_item); 3302 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { 3303 bi = (struct btrfs_tree_block_info *)(ei + 1); 3304 level = btrfs_tree_block_level(eb, bi); 3305 } else { 3306 level = (int)extent_key->offset; 3307 } 3308 generation = btrfs_extent_generation(eb, ei); 3309 } else { 3310 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3311 u64 ref_owner; 3312 int ret; 3313 3314 BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0)); 3315 ret = get_ref_objectid_v0(rc, path, extent_key, 3316 &ref_owner, NULL); 3317 if (ret < 0) 3318 return ret; 3319 BUG_ON(ref_owner >= BTRFS_MAX_LEVEL); 3320 level = (int)ref_owner; 3321 /* FIXME: get real generation */ 3322 generation = 0; 3323 #else 3324 BUG(); 3325 #endif 3326 } 3327 3328 btrfs_release_path(path); 3329 3330 BUG_ON(level == -1); 3331 3332 block = kmalloc(sizeof(*block), GFP_NOFS); 3333 if (!block) 3334 return -ENOMEM; 3335 3336 block->bytenr = extent_key->objectid; 3337 block->key.objectid = rc->extent_root->leafsize; 3338 block->key.offset = generation; 3339 block->level = level; 3340 block->key_ready = 0; 3341 3342 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); 3343 if (rb_node) 3344 backref_tree_panic(rb_node, -EEXIST, block->bytenr); 3345 3346 return 0; 3347 } 3348 3349 /* 3350 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY 3351 */ 3352 static int __add_tree_block(struct reloc_control *rc, 3353 u64 bytenr, u32 blocksize, 3354 struct rb_root *blocks) 3355 { 3356 struct btrfs_path *path; 3357 struct btrfs_key key; 3358 int ret; 3359 bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info, 3360 SKINNY_METADATA); 3361 3362 if (tree_block_processed(bytenr, blocksize, rc)) 3363 return 0; 3364 3365 if (tree_search(blocks, bytenr)) 3366 return 0; 3367 3368 path = btrfs_alloc_path(); 3369 if (!path) 3370 return -ENOMEM; 3371 again: 3372 key.objectid = bytenr; 3373 if (skinny) { 3374 key.type = BTRFS_METADATA_ITEM_KEY; 3375 key.offset = (u64)-1; 3376 } else { 3377 key.type = BTRFS_EXTENT_ITEM_KEY; 3378 key.offset = blocksize; 3379 } 3380 3381 path->search_commit_root = 1; 3382 path->skip_locking = 1; 3383 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); 3384 if (ret < 0) 3385 goto out; 3386 3387 if (ret > 0 && skinny) { 3388 if (path->slots[0]) { 3389 path->slots[0]--; 3390 btrfs_item_key_to_cpu(path->nodes[0], &key, 3391 path->slots[0]); 3392 if (key.objectid == bytenr && 3393 (key.type == BTRFS_METADATA_ITEM_KEY || 3394 (key.type == BTRFS_EXTENT_ITEM_KEY && 3395 key.offset == blocksize))) 3396 ret = 0; 3397 } 3398 3399 if (ret) { 3400 skinny = false; 3401 btrfs_release_path(path); 3402 goto again; 3403 } 3404 } 3405 BUG_ON(ret); 3406 3407 ret = add_tree_block(rc, &key, path, blocks); 3408 out: 3409 btrfs_free_path(path); 3410 return ret; 3411 } 3412 3413 /* 3414 * helper to check 
if the block uses full backrefs for pointers in it 3415 */ 3416 static int block_use_full_backref(struct reloc_control *rc, 3417 struct extent_buffer *eb) 3418 { 3419 u64 flags; 3420 int ret; 3421 3422 if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) || 3423 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV) 3424 return 1; 3425 3426 ret = btrfs_lookup_extent_info(NULL, rc->extent_root, 3427 eb->start, btrfs_header_level(eb), 1, 3428 NULL, &flags); 3429 BUG_ON(ret); 3430 3431 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) 3432 ret = 1; 3433 else 3434 ret = 0; 3435 return ret; 3436 } 3437 3438 static int delete_block_group_cache(struct btrfs_fs_info *fs_info, 3439 struct inode *inode, u64 ino) 3440 { 3441 struct btrfs_key key; 3442 struct btrfs_root *root = fs_info->tree_root; 3443 struct btrfs_trans_handle *trans; 3444 int ret = 0; 3445 3446 if (inode) 3447 goto truncate; 3448 3449 key.objectid = ino; 3450 key.type = BTRFS_INODE_ITEM_KEY; 3451 key.offset = 0; 3452 3453 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 3454 if (IS_ERR(inode) || is_bad_inode(inode)) { 3455 if (!IS_ERR(inode)) 3456 iput(inode); 3457 return -ENOENT; 3458 } 3459 3460 truncate: 3461 ret = btrfs_check_trunc_cache_free_space(root, 3462 &fs_info->global_block_rsv); 3463 if (ret) 3464 goto out; 3465 3466 trans = btrfs_join_transaction(root); 3467 if (IS_ERR(trans)) { 3468 ret = PTR_ERR(trans); 3469 goto out; 3470 } 3471 3472 ret = btrfs_truncate_free_space_cache(root, trans, inode); 3473 3474 btrfs_end_transaction(trans, root); 3475 btrfs_btree_balance_dirty(root); 3476 out: 3477 iput(inode); 3478 return ret; 3479 } 3480 3481 /* 3482 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY 3483 * this function scans the fs tree to find blocks that reference the data extent 3484 */ 3485 static int find_data_references(struct reloc_control *rc, 3486 struct btrfs_key *extent_key, 3487 struct extent_buffer *leaf, 3488 struct btrfs_extent_data_ref *ref, 3489 struct rb_root *blocks) 3490 { 3491 struct btrfs_path *path; 3492 struct tree_block *block; 3493 struct btrfs_root *root; 3494 struct btrfs_file_extent_item *fi; 3495 struct rb_node *rb_node; 3496 struct btrfs_key key; 3497 u64 ref_root; 3498 u64 ref_objectid; 3499 u64 ref_offset; 3500 u32 ref_count; 3501 u32 nritems; 3502 int err = 0; 3503 int added = 0; 3504 int counted; 3505 int ret; 3506 3507 ref_root = btrfs_extent_data_ref_root(leaf, ref); 3508 ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref); 3509 ref_offset = btrfs_extent_data_ref_offset(leaf, ref); 3510 ref_count = btrfs_extent_data_ref_count(leaf, ref); 3511 3512 /* 3513 * This is an extent belonging to the free space cache, let's just delete 3514 * it and redo the search.
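 * (if the cache inode is already gone we get -ENOENT and simply scan the fs tree as usual)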
3515 */ 3516 if (ref_root == BTRFS_ROOT_TREE_OBJECTID) { 3517 ret = delete_block_group_cache(rc->extent_root->fs_info, 3518 NULL, ref_objectid); 3519 if (ret != -ENOENT) 3520 return ret; 3521 ret = 0; 3522 } 3523 3524 path = btrfs_alloc_path(); 3525 if (!path) 3526 return -ENOMEM; 3527 path->reada = 1; 3528 3529 root = read_fs_root(rc->extent_root->fs_info, ref_root); 3530 if (IS_ERR(root)) { 3531 err = PTR_ERR(root); 3532 goto out; 3533 } 3534 3535 key.objectid = ref_objectid; 3536 key.type = BTRFS_EXTENT_DATA_KEY; 3537 if (ref_offset > ((u64)-1 << 32)) 3538 key.offset = 0; 3539 else 3540 key.offset = ref_offset; 3541 3542 path->search_commit_root = 1; 3543 path->skip_locking = 1; 3544 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3545 if (ret < 0) { 3546 err = ret; 3547 goto out; 3548 } 3549 3550 leaf = path->nodes[0]; 3551 nritems = btrfs_header_nritems(leaf); 3552 /* 3553 * the references in tree blocks that use full backrefs 3554 * are not counted in 3555 */ 3556 if (block_use_full_backref(rc, leaf)) 3557 counted = 0; 3558 else 3559 counted = 1; 3560 rb_node = tree_search(blocks, leaf->start); 3561 if (rb_node) { 3562 if (counted) 3563 added = 1; 3564 else 3565 path->slots[0] = nritems; 3566 } 3567 3568 while (ref_count > 0) { 3569 while (path->slots[0] >= nritems) { 3570 ret = btrfs_next_leaf(root, path); 3571 if (ret < 0) { 3572 err = ret; 3573 goto out; 3574 } 3575 if (WARN_ON(ret > 0)) 3576 goto out; 3577 3578 leaf = path->nodes[0]; 3579 nritems = btrfs_header_nritems(leaf); 3580 added = 0; 3581 3582 if (block_use_full_backref(rc, leaf)) 3583 counted = 0; 3584 else 3585 counted = 1; 3586 rb_node = tree_search(blocks, leaf->start); 3587 if (rb_node) { 3588 if (counted) 3589 added = 1; 3590 else 3591 path->slots[0] = nritems; 3592 } 3593 } 3594 3595 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3596 if (WARN_ON(key.objectid != ref_objectid || 3597 key.type != BTRFS_EXTENT_DATA_KEY)) 3598 break; 3599 3600 fi = btrfs_item_ptr(leaf, path->slots[0], 3601 struct btrfs_file_extent_item); 3602 3603 if (btrfs_file_extent_type(leaf, fi) == 3604 BTRFS_FILE_EXTENT_INLINE) 3605 goto next; 3606 3607 if (btrfs_file_extent_disk_bytenr(leaf, fi) != 3608 extent_key->objectid) 3609 goto next; 3610 3611 key.offset -= btrfs_file_extent_offset(leaf, fi); 3612 if (key.offset != ref_offset) 3613 goto next; 3614 3615 if (counted) 3616 ref_count--; 3617 if (added) 3618 goto next; 3619 3620 if (!tree_block_processed(leaf->start, leaf->len, rc)) { 3621 block = kmalloc(sizeof(*block), GFP_NOFS); 3622 if (!block) { 3623 err = -ENOMEM; 3624 break; 3625 } 3626 block->bytenr = leaf->start; 3627 btrfs_item_key_to_cpu(leaf, &block->key, 0); 3628 block->level = 0; 3629 block->key_ready = 1; 3630 rb_node = tree_insert(blocks, block->bytenr, 3631 &block->rb_node); 3632 if (rb_node) 3633 backref_tree_panic(rb_node, -EEXIST, 3634 block->bytenr); 3635 } 3636 if (counted) 3637 added = 1; 3638 else 3639 path->slots[0] = nritems; 3640 next: 3641 path->slots[0]++; 3642 3643 } 3644 out: 3645 btrfs_free_path(path); 3646 return err; 3647 } 3648 3649 /* 3650 * helper to find all tree blocks that reference a given data extent 3651 */ 3652 static noinline_for_stack 3653 int add_data_references(struct reloc_control *rc, 3654 struct btrfs_key *extent_key, 3655 struct btrfs_path *path, 3656 struct rb_root *blocks) 3657 { 3658 struct btrfs_key key; 3659 struct extent_buffer *eb; 3660 struct btrfs_extent_data_ref *dref; 3661 struct btrfs_extent_inline_ref *iref; 3662 unsigned long ptr; 3663 unsigned long end; 3664 u32 
blocksize = btrfs_level_size(rc->extent_root, 0); 3665 int ret = 0; 3666 int err = 0; 3667 3668 eb = path->nodes[0]; 3669 ptr = btrfs_item_ptr_offset(eb, path->slots[0]); 3670 end = ptr + btrfs_item_size_nr(eb, path->slots[0]); 3671 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3672 if (ptr + sizeof(struct btrfs_extent_item_v0) == end) 3673 ptr = end; 3674 else 3675 #endif 3676 ptr += sizeof(struct btrfs_extent_item); 3677 3678 while (ptr < end) { 3679 iref = (struct btrfs_extent_inline_ref *)ptr; 3680 key.type = btrfs_extent_inline_ref_type(eb, iref); 3681 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3682 key.offset = btrfs_extent_inline_ref_offset(eb, iref); 3683 ret = __add_tree_block(rc, key.offset, blocksize, 3684 blocks); 3685 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3686 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 3687 ret = find_data_references(rc, extent_key, 3688 eb, dref, blocks); 3689 } else { 3690 BUG(); 3691 } 3692 if (ret) { 3693 err = ret; 3694 goto out; 3695 } 3696 ptr += btrfs_extent_inline_ref_size(key.type); 3697 } 3698 WARN_ON(ptr > end); 3699 3700 while (1) { 3701 cond_resched(); 3702 eb = path->nodes[0]; 3703 if (path->slots[0] >= btrfs_header_nritems(eb)) { 3704 ret = btrfs_next_leaf(rc->extent_root, path); 3705 if (ret < 0) { 3706 err = ret; 3707 break; 3708 } 3709 if (ret > 0) 3710 break; 3711 eb = path->nodes[0]; 3712 } 3713 3714 btrfs_item_key_to_cpu(eb, &key, path->slots[0]); 3715 if (key.objectid != extent_key->objectid) 3716 break; 3717 3718 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3719 if (key.type == BTRFS_SHARED_DATA_REF_KEY || 3720 key.type == BTRFS_EXTENT_REF_V0_KEY) { 3721 #else 3722 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY); 3723 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3724 #endif 3725 ret = __add_tree_block(rc, key.offset, blocksize, 3726 blocks); 3727 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3728 dref = btrfs_item_ptr(eb, path->slots[0], 3729 struct btrfs_extent_data_ref); 3730 ret = find_data_references(rc, extent_key, 3731 eb, dref, blocks); 3732 } else { 3733 ret = 0; 3734 } 3735 if (ret) { 3736 err = ret; 3737 break; 3738 } 3739 path->slots[0]++; 3740 } 3741 out: 3742 btrfs_release_path(path); 3743 if (err) 3744 free_block_list(blocks); 3745 return err; 3746 } 3747 3748 /* 3749 * helper to find next unprocessed extent 3750 */ 3751 static noinline_for_stack 3752 int find_next_extent(struct btrfs_trans_handle *trans, 3753 struct reloc_control *rc, struct btrfs_path *path, 3754 struct btrfs_key *extent_key) 3755 { 3756 struct btrfs_key key; 3757 struct extent_buffer *leaf; 3758 u64 start, end, last; 3759 int ret; 3760 3761 last = rc->block_group->key.objectid + rc->block_group->key.offset; 3762 while (1) { 3763 cond_resched(); 3764 if (rc->search_start >= last) { 3765 ret = 1; 3766 break; 3767 } 3768 3769 key.objectid = rc->search_start; 3770 key.type = BTRFS_EXTENT_ITEM_KEY; 3771 key.offset = 0; 3772 3773 path->search_commit_root = 1; 3774 path->skip_locking = 1; 3775 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 3776 0, 0); 3777 if (ret < 0) 3778 break; 3779 next: 3780 leaf = path->nodes[0]; 3781 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3782 ret = btrfs_next_leaf(rc->extent_root, path); 3783 if (ret != 0) 3784 break; 3785 leaf = path->nodes[0]; 3786 } 3787 3788 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3789 if (key.objectid >= last) { 3790 ret = 1; 3791 break; 3792 } 3793 3794 if (key.type != BTRFS_EXTENT_ITEM_KEY && 3795 key.type != BTRFS_METADATA_ITEM_KEY) { 3796 path->slots[0]++; 3797 
goto next; 3798 } 3799 3800 if (key.type == BTRFS_EXTENT_ITEM_KEY && 3801 key.objectid + key.offset <= rc->search_start) { 3802 path->slots[0]++; 3803 goto next; 3804 } 3805 3806 if (key.type == BTRFS_METADATA_ITEM_KEY && 3807 key.objectid + rc->extent_root->leafsize <= 3808 rc->search_start) { 3809 path->slots[0]++; 3810 goto next; 3811 } 3812 3813 ret = find_first_extent_bit(&rc->processed_blocks, 3814 key.objectid, &start, &end, 3815 EXTENT_DIRTY, NULL); 3816 3817 if (ret == 0 && start <= key.objectid) { 3818 btrfs_release_path(path); 3819 rc->search_start = end + 1; 3820 } else { 3821 if (key.type == BTRFS_EXTENT_ITEM_KEY) 3822 rc->search_start = key.objectid + key.offset; 3823 else 3824 rc->search_start = key.objectid + 3825 rc->extent_root->leafsize; 3826 memcpy(extent_key, &key, sizeof(key)); 3827 return 0; 3828 } 3829 } 3830 btrfs_release_path(path); 3831 return ret; 3832 } 3833 3834 static void set_reloc_control(struct reloc_control *rc) 3835 { 3836 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3837 3838 mutex_lock(&fs_info->reloc_mutex); 3839 fs_info->reloc_ctl = rc; 3840 mutex_unlock(&fs_info->reloc_mutex); 3841 } 3842 3843 static void unset_reloc_control(struct reloc_control *rc) 3844 { 3845 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3846 3847 mutex_lock(&fs_info->reloc_mutex); 3848 fs_info->reloc_ctl = NULL; 3849 mutex_unlock(&fs_info->reloc_mutex); 3850 } 3851 3852 static int check_extent_flags(u64 flags) 3853 { 3854 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3855 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3856 return 1; 3857 if (!(flags & BTRFS_EXTENT_FLAG_DATA) && 3858 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3859 return 1; 3860 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3861 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 3862 return 1; 3863 return 0; 3864 } 3865 3866 static noinline_for_stack 3867 int prepare_to_relocate(struct reloc_control *rc) 3868 { 3869 struct btrfs_trans_handle *trans; 3870 int ret; 3871 3872 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root, 3873 BTRFS_BLOCK_RSV_TEMP); 3874 if (!rc->block_rsv) 3875 return -ENOMEM; 3876 3877 /* 3878 * reserve some space for creating reloc trees. 3879 * btrfs_init_reloc_root will use them when there 3880 * is no reservation in transaction handle. 3881 */ 3882 ret = btrfs_block_rsv_add(rc->extent_root, rc->block_rsv, 3883 rc->extent_root->nodesize * 256, 3884 BTRFS_RESERVE_FLUSH_ALL); 3885 if (ret) 3886 return ret; 3887 3888 memset(&rc->cluster, 0, sizeof(rc->cluster)); 3889 rc->search_start = rc->block_group->key.objectid; 3890 rc->extents_found = 0; 3891 rc->nodes_relocated = 0; 3892 rc->merging_rsv_size = 0; 3893 3894 rc->create_reloc_tree = 1; 3895 set_reloc_control(rc); 3896 3897 trans = btrfs_join_transaction(rc->extent_root); 3898 if (IS_ERR(trans)) { 3899 unset_reloc_control(rc); 3900 /* 3901 * extent tree is not a ref_cow tree and has no reloc_root to 3902 * cleanup. And callers are responsible to free the above 3903 * block rsv. 
3904 */ 3905 return PTR_ERR(trans); 3906 } 3907 btrfs_commit_transaction(trans, rc->extent_root); 3908 return 0; 3909 } 3910 3911 static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 3912 { 3913 struct rb_root blocks = RB_ROOT; 3914 struct btrfs_key key; 3915 struct btrfs_trans_handle *trans = NULL; 3916 struct btrfs_path *path; 3917 struct btrfs_extent_item *ei; 3918 u64 flags; 3919 u32 item_size; 3920 int ret; 3921 int err = 0; 3922 int progress = 0; 3923 3924 path = btrfs_alloc_path(); 3925 if (!path) 3926 return -ENOMEM; 3927 path->reada = 1; 3928 3929 ret = prepare_to_relocate(rc); 3930 if (ret) { 3931 err = ret; 3932 goto out_free; 3933 } 3934 3935 while (1) { 3936 progress++; 3937 trans = btrfs_start_transaction(rc->extent_root, 0); 3938 if (IS_ERR(trans)) { 3939 err = PTR_ERR(trans); 3940 trans = NULL; 3941 break; 3942 } 3943 restart: 3944 if (update_backref_cache(trans, &rc->backref_cache)) { 3945 btrfs_end_transaction(trans, rc->extent_root); 3946 continue; 3947 } 3948 3949 ret = find_next_extent(trans, rc, path, &key); 3950 if (ret < 0) 3951 err = ret; 3952 if (ret != 0) 3953 break; 3954 3955 rc->extents_found++; 3956 3957 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 3958 struct btrfs_extent_item); 3959 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 3960 if (item_size >= sizeof(*ei)) { 3961 flags = btrfs_extent_flags(path->nodes[0], ei); 3962 ret = check_extent_flags(flags); 3963 BUG_ON(ret); 3964 3965 } else { 3966 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3967 u64 ref_owner; 3968 int path_change = 0; 3969 3970 BUG_ON(item_size != 3971 sizeof(struct btrfs_extent_item_v0)); 3972 ret = get_ref_objectid_v0(rc, path, &key, &ref_owner, 3973 &path_change); 3974 if (ref_owner < BTRFS_FIRST_FREE_OBJECTID) 3975 flags = BTRFS_EXTENT_FLAG_TREE_BLOCK; 3976 else 3977 flags = BTRFS_EXTENT_FLAG_DATA; 3978 3979 if (path_change) { 3980 btrfs_release_path(path); 3981 3982 path->search_commit_root = 1; 3983 path->skip_locking = 1; 3984 ret = btrfs_search_slot(NULL, rc->extent_root, 3985 &key, path, 0, 0); 3986 if (ret < 0) { 3987 err = ret; 3988 break; 3989 } 3990 BUG_ON(ret > 0); 3991 } 3992 #else 3993 BUG(); 3994 #endif 3995 } 3996 3997 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 3998 ret = add_tree_block(rc, &key, path, &blocks); 3999 } else if (rc->stage == UPDATE_DATA_PTRS && 4000 (flags & BTRFS_EXTENT_FLAG_DATA)) { 4001 ret = add_data_references(rc, &key, path, &blocks); 4002 } else { 4003 btrfs_release_path(path); 4004 ret = 0; 4005 } 4006 if (ret < 0) { 4007 err = ret; 4008 break; 4009 } 4010 4011 if (!RB_EMPTY_ROOT(&blocks)) { 4012 ret = relocate_tree_blocks(trans, rc, &blocks); 4013 if (ret < 0) { 4014 if (ret != -EAGAIN) { 4015 err = ret; 4016 break; 4017 } 4018 rc->extents_found--; 4019 rc->search_start = key.objectid; 4020 } 4021 } 4022 4023 if (rc->commit_transaction) { 4024 rc->commit_transaction = 0; 4025 ret = btrfs_commit_transaction(trans, rc->extent_root); 4026 BUG_ON(ret); 4027 } else { 4028 btrfs_end_transaction_throttle(trans, rc->extent_root); 4029 btrfs_btree_balance_dirty(rc->extent_root); 4030 } 4031 trans = NULL; 4032 4033 if (rc->stage == MOVE_DATA_EXTENTS && 4034 (flags & BTRFS_EXTENT_FLAG_DATA)) { 4035 rc->found_file_extent = 1; 4036 ret = relocate_data_extent(rc->data_inode, 4037 &key, &rc->cluster); 4038 if (ret < 0) { 4039 err = ret; 4040 break; 4041 } 4042 } 4043 } 4044 if (trans && progress && err == -ENOSPC) { 4045 ret = btrfs_force_chunk_alloc(trans, rc->extent_root, 4046 rc->block_group->flags); 4047 if (ret == 0) { 
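/* a new chunk was allocated for the block group, retry the relocation */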
4048 err = 0; 4049 progress = 0; 4050 goto restart; 4051 } 4052 } 4053 4054 btrfs_release_path(path); 4055 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, 4056 GFP_NOFS); 4057 4058 if (trans) { 4059 btrfs_end_transaction_throttle(trans, rc->extent_root); 4060 btrfs_btree_balance_dirty(rc->extent_root); 4061 } 4062 4063 if (!err) { 4064 ret = relocate_file_extent_cluster(rc->data_inode, 4065 &rc->cluster); 4066 if (ret < 0) 4067 err = ret; 4068 } 4069 4070 rc->create_reloc_tree = 0; 4071 set_reloc_control(rc); 4072 4073 backref_cache_cleanup(&rc->backref_cache); 4074 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 4075 4076 err = prepare_to_merge(rc, err); 4077 4078 merge_reloc_roots(rc); 4079 4080 rc->merge_reloc_tree = 0; 4081 unset_reloc_control(rc); 4082 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 4083 4084 /* get rid of pinned extents */ 4085 trans = btrfs_join_transaction(rc->extent_root); 4086 if (IS_ERR(trans)) 4087 err = PTR_ERR(trans); 4088 else 4089 btrfs_commit_transaction(trans, rc->extent_root); 4090 out_free: 4091 btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); 4092 btrfs_free_path(path); 4093 return err; 4094 } 4095 4096 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 4097 struct btrfs_root *root, u64 objectid) 4098 { 4099 struct btrfs_path *path; 4100 struct btrfs_inode_item *item; 4101 struct extent_buffer *leaf; 4102 int ret; 4103 4104 path = btrfs_alloc_path(); 4105 if (!path) 4106 return -ENOMEM; 4107 4108 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 4109 if (ret) 4110 goto out; 4111 4112 leaf = path->nodes[0]; 4113 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 4114 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item)); 4115 btrfs_set_inode_generation(leaf, item, 1); 4116 btrfs_set_inode_size(leaf, item, 0); 4117 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 4118 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 4119 BTRFS_INODE_PREALLOC); 4120 btrfs_mark_buffer_dirty(leaf); 4121 btrfs_release_path(path); 4122 out: 4123 btrfs_free_path(path); 4124 return ret; 4125 } 4126 4127 /* 4128 * helper to create inode for data relocation. 
4129 * the inode is in data relocation tree and its link count is 0 4130 */ 4131 static noinline_for_stack 4132 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, 4133 struct btrfs_block_group_cache *group) 4134 { 4135 struct inode *inode = NULL; 4136 struct btrfs_trans_handle *trans; 4137 struct btrfs_root *root; 4138 struct btrfs_key key; 4139 u64 objectid = BTRFS_FIRST_FREE_OBJECTID; 4140 int err = 0; 4141 4142 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID); 4143 if (IS_ERR(root)) 4144 return ERR_CAST(root); 4145 4146 trans = btrfs_start_transaction(root, 6); 4147 if (IS_ERR(trans)) 4148 return ERR_CAST(trans); 4149 4150 err = btrfs_find_free_objectid(root, &objectid); 4151 if (err) 4152 goto out; 4153 4154 err = __insert_orphan_inode(trans, root, objectid); 4155 BUG_ON(err); 4156 4157 key.objectid = objectid; 4158 key.type = BTRFS_INODE_ITEM_KEY; 4159 key.offset = 0; 4160 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); 4161 BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); 4162 BTRFS_I(inode)->index_cnt = group->key.objectid; 4163 4164 err = btrfs_orphan_add(trans, inode); 4165 out: 4166 btrfs_end_transaction(trans, root); 4167 btrfs_btree_balance_dirty(root); 4168 if (err) { 4169 if (inode) 4170 iput(inode); 4171 inode = ERR_PTR(err); 4172 } 4173 return inode; 4174 } 4175 4176 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) 4177 { 4178 struct reloc_control *rc; 4179 4180 rc = kzalloc(sizeof(*rc), GFP_NOFS); 4181 if (!rc) 4182 return NULL; 4183 4184 INIT_LIST_HEAD(&rc->reloc_roots); 4185 backref_cache_init(&rc->backref_cache); 4186 mapping_tree_init(&rc->reloc_root_tree); 4187 extent_io_tree_init(&rc->processed_blocks, 4188 fs_info->btree_inode->i_mapping); 4189 return rc; 4190 } 4191 4192 /* 4193 * function to relocate all extents in a block group. 
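 * relocation is done in two stages: data extents are moved to the new location first, then the pointers to the data are updated (MOVE_DATA_EXTENTS and UPDATE_DATA_PTRS).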
4194 */ 4195 int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) 4196 { 4197 struct btrfs_fs_info *fs_info = extent_root->fs_info; 4198 struct reloc_control *rc; 4199 struct inode *inode; 4200 struct btrfs_path *path; 4201 int ret; 4202 int rw = 0; 4203 int err = 0; 4204 4205 rc = alloc_reloc_control(fs_info); 4206 if (!rc) 4207 return -ENOMEM; 4208 4209 rc->extent_root = extent_root; 4210 4211 rc->block_group = btrfs_lookup_block_group(fs_info, group_start); 4212 BUG_ON(!rc->block_group); 4213 4214 if (!rc->block_group->ro) { 4215 ret = btrfs_set_block_group_ro(extent_root, rc->block_group); 4216 if (ret) { 4217 err = ret; 4218 goto out; 4219 } 4220 rw = 1; 4221 } 4222 4223 path = btrfs_alloc_path(); 4224 if (!path) { 4225 err = -ENOMEM; 4226 goto out; 4227 } 4228 4229 inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group, 4230 path); 4231 btrfs_free_path(path); 4232 4233 if (!IS_ERR(inode)) 4234 ret = delete_block_group_cache(fs_info, inode, 0); 4235 else 4236 ret = PTR_ERR(inode); 4237 4238 if (ret && ret != -ENOENT) { 4239 err = ret; 4240 goto out; 4241 } 4242 4243 rc->data_inode = create_reloc_inode(fs_info, rc->block_group); 4244 if (IS_ERR(rc->data_inode)) { 4245 err = PTR_ERR(rc->data_inode); 4246 rc->data_inode = NULL; 4247 goto out; 4248 } 4249 4250 printk(KERN_INFO "btrfs: relocating block group %llu flags %llu\n", 4251 rc->block_group->key.objectid, rc->block_group->flags); 4252 4253 ret = btrfs_start_delalloc_roots(fs_info, 0); 4254 if (ret < 0) { 4255 err = ret; 4256 goto out; 4257 } 4258 btrfs_wait_ordered_roots(fs_info, -1); 4259 4260 while (1) { 4261 mutex_lock(&fs_info->cleaner_mutex); 4262 ret = relocate_block_group(rc); 4263 mutex_unlock(&fs_info->cleaner_mutex); 4264 if (ret < 0) { 4265 err = ret; 4266 goto out; 4267 } 4268 4269 if (rc->extents_found == 0) 4270 break; 4271 4272 printk(KERN_INFO "btrfs: found %llu extents\n", 4273 rc->extents_found); 4274 4275 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { 4276 ret = btrfs_wait_ordered_range(rc->data_inode, 0, 4277 (u64)-1); 4278 if (ret) { 4279 err = ret; 4280 goto out; 4281 } 4282 invalidate_mapping_pages(rc->data_inode->i_mapping, 4283 0, -1); 4284 rc->stage = UPDATE_DATA_PTRS; 4285 } 4286 } 4287 4288 filemap_write_and_wait_range(fs_info->btree_inode->i_mapping, 4289 rc->block_group->key.objectid, 4290 rc->block_group->key.objectid + 4291 rc->block_group->key.offset - 1); 4292 4293 WARN_ON(rc->block_group->pinned > 0); 4294 WARN_ON(rc->block_group->reserved > 0); 4295 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0); 4296 out: 4297 if (err && rw) 4298 btrfs_set_block_group_rw(extent_root, rc->block_group); 4299 iput(rc->data_inode); 4300 btrfs_put_block_group(rc->block_group); 4301 kfree(rc); 4302 return err; 4303 } 4304 4305 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) 4306 { 4307 struct btrfs_trans_handle *trans; 4308 int ret, err; 4309 4310 trans = btrfs_start_transaction(root->fs_info->tree_root, 0); 4311 if (IS_ERR(trans)) 4312 return PTR_ERR(trans); 4313 4314 memset(&root->root_item.drop_progress, 0, 4315 sizeof(root->root_item.drop_progress)); 4316 root->root_item.drop_level = 0; 4317 btrfs_set_root_refs(&root->root_item, 0); 4318 ret = btrfs_update_root(trans, root->fs_info->tree_root, 4319 &root->root_key, &root->root_item); 4320 4321 err = btrfs_end_transaction(trans, root->fs_info->tree_root); 4322 if (err) 4323 return err; 4324 return ret; 4325 } 4326 4327 /* 4328 * recover relocation interrupted by system 
crash. 4329 * 4330 * this function resumes merging reloc trees with corresponding fs trees. 4331 * this is important for keeping the sharing of tree blocks 4332 */ 4333 int btrfs_recover_relocation(struct btrfs_root *root) 4334 { 4335 LIST_HEAD(reloc_roots); 4336 struct btrfs_key key; 4337 struct btrfs_root *fs_root; 4338 struct btrfs_root *reloc_root; 4339 struct btrfs_path *path; 4340 struct extent_buffer *leaf; 4341 struct reloc_control *rc = NULL; 4342 struct btrfs_trans_handle *trans; 4343 int ret; 4344 int err = 0; 4345 4346 path = btrfs_alloc_path(); 4347 if (!path) 4348 return -ENOMEM; 4349 path->reada = -1; 4350 4351 key.objectid = BTRFS_TREE_RELOC_OBJECTID; 4352 key.type = BTRFS_ROOT_ITEM_KEY; 4353 key.offset = (u64)-1; 4354 4355 while (1) { 4356 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, 4357 path, 0, 0); 4358 if (ret < 0) { 4359 err = ret; 4360 goto out; 4361 } 4362 if (ret > 0) { 4363 if (path->slots[0] == 0) 4364 break; 4365 path->slots[0]--; 4366 } 4367 leaf = path->nodes[0]; 4368 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4369 btrfs_release_path(path); 4370 4371 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || 4372 key.type != BTRFS_ROOT_ITEM_KEY) 4373 break; 4374 4375 reloc_root = btrfs_read_fs_root(root, &key); 4376 if (IS_ERR(reloc_root)) { 4377 err = PTR_ERR(reloc_root); 4378 goto out; 4379 } 4380 4381 list_add(&reloc_root->root_list, &reloc_roots); 4382 4383 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 4384 fs_root = read_fs_root(root->fs_info, 4385 reloc_root->root_key.offset); 4386 if (IS_ERR(fs_root)) { 4387 ret = PTR_ERR(fs_root); 4388 if (ret != -ENOENT) { 4389 err = ret; 4390 goto out; 4391 } 4392 ret = mark_garbage_root(reloc_root); 4393 if (ret < 0) { 4394 err = ret; 4395 goto out; 4396 } 4397 } 4398 } 4399 4400 if (key.offset == 0) 4401 break; 4402 4403 key.offset--; 4404 } 4405 btrfs_release_path(path); 4406 4407 if (list_empty(&reloc_roots)) 4408 goto out; 4409 4410 rc = alloc_reloc_control(root->fs_info); 4411 if (!rc) { 4412 err = -ENOMEM; 4413 goto out; 4414 } 4415 4416 rc->extent_root = root->fs_info->extent_root; 4417 4418 set_reloc_control(rc); 4419 4420 trans = btrfs_join_transaction(rc->extent_root); 4421 if (IS_ERR(trans)) { 4422 unset_reloc_control(rc); 4423 err = PTR_ERR(trans); 4424 goto out_free; 4425 } 4426 4427 rc->merge_reloc_tree = 1; 4428 4429 while (!list_empty(&reloc_roots)) { 4430 reloc_root = list_entry(reloc_roots.next, 4431 struct btrfs_root, root_list); 4432 list_del(&reloc_root->root_list); 4433 4434 if (btrfs_root_refs(&reloc_root->root_item) == 0) { 4435 list_add_tail(&reloc_root->root_list, 4436 &rc->reloc_roots); 4437 continue; 4438 } 4439 4440 fs_root = read_fs_root(root->fs_info, 4441 reloc_root->root_key.offset); 4442 if (IS_ERR(fs_root)) { 4443 err = PTR_ERR(fs_root); 4444 goto out_free; 4445 } 4446 4447 err = __add_reloc_root(reloc_root); 4448 BUG_ON(err < 0); /* -ENOMEM or logic error */ 4449 fs_root->reloc_root = reloc_root; 4450 } 4451 4452 err = btrfs_commit_transaction(trans, rc->extent_root); 4453 if (err) 4454 goto out_free; 4455 4456 merge_reloc_roots(rc); 4457 4458 unset_reloc_control(rc); 4459 4460 trans = btrfs_join_transaction(rc->extent_root); 4461 if (IS_ERR(trans)) 4462 err = PTR_ERR(trans); 4463 else 4464 err = btrfs_commit_transaction(trans, rc->extent_root); 4465 out_free: 4466 kfree(rc); 4467 out: 4468 if (!list_empty(&reloc_roots)) 4469 free_reloc_roots(&reloc_roots); 4470 4471 btrfs_free_path(path); 4472 4473 if (err == 0) { 4474 /* cleanup orphan inode in data 

/*
 * helper to add ordered checksums for data relocation.
 *
 * cloning the checksums properly handles nodatasum extents.
 * it also avoids the CPU cost of re-calculating the checksums.
 */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list, 0);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(inode, ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}
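
/*
 * Worked example of the csum rebasing above (all numbers invented for
 * illustration): suppose the cluster was read through the data reloc
 * inode from disk_bytenr = 1M with len = 128K, and the ordered extent
 * writes it back out at ordered->start = 4M.  A csum item found at
 * sums->bytenr = 1M + 4K then moves to
 *
 *	new_bytenr = 4M + ((1M + 4K) - 1M) = 4M + 4K
 *
 * i.e. each checksum keeps its offset within the extent while the
 * extent as a whole is rebased onto the new disk location.
 */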

int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct reloc_control *rc;
	struct backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = root->fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (buf == root->node)
			__update_reloc_root(root, cow->start);
	}

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		drop_node_buffer(node);
		extent_buffer_get(cow);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			__mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}

/*
 * called before creating snapshot. it calculates metadata reservation
 * required for relocating tree blocks in the snapshot
 */
void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct reloc_control *rc;

	root = pending->root;
	if (!root->reloc_root)
		return;

	rc = root->fs_info->reloc_ctl;
	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * relocation is in the stage of merging trees. the space
	 * used by merging a reloc tree is twice the size of
	 * relocated tree nodes in the worst case. half for cowing
	 * the reloc tree, half for cowing the fs tree. the space
	 * used by cowing the reloc tree will be freed after the
	 * tree is dropped. if we create a snapshot, cowing the fs
	 * tree may use more space than it frees. so we need to
	 * reserve extra space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}

/*
 * called after snapshot is created. migrate block reservation
 * and create reloc root for the newly created snapshot
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc;
	int ret;

	if (!root->reloc_root)
		return 0;

	rc = root->fs_info->reloc_ctl;
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	new_root->reloc_root = reloc_root;

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}
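
/*
 * Worked example of the reservation hand-off above (sizes invented for
 * illustration): if rc->nodes_relocated is 16M when a snapshot is taken
 * during the merge stage, btrfs_reloc_pre_snapshot() adds 16M to
 * *bytes_to_reserve so the snapshot code reserves it up front, and
 * btrfs_reloc_post_snapshot() then migrates those 16M from
 * pending->block_rsv into rc->block_rsv while growing merging_rsv_size
 * by the same amount.  This matches the worst case described in
 * btrfs_reloc_pre_snapshot(): each relocated node may be cowed once in
 * the reloc tree and once in the fs tree.
 */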