/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "qgroup.h"
#include "print-tree.h"

/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * represent a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, may not be up to date */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer obtained by COWing the block */
	struct extent_buffer *eb;
	/* level of the tree block */
	unsigned int level:8;
	/* is the block in a non-reference-counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if the corresponding block has been COWed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * represent a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER	0
#define UPPER	1
#define RELOCATION_RESERVED_NODES	256
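
/*
 * a backref_edge connects two backref_nodes on adjacent levels. both
 * arrays are indexed by LOWER/UPPER: edge->node[LOWER] is the child
 * block and edge->node[UPPER] its referencing parent; edge->list[LOWER]
 * links the edge into the child's ->upper list, edge->list[UPPER] into
 * the parent's ->lower list (or a temporary pending list while the
 * parent's backrefs are still unchecked).
 */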
struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been COWed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been COWed in the current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};

/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represent a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}

static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}

static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}
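
/*
 * the rb-tree helpers below work on any structure that begins with the
 * struct tree_entry layout (an rb_node followed by a bytenr), which is
 * why backref_node, mapping_node and tree_block can all share them. on
 * a bytenr collision tree_insert() returns the existing node instead of
 * inserting; callers treat that as -EEXIST.
 */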
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}

static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno,
		    "Inconsistency in backref cache found at offset %llu",
		    bytenr);
}
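
/*
 * walk_up_backref() and walk_down_backref() together implement a
 * depth-first walk over every reference path of a block: walk_up
 * follows the first upper edge at each level and records the edges
 * taken in edges[], walk_down backtracks to the deepest recorded edge
 * that still has an unvisited sibling. edges[] is the explicit DFS
 * stack and *index its depth.
 */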
/*
 * walk up backref nodes until we reach the node that represents the
 * tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}

static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}

/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}

static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree.
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing of the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}
static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}

/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
	    root_objectid == BTRFS_UUID_TREE_OBJECTID ||
	    root_objectid == BTRFS_QUOTA_TREE_OBJECTID ||
	    root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return 1;
	return 0;
}

static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
				       u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_get_fs_root(fs_info, &key, false);
}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
struct btrfs_root *find_tree_root(struct reloc_control *rc,
				  struct extent_buffer *leaf,
				  struct btrfs_extent_ref_v0 *ref0)
{
	struct btrfs_root *root;
	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
	u64 generation = btrfs_ref_generation_v0(leaf, ref0);

	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);

	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
	BUG_ON(IS_ERR(root));

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    generation != btrfs_root_generation(&root->root_item))
		return NULL;

	return root;
}
#endif
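
/*
 * find the inline backref area of a tree block's extent item. on
 * success *ptr and *end delimit the inline references (for an
 * EXTENT_ITEM_KEY item they follow the btrfs_tree_block_info, for a
 * METADATA_ITEM_KEY item they follow the extent item directly).
 * returns 1 when the item has no inline references to walk.
 */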
static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		return 1;
	}
#endif
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}

/*
 * build backref tree for a given tree block. the root of the backref
 * tree corresponds to the tree block, leaves of the backref tree
 * correspond to roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when the tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1;
	struct btrfs_path *path2;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list);
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = READA_FORWARD;
	path2->reada = READA_FORWARD;

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	ASSERT(ret);
	ASSERT(path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to the pending list if we
		 * need to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}
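
	/*
	 * iterate over all backref items of cur->bytenr in the commit
	 * root of the extent tree: inline references are consumed via
	 * the ptr/end window set up by find_inline_backref(), keyed
	 * references by advancing path1 one slot at a time.
	 */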
	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline backref */
			struct btrfs_extent_inline_ref *iref;
			int type;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				err = -EINVAL;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);

			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backrefs of this type.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				ASSERT(root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to the pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}
		/* key.type == BTRFS_TREE_BLOCK_REF_KEY */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/*
		 * search the tree to find upper level blocks that
		 * reference the block.
		 */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		if (btrfs_node_blockptr(eb, path2->slots[level]) !=
		    cur->bytenr) {
			btrfs_err(root->fs_info,
	"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
				  cur->bytenr, level - 1, root->objectid,
				  node_key->objectid, node_key->type,
				  node_key->offset);
			err = -ENOENT;
			goto out;
		}
		lower = cur;
		need_check = true;
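		/*
		 * climb the path we just searched in the owning tree:
		 * every node above the block gets a backref node and an
		 * edge to the level below it, until we hit a block that
		 * is already cached or reach the tree root.
		 */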
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				ASSERT(btrfs_root_bytenr(&root->root_item) ==
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!test_bit(BTRFS_ROOT_REF_COWS,
					      &root->state))
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to the pending list if we
				 * need to check its backrefs, we only do
				 * this once while walking up a tree as we
				 * will catch anything else later on.
				 */
				if (!upper->checked && need_check) {
					need_check = false;
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else {
					if (upper->checked)
						need_check = true;
					INIT_LIST_HEAD(&edge->list[UPPER]);
				}
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node)
				break;
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(path2);
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything goes well, connect backref nodes and insert backref nodes
	 * into the cache.
	 */
	ASSERT(node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		if (!upper->checked) {
			/*
			 * Still want to blow up for developers since this is a
			 * logic bug.
			 */
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}
		if (cowonly != upper->cowonly) {
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}

		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		ASSERT(list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
		}
		while (!list_empty(&list)) {
			edge = list_first_entry(&list, struct backref_edge,
						list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);

			/*
			 * Lower is no longer linked to any upper backref nodes
			 * and isn't in the cache, we can free it ourselves.
			 */
			if (list_empty(&lower->upper) &&
			    RB_EMPTY_NODE(&lower->rb_node))
				list_add(&lower->list, &useless);

			if (!RB_EMPTY_NODE(&upper->rb_node))
				continue;

			/* Add this guy's upper edges to the list to process */
			list_for_each_entry(edge, &upper->upper, list[LOWER])
				list_add_tail(&edge->list[UPPER], &list);
			if (list_empty(&upper->upper))
				list_add(&upper->list, &useless);
		}

		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
			if (lower == node)
				node = NULL;
			free_backref_node(cache, lower);
		}

		free_backref_node(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	return node;
}

/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);
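
	/*
	 * the backref node for the source root may be cached under the
	 * fs tree's commit root or, failing that, under the commit root
	 * of its reloc tree; try both before giving up.
	 */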
	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(fs_info, -EEXIST,
			    "Duplicate root found for start=%llu while inserting into relocation tree",
			    node->bytenr);
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}
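
/*
 * all reloc tree roots share the objectid BTRFS_TREE_RELOC_OBJECTID and
 * are told apart by key.offset, which holds the objectid of the source
 * root; a reloc tree is therefore looked up by the root it shadows.
 */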
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) both when the relocation root is
		 * created inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have the RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
	BUG_ON(IS_ERR(reloc_root));
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}

/*
 * create reloc tree for a given fs tree. reloc tree is just a
 * snapshot of the fs tree with special root objectid.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	if (!rc || !rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	root->reloc_root = reloc_root;
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!root->reloc_root)
		goto out;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		root->reloc_root = NULL;
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	BUG_ON(ret);

out:
	return 0;
}

/*
 * helper to find first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}
static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	if (bytenr >= block_group->key.objectid &&
	    bytenr < block_group->key.objectid + block_group->key.offset)
		return 1;
	return 0;
}

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
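
	/*
	 * the relocation data inode mirrors the block group being
	 * relocated: its index_cnt holds the block group's start
	 * address (set when the relocation data inode is created), so
	 * subtracting it turns the extent's logical address into a file
	 * offset within that inode.
	 */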
	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * readpage to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(BTRFS_I(inode),
						key.offset, end, 1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
					   num_bytes, parent,
					   btrfs_header_owner(leaf),
					   key.objectid, key.offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					parent, btrfs_header_owner(leaf),
					key.objectid, key.offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in the fs tree with the new blocks
 * in the reloc tree. tree blocks that haven't been modified since
 * the reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
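	/*
	 * the loop below runs twice per replacement: a read-only pass
	 * (cow == 0) that walks down to the first replaceable block,
	 * then a second pass that COWs the path to it before doing the
	 * actual swap.
	 */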
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							 path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			} else if (!extent_buffer_uptodate(eb)) {
				ret = -EIO;
				free_extent_buffer(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}
		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * Inform qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *    and tree block numbers, if current trans doesn't free
		 *    data reloc tree inode.
		 */
		ret = btrfs_qgroup_trace_subtree(trans, src, parent,
				btrfs_header_generation(parent),
				btrfs_header_level(parent));
		if (ret < 0)
			break;
		ret = btrfs_qgroup_trace_subtree(trans, dest,
				path->nodes[level],
				btrfs_header_generation(path->nodes[level]),
				btrfs_header_level(path->nodes[level]));
		if (ret < 0)
			break;

		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);
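
		/*
		 * both trees now point at the other tree's block: take
		 * the new references before dropping the old ones so
		 * neither extent ever looks unreferenced.
		 */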
1944 */ 1945 btrfs_set_node_blockptr(parent, slot, new_bytenr); 1946 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen); 1947 btrfs_mark_buffer_dirty(parent); 1948 1949 btrfs_set_node_blockptr(path->nodes[level], 1950 path->slots[level], old_bytenr); 1951 btrfs_set_node_ptr_generation(path->nodes[level], 1952 path->slots[level], old_ptr_gen); 1953 btrfs_mark_buffer_dirty(path->nodes[level]); 1954 1955 ret = btrfs_inc_extent_ref(trans, src, old_bytenr, 1956 blocksize, path->nodes[level]->start, 1957 src->root_key.objectid, level - 1, 0); 1958 BUG_ON(ret); 1959 ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, 1960 blocksize, 0, dest->root_key.objectid, 1961 level - 1, 0); 1962 BUG_ON(ret); 1963 1964 ret = btrfs_free_extent(trans, src, new_bytenr, blocksize, 1965 path->nodes[level]->start, 1966 src->root_key.objectid, level - 1, 0); 1967 BUG_ON(ret); 1968 1969 ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize, 1970 0, dest->root_key.objectid, level - 1, 1971 0); 1972 BUG_ON(ret); 1973 1974 btrfs_unlock_up_safe(path, 0); 1975 1976 ret = level; 1977 break; 1978 } 1979 btrfs_tree_unlock(parent); 1980 free_extent_buffer(parent); 1981 return ret; 1982 } 1983 1984 /* 1985 * helper to find next relocated block in reloc tree 1986 */ 1987 static noinline_for_stack 1988 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, 1989 int *level) 1990 { 1991 struct extent_buffer *eb; 1992 int i; 1993 u64 last_snapshot; 1994 u32 nritems; 1995 1996 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 1997 1998 for (i = 0; i < *level; i++) { 1999 free_extent_buffer(path->nodes[i]); 2000 path->nodes[i] = NULL; 2001 } 2002 2003 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { 2004 eb = path->nodes[i]; 2005 nritems = btrfs_header_nritems(eb); 2006 while (path->slots[i] + 1 < nritems) { 2007 path->slots[i]++; 2008 if (btrfs_node_ptr_generation(eb, path->slots[i]) <= 2009 last_snapshot) 2010 continue; 2011 2012 *level = i; 2013 return 0; 2014 } 2015 free_extent_buffer(path->nodes[i]); 2016 path->nodes[i] = NULL; 2017 } 2018 return 1; 2019 } 2020 2021 /* 2022 * walk down reloc tree to find relocated block of lowest level 2023 */ 2024 static noinline_for_stack 2025 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, 2026 int *level) 2027 { 2028 struct btrfs_fs_info *fs_info = root->fs_info; 2029 struct extent_buffer *eb = NULL; 2030 int i; 2031 u64 bytenr; 2032 u64 ptr_gen = 0; 2033 u64 last_snapshot; 2034 u32 nritems; 2035 2036 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 2037 2038 for (i = *level; i > 0; i--) { 2039 eb = path->nodes[i]; 2040 nritems = btrfs_header_nritems(eb); 2041 while (path->slots[i] < nritems) { 2042 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); 2043 if (ptr_gen > last_snapshot) 2044 break; 2045 path->slots[i]++; 2046 } 2047 if (path->slots[i] >= nritems) { 2048 if (i == *level) 2049 break; 2050 *level = i + 1; 2051 return 0; 2052 } 2053 if (i == 1) { 2054 *level = i; 2055 return 0; 2056 } 2057 2058 bytenr = btrfs_node_blockptr(eb, path->slots[i]); 2059 eb = read_tree_block(fs_info, bytenr, ptr_gen); 2060 if (IS_ERR(eb)) { 2061 return PTR_ERR(eb); 2062 } else if (!extent_buffer_uptodate(eb)) { 2063 free_extent_buffer(eb); 2064 return -EIO; 2065 } 2066 BUG_ON(btrfs_header_level(eb) != i - 1); 2067 path->nodes[i - 1] = eb; 2068 path->slots[i - 1] = 0; 2069 } 2070 return 1; 2071 } 2072 2073 /* 2074 * invalidate extent cache for file extents whose key in range of 2075 * [min_key, max_key) 2076 
*/ 2077 static int invalidate_extent_cache(struct btrfs_root *root, 2078 struct btrfs_key *min_key, 2079 struct btrfs_key *max_key) 2080 { 2081 struct btrfs_fs_info *fs_info = root->fs_info; 2082 struct inode *inode = NULL; 2083 u64 objectid; 2084 u64 start, end; 2085 u64 ino; 2086 2087 objectid = min_key->objectid; 2088 while (1) { 2089 cond_resched(); 2090 iput(inode); 2091 2092 if (objectid > max_key->objectid) 2093 break; 2094 2095 inode = find_next_inode(root, objectid); 2096 if (!inode) 2097 break; 2098 ino = btrfs_ino(BTRFS_I(inode)); 2099 2100 if (ino > max_key->objectid) { 2101 iput(inode); 2102 break; 2103 } 2104 2105 objectid = ino + 1; 2106 if (!S_ISREG(inode->i_mode)) 2107 continue; 2108 2109 if (unlikely(min_key->objectid == ino)) { 2110 if (min_key->type > BTRFS_EXTENT_DATA_KEY) 2111 continue; 2112 if (min_key->type < BTRFS_EXTENT_DATA_KEY) 2113 start = 0; 2114 else { 2115 start = min_key->offset; 2116 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize)); 2117 } 2118 } else { 2119 start = 0; 2120 } 2121 2122 if (unlikely(max_key->objectid == ino)) { 2123 if (max_key->type < BTRFS_EXTENT_DATA_KEY) 2124 continue; 2125 if (max_key->type > BTRFS_EXTENT_DATA_KEY) { 2126 end = (u64)-1; 2127 } else { 2128 if (max_key->offset == 0) 2129 continue; 2130 end = max_key->offset; 2131 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); 2132 end--; 2133 } 2134 } else { 2135 end = (u64)-1; 2136 } 2137 2138 /* the lock_extent waits for readpage to complete */ 2139 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 2140 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1); 2141 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 2142 } 2143 return 0; 2144 } 2145 2146 static int find_next_key(struct btrfs_path *path, int level, 2147 struct btrfs_key *key) 2148 2149 { 2150 while (level < BTRFS_MAX_LEVEL) { 2151 if (!path->nodes[level]) 2152 break; 2153 if (path->slots[level] + 1 < 2154 btrfs_header_nritems(path->nodes[level])) { 2155 btrfs_node_key_to_cpu(path->nodes[level], key, 2156 path->slots[level] + 1); 2157 return 0; 2158 } 2159 level++; 2160 } 2161 return 1; 2162 } 2163 2164 /* 2165 * merge the relocated tree blocks in reloc tree with corresponding 2166 * fs tree. 
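 *
 * In outline (a sketch of the loop below, not additional logic):
 *
 *	while (1) {
 *		refill rc->block_rsv and start a transaction;
 *		walk_down_reloc_tree();   find the next relocated block
 *		replace_path();           swap fs tree / reloc tree blocks
 *		walk_up_reloc_tree();     advance to the next subtree
 *		save drop_progress and end the transaction;
 *	}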
2167 */ 2168 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, 2169 struct btrfs_root *root) 2170 { 2171 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2172 LIST_HEAD(inode_list); 2173 struct btrfs_key key; 2174 struct btrfs_key next_key; 2175 struct btrfs_trans_handle *trans = NULL; 2176 struct btrfs_root *reloc_root; 2177 struct btrfs_root_item *root_item; 2178 struct btrfs_path *path; 2179 struct extent_buffer *leaf; 2180 int level; 2181 int max_level; 2182 int replaced = 0; 2183 int ret; 2184 int err = 0; 2185 u32 min_reserved; 2186 2187 path = btrfs_alloc_path(); 2188 if (!path) 2189 return -ENOMEM; 2190 path->reada = READA_FORWARD; 2191 2192 reloc_root = root->reloc_root; 2193 root_item = &reloc_root->root_item; 2194 2195 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 2196 level = btrfs_root_level(root_item); 2197 extent_buffer_get(reloc_root->node); 2198 path->nodes[level] = reloc_root->node; 2199 path->slots[level] = 0; 2200 } else { 2201 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 2202 2203 level = root_item->drop_level; 2204 BUG_ON(level == 0); 2205 path->lowest_level = level; 2206 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); 2207 path->lowest_level = 0; 2208 if (ret < 0) { 2209 btrfs_free_path(path); 2210 return ret; 2211 } 2212 2213 btrfs_node_key_to_cpu(path->nodes[level], &next_key, 2214 path->slots[level]); 2215 WARN_ON(memcmp(&key, &next_key, sizeof(key))); 2216 2217 btrfs_unlock_up_safe(path, 0); 2218 } 2219 2220 min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2221 memset(&next_key, 0, sizeof(next_key)); 2222 2223 while (1) { 2224 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved, 2225 BTRFS_RESERVE_FLUSH_ALL); 2226 if (ret) { 2227 err = ret; 2228 goto out; 2229 } 2230 trans = btrfs_start_transaction(root, 0); 2231 if (IS_ERR(trans)) { 2232 err = PTR_ERR(trans); 2233 trans = NULL; 2234 goto out; 2235 } 2236 trans->block_rsv = rc->block_rsv; 2237 2238 replaced = 0; 2239 max_level = level; 2240 2241 ret = walk_down_reloc_tree(reloc_root, path, &level); 2242 if (ret < 0) { 2243 err = ret; 2244 goto out; 2245 } 2246 if (ret > 0) 2247 break; 2248 2249 if (!find_next_key(path, level, &key) && 2250 btrfs_comp_cpu_keys(&next_key, &key) >= 0) { 2251 ret = 0; 2252 } else { 2253 ret = replace_path(trans, root, reloc_root, path, 2254 &next_key, level, max_level); 2255 } 2256 if (ret < 0) { 2257 err = ret; 2258 goto out; 2259 } 2260 2261 if (ret > 0) { 2262 level = ret; 2263 btrfs_node_key_to_cpu(path->nodes[level], &key, 2264 path->slots[level]); 2265 replaced = 1; 2266 } 2267 2268 ret = walk_up_reloc_tree(reloc_root, path, &level); 2269 if (ret > 0) 2270 break; 2271 2272 BUG_ON(level == 0); 2273 /* 2274 * save the merging progress in the drop_progress. 2275 * this is OK since root refs == 1 in this case. 2276 */ 2277 btrfs_node_key(path->nodes[level], &root_item->drop_progress, 2278 path->slots[level]); 2279 root_item->drop_level = level; 2280 2281 btrfs_end_transaction_throttle(trans); 2282 trans = NULL; 2283 2284 btrfs_btree_balance_dirty(fs_info); 2285 2286 if (replaced && rc->stage == UPDATE_DATA_PTRS) 2287 invalidate_extent_cache(root, &key, &next_key); 2288 } 2289 2290 /* 2291 * handle the case only one block in the fs tree need to be 2292 * relocated and the block is tree root. 
2293 */ 2294 leaf = btrfs_lock_root_node(root); 2295 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf); 2296 btrfs_tree_unlock(leaf); 2297 free_extent_buffer(leaf); 2298 if (ret < 0) 2299 err = ret; 2300 out: 2301 btrfs_free_path(path); 2302 2303 if (err == 0) { 2304 memset(&root_item->drop_progress, 0, 2305 sizeof(root_item->drop_progress)); 2306 root_item->drop_level = 0; 2307 btrfs_set_root_refs(root_item, 0); 2308 btrfs_update_reloc_root(trans, root); 2309 } 2310 2311 if (trans) 2312 btrfs_end_transaction_throttle(trans); 2313 2314 btrfs_btree_balance_dirty(fs_info); 2315 2316 if (replaced && rc->stage == UPDATE_DATA_PTRS) 2317 invalidate_extent_cache(root, &key, &next_key); 2318 2319 return err; 2320 } 2321 2322 static noinline_for_stack 2323 int prepare_to_merge(struct reloc_control *rc, int err) 2324 { 2325 struct btrfs_root *root = rc->extent_root; 2326 struct btrfs_fs_info *fs_info = root->fs_info; 2327 struct btrfs_root *reloc_root; 2328 struct btrfs_trans_handle *trans; 2329 LIST_HEAD(reloc_roots); 2330 u64 num_bytes = 0; 2331 int ret; 2332 2333 mutex_lock(&fs_info->reloc_mutex); 2334 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2335 rc->merging_rsv_size += rc->nodes_relocated * 2; 2336 mutex_unlock(&fs_info->reloc_mutex); 2337 2338 again: 2339 if (!err) { 2340 num_bytes = rc->merging_rsv_size; 2341 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes, 2342 BTRFS_RESERVE_FLUSH_ALL); 2343 if (ret) 2344 err = ret; 2345 } 2346 2347 trans = btrfs_join_transaction(rc->extent_root); 2348 if (IS_ERR(trans)) { 2349 if (!err) 2350 btrfs_block_rsv_release(fs_info, rc->block_rsv, 2351 num_bytes); 2352 return PTR_ERR(trans); 2353 } 2354 2355 if (!err) { 2356 if (num_bytes != rc->merging_rsv_size) { 2357 btrfs_end_transaction(trans); 2358 btrfs_block_rsv_release(fs_info, rc->block_rsv, 2359 num_bytes); 2360 goto again; 2361 } 2362 } 2363 2364 rc->merge_reloc_tree = 1; 2365 2366 while (!list_empty(&rc->reloc_roots)) { 2367 reloc_root = list_entry(rc->reloc_roots.next, 2368 struct btrfs_root, root_list); 2369 list_del_init(&reloc_root->root_list); 2370 2371 root = read_fs_root(fs_info, reloc_root->root_key.offset); 2372 BUG_ON(IS_ERR(root)); 2373 BUG_ON(root->reloc_root != reloc_root); 2374 2375 /* 2376 * set reference count to 1, so btrfs_recover_relocation 2377 * knows it should resumes merging 2378 */ 2379 if (!err) 2380 btrfs_set_root_refs(&reloc_root->root_item, 1); 2381 btrfs_update_reloc_root(trans, root); 2382 2383 list_add(&reloc_root->root_list, &reloc_roots); 2384 } 2385 2386 list_splice(&reloc_roots, &rc->reloc_roots); 2387 2388 if (!err) 2389 btrfs_commit_transaction(trans); 2390 else 2391 btrfs_end_transaction(trans); 2392 return err; 2393 } 2394 2395 static noinline_for_stack 2396 void free_reloc_roots(struct list_head *list) 2397 { 2398 struct btrfs_root *reloc_root; 2399 2400 while (!list_empty(list)) { 2401 reloc_root = list_entry(list->next, struct btrfs_root, 2402 root_list); 2403 __del_reloc_root(reloc_root); 2404 free_extent_buffer(reloc_root->node); 2405 free_extent_buffer(reloc_root->commit_root); 2406 reloc_root->node = NULL; 2407 reloc_root->commit_root = NULL; 2408 } 2409 } 2410 2411 static noinline_for_stack 2412 void merge_reloc_roots(struct reloc_control *rc) 2413 { 2414 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2415 struct btrfs_root *root; 2416 struct btrfs_root *reloc_root; 2417 LIST_HEAD(reloc_roots); 2418 int found = 0; 2419 int ret = 0; 2420 again: 2421 root = rc->extent_root; 2422 2423 /* 2424 * this 
serializes us with btrfs_record_root_in_transaction, 2425 * we have to make sure nobody is in the middle of 2426 * adding their roots to the list while we are 2427 * doing this splice 2428 */ 2429 mutex_lock(&fs_info->reloc_mutex); 2430 list_splice_init(&rc->reloc_roots, &reloc_roots); 2431 mutex_unlock(&fs_info->reloc_mutex); 2432 2433 while (!list_empty(&reloc_roots)) { 2434 found = 1; 2435 reloc_root = list_entry(reloc_roots.next, 2436 struct btrfs_root, root_list); 2437 2438 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 2439 root = read_fs_root(fs_info, 2440 reloc_root->root_key.offset); 2441 BUG_ON(IS_ERR(root)); 2442 BUG_ON(root->reloc_root != reloc_root); 2443 2444 ret = merge_reloc_root(rc, root); 2445 if (ret) { 2446 if (list_empty(&reloc_root->root_list)) 2447 list_add_tail(&reloc_root->root_list, 2448 &reloc_roots); 2449 goto out; 2450 } 2451 } else { 2452 list_del_init(&reloc_root->root_list); 2453 } 2454 2455 ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1); 2456 if (ret < 0) { 2457 if (list_empty(&reloc_root->root_list)) 2458 list_add_tail(&reloc_root->root_list, 2459 &reloc_roots); 2460 goto out; 2461 } 2462 } 2463 2464 if (found) { 2465 found = 0; 2466 goto again; 2467 } 2468 out: 2469 if (ret) { 2470 btrfs_handle_fs_error(fs_info, ret, NULL); 2471 if (!list_empty(&reloc_roots)) 2472 free_reloc_roots(&reloc_roots); 2473 2474 /* new reloc root may be added */ 2475 mutex_lock(&fs_info->reloc_mutex); 2476 list_splice_init(&rc->reloc_roots, &reloc_roots); 2477 mutex_unlock(&fs_info->reloc_mutex); 2478 if (!list_empty(&reloc_roots)) 2479 free_reloc_roots(&reloc_roots); 2480 } 2481 2482 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2483 } 2484 2485 static void free_block_list(struct rb_root *blocks) 2486 { 2487 struct tree_block *block; 2488 struct rb_node *rb_node; 2489 while ((rb_node = rb_first(blocks))) { 2490 block = rb_entry(rb_node, struct tree_block, rb_node); 2491 rb_erase(rb_node, blocks); 2492 kfree(block); 2493 } 2494 } 2495 2496 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, 2497 struct btrfs_root *reloc_root) 2498 { 2499 struct btrfs_fs_info *fs_info = reloc_root->fs_info; 2500 struct btrfs_root *root; 2501 2502 if (reloc_root->last_trans == trans->transid) 2503 return 0; 2504 2505 root = read_fs_root(fs_info, reloc_root->root_key.offset); 2506 BUG_ON(IS_ERR(root)); 2507 BUG_ON(root->reloc_root != reloc_root); 2508 2509 return btrfs_record_root_in_trans(trans, root); 2510 } 2511 2512 static noinline_for_stack 2513 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2514 struct reloc_control *rc, 2515 struct backref_node *node, 2516 struct backref_edge *edges[]) 2517 { 2518 struct backref_node *next; 2519 struct btrfs_root *root; 2520 int index = 0; 2521 2522 next = node; 2523 while (1) { 2524 cond_resched(); 2525 next = walk_up_backref(next, edges, &index); 2526 root = next->root; 2527 BUG_ON(!root); 2528 BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state)); 2529 2530 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2531 record_reloc_root_in_trans(trans, root); 2532 break; 2533 } 2534 2535 btrfs_record_root_in_trans(trans, root); 2536 root = root->reloc_root; 2537 2538 if (next->new_bytenr != root->node->start) { 2539 BUG_ON(next->new_bytenr); 2540 BUG_ON(!list_empty(&next->list)); 2541 next->new_bytenr = root->node->start; 2542 next->root = root; 2543 list_add_tail(&next->list, 2544 &rc->backref_cache.changed); 2545 __mark_block_processed(rc, next); 2546 break; 2547 } 2548 2549 
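		/*
		 * next->new_bytenr already matches the current reloc
		 * root, which is not expected on this path; warn, drop
		 * this root and keep walking the backref tree.
		 */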
		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root)
		return NULL;

	next = node;
	/* setup backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}

/*
 * select a tree root for relocation. return NULL if the block is
 * reference counted. we should use do_relocation() in this case.
 * return a tree root pointer if the block isn't reference counted.
 * return -ENOENT if the block is the root of a reloc tree.
 */
static noinline_for_stack
struct btrfs_root *select_one_root(struct backref_node *node)
{
	struct backref_node *next;
	struct btrfs_root *root;
	struct btrfs_root *fs_root = NULL;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);

		/* no other choice for a non-reference counted tree */
		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			return root;

		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
			fs_root = root;

		if (next != node)
			return NULL;

		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}

	if (!fs_root)
		return ERR_PTR(-ENOENT);
	return fs_root;
}

static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct backref_node *node, int reserve)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(reserve && node->processed);

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed && (reserve || next != node))
				break;

			num_bytes += fs_info->nodesize;

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}

static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;
	u64 tmp;

	num_bytes = calcu_metadata_size(rc, node, 1) * 2;

	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;

	/*
	 * We are under a transaction here so we can only do limited flushing.
	 * If we get an enospc just kick back -EAGAIN so we know to drop the
	 * transaction and try to refill when we can flush all the things.
	 */
	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret) {
		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
		while (tmp <= rc->reserved_bytes)
			tmp <<= 1;
		/*
		 * only one thread can access block_rsv at this point,
		 * so we don't need to hold a lock to protect block_rsv.
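		 * For example (illustrative numbers only, assuming a
		 * 16K nodesize): the base step is 16K * 256 = 4M, so
		 * tmp doubles 4M -> 8M -> 16M ... until it first
		 * exceeds rc->reserved_bytes, and one more 4M step is
		 * added on top as headroom.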
		 * we expand the reservation size here to allow enough
		 * space for relocation and we will return earlier in
		 * the enospc case.
		 */
		rc->block_rsv->size = tmp + fs_info->nodesize *
				      RELOCATION_RESERVED_NODES;
		return -EAGAIN;
	}

	return 0;
}

/*
 * relocate a tree block, and then update pointers in upper level
 * blocks that reference the block to point to the new location.
 *
 * if called by link_to_upper, the block has already been relocated.
 * in that case this function just updates pointers.
 */
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *upper;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	u64 generation;
	int slot;
	int ret;
	int err = 0;

	BUG_ON(lowest && node->eb);

	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		cond_resched();

		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		BUG_ON(!root);

		if (upper->eb && !upper->locked) {
			if (!lowest) {
				ret = btrfs_bin_search(upper->eb, key,
						       upper->level, &slot);
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			drop_node_buffer(upper);
		}

		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret) {
				if (ret < 0)
					err = ret;
				else
					err = -ENOENT;

				btrfs_release_path(path);
				break;
			}

			if (!upper->eb) {
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}

			upper->locked = 1;
			path->locks[upper->level] = 0;

			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, key, upper->level,
					       &slot);
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			if (bytenr != node->bytenr) {
				btrfs_err(root->fs_info,
	"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
					  bytenr, node->bytenr, slot,
					  upper->eb->start);
				err = -EIO;
				goto next;
			}
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}

		blocksize = root->fs_info->nodesize;
		generation = btrfs_node_ptr_generation(upper->eb, slot);
		eb = read_tree_block(fs_info, bytenr, generation);
		if (IS_ERR(eb)) {
			err = PTR_ERR(eb);
			goto next;
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			err = -EIO;
			goto next;
		}
		btrfs_tree_lock(eb);
		btrfs_set_lock_blocking(eb);

		if (!node->eb) {
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(node->eb != eb);
		} else {
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
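			/*
			 * The block was already relocated: repoint the
			 * parent at the new copy; the ref updates below
			 * migrate the child references over to it.
			 */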
btrfs_set_node_ptr_generation(upper->eb, slot, 2808 trans->transid); 2809 btrfs_mark_buffer_dirty(upper->eb); 2810 2811 ret = btrfs_inc_extent_ref(trans, root, 2812 node->eb->start, blocksize, 2813 upper->eb->start, 2814 btrfs_header_owner(upper->eb), 2815 node->level, 0); 2816 BUG_ON(ret); 2817 2818 ret = btrfs_drop_subtree(trans, root, eb, upper->eb); 2819 BUG_ON(ret); 2820 } 2821 next: 2822 if (!upper->pending) 2823 drop_node_buffer(upper); 2824 else 2825 unlock_node_buffer(upper); 2826 if (err) 2827 break; 2828 } 2829 2830 if (!err && node->pending) { 2831 drop_node_buffer(node); 2832 list_move_tail(&node->list, &rc->backref_cache.changed); 2833 node->pending = 0; 2834 } 2835 2836 path->lowest_level = 0; 2837 BUG_ON(err == -ENOSPC); 2838 return err; 2839 } 2840 2841 static int link_to_upper(struct btrfs_trans_handle *trans, 2842 struct reloc_control *rc, 2843 struct backref_node *node, 2844 struct btrfs_path *path) 2845 { 2846 struct btrfs_key key; 2847 2848 btrfs_node_key_to_cpu(node->eb, &key, 0); 2849 return do_relocation(trans, rc, node, &key, path, 0); 2850 } 2851 2852 static int finish_pending_nodes(struct btrfs_trans_handle *trans, 2853 struct reloc_control *rc, 2854 struct btrfs_path *path, int err) 2855 { 2856 LIST_HEAD(list); 2857 struct backref_cache *cache = &rc->backref_cache; 2858 struct backref_node *node; 2859 int level; 2860 int ret; 2861 2862 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2863 while (!list_empty(&cache->pending[level])) { 2864 node = list_entry(cache->pending[level].next, 2865 struct backref_node, list); 2866 list_move_tail(&node->list, &list); 2867 BUG_ON(!node->pending); 2868 2869 if (!err) { 2870 ret = link_to_upper(trans, rc, node, path); 2871 if (ret < 0) 2872 err = ret; 2873 } 2874 } 2875 list_splice_init(&list, &cache->pending[level]); 2876 } 2877 return err; 2878 } 2879 2880 static void mark_block_processed(struct reloc_control *rc, 2881 u64 bytenr, u32 blocksize) 2882 { 2883 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, 2884 EXTENT_DIRTY); 2885 } 2886 2887 static void __mark_block_processed(struct reloc_control *rc, 2888 struct backref_node *node) 2889 { 2890 u32 blocksize; 2891 if (node->level == 0 || 2892 in_block_group(node->bytenr, rc->block_group)) { 2893 blocksize = rc->extent_root->fs_info->nodesize; 2894 mark_block_processed(rc, node->bytenr, blocksize); 2895 } 2896 node->processed = 1; 2897 } 2898 2899 /* 2900 * mark a block and all blocks directly/indirectly reference the block 2901 * as processed. 
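 * (The walk below reuses the standard backref-walk pattern: climb via
 * the first upper edge of each node, remembering the edges in edges[],
 * and let walk_down_backref() backtrack to the next unvisited branch.)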
2902 */ 2903 static void update_processed_blocks(struct reloc_control *rc, 2904 struct backref_node *node) 2905 { 2906 struct backref_node *next = node; 2907 struct backref_edge *edge; 2908 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2909 int index = 0; 2910 2911 while (next) { 2912 cond_resched(); 2913 while (1) { 2914 if (next->processed) 2915 break; 2916 2917 __mark_block_processed(rc, next); 2918 2919 if (list_empty(&next->upper)) 2920 break; 2921 2922 edge = list_entry(next->upper.next, 2923 struct backref_edge, list[LOWER]); 2924 edges[index++] = edge; 2925 next = edge->node[UPPER]; 2926 } 2927 next = walk_down_backref(edges, &index); 2928 } 2929 } 2930 2931 static int tree_block_processed(u64 bytenr, struct reloc_control *rc) 2932 { 2933 u32 blocksize = rc->extent_root->fs_info->nodesize; 2934 2935 if (test_range_bit(&rc->processed_blocks, bytenr, 2936 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2937 return 1; 2938 return 0; 2939 } 2940 2941 static int get_tree_block_key(struct btrfs_fs_info *fs_info, 2942 struct tree_block *block) 2943 { 2944 struct extent_buffer *eb; 2945 2946 BUG_ON(block->key_ready); 2947 eb = read_tree_block(fs_info, block->bytenr, block->key.offset); 2948 if (IS_ERR(eb)) { 2949 return PTR_ERR(eb); 2950 } else if (!extent_buffer_uptodate(eb)) { 2951 free_extent_buffer(eb); 2952 return -EIO; 2953 } 2954 WARN_ON(btrfs_header_level(eb) != block->level); 2955 if (block->level == 0) 2956 btrfs_item_key_to_cpu(eb, &block->key, 0); 2957 else 2958 btrfs_node_key_to_cpu(eb, &block->key, 0); 2959 free_extent_buffer(eb); 2960 block->key_ready = 1; 2961 return 0; 2962 } 2963 2964 /* 2965 * helper function to relocate a tree block 2966 */ 2967 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2968 struct reloc_control *rc, 2969 struct backref_node *node, 2970 struct btrfs_key *key, 2971 struct btrfs_path *path) 2972 { 2973 struct btrfs_root *root; 2974 int ret = 0; 2975 2976 if (!node) 2977 return 0; 2978 2979 BUG_ON(node->processed); 2980 root = select_one_root(node); 2981 if (root == ERR_PTR(-ENOENT)) { 2982 update_processed_blocks(rc, node); 2983 goto out; 2984 } 2985 2986 if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { 2987 ret = reserve_metadata_space(trans, rc, node); 2988 if (ret) 2989 goto out; 2990 } 2991 2992 if (root) { 2993 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { 2994 BUG_ON(node->new_bytenr); 2995 BUG_ON(!list_empty(&node->list)); 2996 btrfs_record_root_in_trans(trans, root); 2997 root = root->reloc_root; 2998 node->new_bytenr = root->node->start; 2999 node->root = root; 3000 list_add_tail(&node->list, &rc->backref_cache.changed); 3001 } else { 3002 path->lowest_level = node->level; 3003 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 3004 btrfs_release_path(path); 3005 if (ret > 0) 3006 ret = 0; 3007 } 3008 if (!ret) 3009 update_processed_blocks(rc, node); 3010 } else { 3011 ret = do_relocation(trans, rc, node, key, path, 1); 3012 } 3013 out: 3014 if (ret || node->level == 0 || node->cowonly) 3015 remove_backref_node(&rc->backref_cache, node); 3016 return ret; 3017 } 3018 3019 /* 3020 * relocate a list of blocks 3021 */ 3022 static noinline_for_stack 3023 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 3024 struct reloc_control *rc, struct rb_root *blocks) 3025 { 3026 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3027 struct backref_node *node; 3028 struct btrfs_path *path; 3029 struct tree_block *block; 3030 struct rb_node *rb_node; 3031 int ret; 3032 int err = 0; 3033 3034 path = 
btrfs_alloc_path(); 3035 if (!path) { 3036 err = -ENOMEM; 3037 goto out_free_blocks; 3038 } 3039 3040 rb_node = rb_first(blocks); 3041 while (rb_node) { 3042 block = rb_entry(rb_node, struct tree_block, rb_node); 3043 if (!block->key_ready) 3044 readahead_tree_block(fs_info, block->bytenr); 3045 rb_node = rb_next(rb_node); 3046 } 3047 3048 rb_node = rb_first(blocks); 3049 while (rb_node) { 3050 block = rb_entry(rb_node, struct tree_block, rb_node); 3051 if (!block->key_ready) { 3052 err = get_tree_block_key(fs_info, block); 3053 if (err) 3054 goto out_free_path; 3055 } 3056 rb_node = rb_next(rb_node); 3057 } 3058 3059 rb_node = rb_first(blocks); 3060 while (rb_node) { 3061 block = rb_entry(rb_node, struct tree_block, rb_node); 3062 3063 node = build_backref_tree(rc, &block->key, 3064 block->level, block->bytenr); 3065 if (IS_ERR(node)) { 3066 err = PTR_ERR(node); 3067 goto out; 3068 } 3069 3070 ret = relocate_tree_block(trans, rc, node, &block->key, 3071 path); 3072 if (ret < 0) { 3073 if (ret != -EAGAIN || rb_node == rb_first(blocks)) 3074 err = ret; 3075 goto out; 3076 } 3077 rb_node = rb_next(rb_node); 3078 } 3079 out: 3080 err = finish_pending_nodes(trans, rc, path, err); 3081 3082 out_free_path: 3083 btrfs_free_path(path); 3084 out_free_blocks: 3085 free_block_list(blocks); 3086 return err; 3087 } 3088 3089 static noinline_for_stack 3090 int prealloc_file_extent_cluster(struct inode *inode, 3091 struct file_extent_cluster *cluster) 3092 { 3093 u64 alloc_hint = 0; 3094 u64 start; 3095 u64 end; 3096 u64 offset = BTRFS_I(inode)->index_cnt; 3097 u64 num_bytes; 3098 int nr = 0; 3099 int ret = 0; 3100 u64 prealloc_start = cluster->start - offset; 3101 u64 prealloc_end = cluster->end - offset; 3102 u64 cur_offset; 3103 struct extent_changeset *data_reserved = NULL; 3104 3105 BUG_ON(cluster->start != cluster->boundary[0]); 3106 inode_lock(inode); 3107 3108 ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start, 3109 prealloc_end + 1 - prealloc_start); 3110 if (ret) 3111 goto out; 3112 3113 cur_offset = prealloc_start; 3114 while (nr < cluster->nr) { 3115 start = cluster->boundary[nr] - offset; 3116 if (nr + 1 < cluster->nr) 3117 end = cluster->boundary[nr + 1] - 1 - offset; 3118 else 3119 end = cluster->end - offset; 3120 3121 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3122 num_bytes = end + 1 - start; 3123 if (cur_offset < start) 3124 btrfs_free_reserved_data_space(inode, data_reserved, 3125 cur_offset, start - cur_offset); 3126 ret = btrfs_prealloc_file_range(inode, 0, start, 3127 num_bytes, num_bytes, 3128 end + 1, &alloc_hint); 3129 cur_offset = end + 1; 3130 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3131 if (ret) 3132 break; 3133 nr++; 3134 } 3135 if (cur_offset < prealloc_end) 3136 btrfs_free_reserved_data_space(inode, data_reserved, 3137 cur_offset, prealloc_end + 1 - cur_offset); 3138 out: 3139 inode_unlock(inode); 3140 extent_changeset_free(data_reserved); 3141 return ret; 3142 } 3143 3144 static noinline_for_stack 3145 int setup_extent_mapping(struct inode *inode, u64 start, u64 end, 3146 u64 block_start) 3147 { 3148 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3149 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 3150 struct extent_map *em; 3151 int ret = 0; 3152 3153 em = alloc_extent_map(); 3154 if (!em) 3155 return -ENOMEM; 3156 3157 em->start = start; 3158 em->len = end + 1 - start; 3159 em->block_len = em->len; 3160 em->block_start = block_start; 3161 em->bdev = fs_info->fs_devices->latest_bdev; 3162 
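	/*
	 * Pin the mapping so it is not dropped or merged while the
	 * cluster pages are read back and dirtied below.
	 */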
set_bit(EXTENT_FLAG_PINNED, &em->flags); 3163 3164 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3165 while (1) { 3166 write_lock(&em_tree->lock); 3167 ret = add_extent_mapping(em_tree, em, 0); 3168 write_unlock(&em_tree->lock); 3169 if (ret != -EEXIST) { 3170 free_extent_map(em); 3171 break; 3172 } 3173 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); 3174 } 3175 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3176 return ret; 3177 } 3178 3179 static int relocate_file_extent_cluster(struct inode *inode, 3180 struct file_extent_cluster *cluster) 3181 { 3182 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3183 u64 page_start; 3184 u64 page_end; 3185 u64 offset = BTRFS_I(inode)->index_cnt; 3186 unsigned long index; 3187 unsigned long last_index; 3188 struct page *page; 3189 struct file_ra_state *ra; 3190 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 3191 int nr = 0; 3192 int ret = 0; 3193 3194 if (!cluster->nr) 3195 return 0; 3196 3197 ra = kzalloc(sizeof(*ra), GFP_NOFS); 3198 if (!ra) 3199 return -ENOMEM; 3200 3201 ret = prealloc_file_extent_cluster(inode, cluster); 3202 if (ret) 3203 goto out; 3204 3205 file_ra_state_init(ra, inode->i_mapping); 3206 3207 ret = setup_extent_mapping(inode, cluster->start - offset, 3208 cluster->end - offset, cluster->start); 3209 if (ret) 3210 goto out; 3211 3212 index = (cluster->start - offset) >> PAGE_SHIFT; 3213 last_index = (cluster->end - offset) >> PAGE_SHIFT; 3214 while (index <= last_index) { 3215 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), 3216 PAGE_SIZE); 3217 if (ret) 3218 goto out; 3219 3220 page = find_lock_page(inode->i_mapping, index); 3221 if (!page) { 3222 page_cache_sync_readahead(inode->i_mapping, 3223 ra, NULL, index, 3224 last_index + 1 - index); 3225 page = find_or_create_page(inode->i_mapping, index, 3226 mask); 3227 if (!page) { 3228 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3229 PAGE_SIZE); 3230 ret = -ENOMEM; 3231 goto out; 3232 } 3233 } 3234 3235 if (PageReadahead(page)) { 3236 page_cache_async_readahead(inode->i_mapping, 3237 ra, NULL, page, index, 3238 last_index + 1 - index); 3239 } 3240 3241 if (!PageUptodate(page)) { 3242 btrfs_readpage(NULL, page); 3243 lock_page(page); 3244 if (!PageUptodate(page)) { 3245 unlock_page(page); 3246 put_page(page); 3247 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3248 PAGE_SIZE); 3249 btrfs_delalloc_release_extents(BTRFS_I(inode), 3250 PAGE_SIZE); 3251 ret = -EIO; 3252 goto out; 3253 } 3254 } 3255 3256 page_start = page_offset(page); 3257 page_end = page_start + PAGE_SIZE - 1; 3258 3259 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); 3260 3261 set_page_extent_mapped(page); 3262 3263 if (nr < cluster->nr && 3264 page_start + offset == cluster->boundary[nr]) { 3265 set_extent_bits(&BTRFS_I(inode)->io_tree, 3266 page_start, page_end, 3267 EXTENT_BOUNDARY); 3268 nr++; 3269 } 3270 3271 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 3272 NULL, 0); 3273 if (ret) { 3274 unlock_page(page); 3275 put_page(page); 3276 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3277 PAGE_SIZE); 3278 btrfs_delalloc_release_extents(BTRFS_I(inode), 3279 PAGE_SIZE); 3280 3281 clear_extent_bits(&BTRFS_I(inode)->io_tree, 3282 page_start, page_end, 3283 EXTENT_LOCKED | EXTENT_BOUNDARY); 3284 goto out; 3285 3286 } 3287 set_page_dirty(page); 3288 3289 unlock_extent(&BTRFS_I(inode)->io_tree, 3290 page_start, page_end); 3291 unlock_page(page); 3292 put_page(page); 3293 3294 index++; 3295 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 
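		/*
		 * Throttle after every dirtied page so relocation does
		 * not flood the dirty page pool in one burst.
		 */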
3296 balance_dirty_pages_ratelimited(inode->i_mapping); 3297 btrfs_throttle(fs_info); 3298 } 3299 WARN_ON(nr != cluster->nr); 3300 out: 3301 kfree(ra); 3302 return ret; 3303 } 3304 3305 static noinline_for_stack 3306 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key, 3307 struct file_extent_cluster *cluster) 3308 { 3309 int ret; 3310 3311 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 3312 ret = relocate_file_extent_cluster(inode, cluster); 3313 if (ret) 3314 return ret; 3315 cluster->nr = 0; 3316 } 3317 3318 if (!cluster->nr) 3319 cluster->start = extent_key->objectid; 3320 else 3321 BUG_ON(cluster->nr >= MAX_EXTENTS); 3322 cluster->end = extent_key->objectid + extent_key->offset - 1; 3323 cluster->boundary[cluster->nr] = extent_key->objectid; 3324 cluster->nr++; 3325 3326 if (cluster->nr >= MAX_EXTENTS) { 3327 ret = relocate_file_extent_cluster(inode, cluster); 3328 if (ret) 3329 return ret; 3330 cluster->nr = 0; 3331 } 3332 return 0; 3333 } 3334 3335 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3336 static int get_ref_objectid_v0(struct reloc_control *rc, 3337 struct btrfs_path *path, 3338 struct btrfs_key *extent_key, 3339 u64 *ref_objectid, int *path_change) 3340 { 3341 struct btrfs_key key; 3342 struct extent_buffer *leaf; 3343 struct btrfs_extent_ref_v0 *ref0; 3344 int ret; 3345 int slot; 3346 3347 leaf = path->nodes[0]; 3348 slot = path->slots[0]; 3349 while (1) { 3350 if (slot >= btrfs_header_nritems(leaf)) { 3351 ret = btrfs_next_leaf(rc->extent_root, path); 3352 if (ret < 0) 3353 return ret; 3354 BUG_ON(ret > 0); 3355 leaf = path->nodes[0]; 3356 slot = path->slots[0]; 3357 if (path_change) 3358 *path_change = 1; 3359 } 3360 btrfs_item_key_to_cpu(leaf, &key, slot); 3361 if (key.objectid != extent_key->objectid) 3362 return -ENOENT; 3363 3364 if (key.type != BTRFS_EXTENT_REF_V0_KEY) { 3365 slot++; 3366 continue; 3367 } 3368 ref0 = btrfs_item_ptr(leaf, slot, 3369 struct btrfs_extent_ref_v0); 3370 *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0); 3371 break; 3372 } 3373 return 0; 3374 } 3375 #endif 3376 3377 /* 3378 * helper to add a tree block to the list. 
3379 * the major work is getting the generation and level of the block 3380 */ 3381 static int add_tree_block(struct reloc_control *rc, 3382 struct btrfs_key *extent_key, 3383 struct btrfs_path *path, 3384 struct rb_root *blocks) 3385 { 3386 struct extent_buffer *eb; 3387 struct btrfs_extent_item *ei; 3388 struct btrfs_tree_block_info *bi; 3389 struct tree_block *block; 3390 struct rb_node *rb_node; 3391 u32 item_size; 3392 int level = -1; 3393 u64 generation; 3394 3395 eb = path->nodes[0]; 3396 item_size = btrfs_item_size_nr(eb, path->slots[0]); 3397 3398 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || 3399 item_size >= sizeof(*ei) + sizeof(*bi)) { 3400 ei = btrfs_item_ptr(eb, path->slots[0], 3401 struct btrfs_extent_item); 3402 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { 3403 bi = (struct btrfs_tree_block_info *)(ei + 1); 3404 level = btrfs_tree_block_level(eb, bi); 3405 } else { 3406 level = (int)extent_key->offset; 3407 } 3408 generation = btrfs_extent_generation(eb, ei); 3409 } else { 3410 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3411 u64 ref_owner; 3412 int ret; 3413 3414 BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0)); 3415 ret = get_ref_objectid_v0(rc, path, extent_key, 3416 &ref_owner, NULL); 3417 if (ret < 0) 3418 return ret; 3419 BUG_ON(ref_owner >= BTRFS_MAX_LEVEL); 3420 level = (int)ref_owner; 3421 /* FIXME: get real generation */ 3422 generation = 0; 3423 #else 3424 BUG(); 3425 #endif 3426 } 3427 3428 btrfs_release_path(path); 3429 3430 BUG_ON(level == -1); 3431 3432 block = kmalloc(sizeof(*block), GFP_NOFS); 3433 if (!block) 3434 return -ENOMEM; 3435 3436 block->bytenr = extent_key->objectid; 3437 block->key.objectid = rc->extent_root->fs_info->nodesize; 3438 block->key.offset = generation; 3439 block->level = level; 3440 block->key_ready = 0; 3441 3442 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); 3443 if (rb_node) 3444 backref_tree_panic(rb_node, -EEXIST, block->bytenr); 3445 3446 return 0; 3447 } 3448 3449 /* 3450 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY 3451 */ 3452 static int __add_tree_block(struct reloc_control *rc, 3453 u64 bytenr, u32 blocksize, 3454 struct rb_root *blocks) 3455 { 3456 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3457 struct btrfs_path *path; 3458 struct btrfs_key key; 3459 int ret; 3460 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 3461 3462 if (tree_block_processed(bytenr, rc)) 3463 return 0; 3464 3465 if (tree_search(blocks, bytenr)) 3466 return 0; 3467 3468 path = btrfs_alloc_path(); 3469 if (!path) 3470 return -ENOMEM; 3471 again: 3472 key.objectid = bytenr; 3473 if (skinny) { 3474 key.type = BTRFS_METADATA_ITEM_KEY; 3475 key.offset = (u64)-1; 3476 } else { 3477 key.type = BTRFS_EXTENT_ITEM_KEY; 3478 key.offset = blocksize; 3479 } 3480 3481 path->search_commit_root = 1; 3482 path->skip_locking = 1; 3483 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); 3484 if (ret < 0) 3485 goto out; 3486 3487 if (ret > 0 && skinny) { 3488 if (path->slots[0]) { 3489 path->slots[0]--; 3490 btrfs_item_key_to_cpu(path->nodes[0], &key, 3491 path->slots[0]); 3492 if (key.objectid == bytenr && 3493 (key.type == BTRFS_METADATA_ITEM_KEY || 3494 (key.type == BTRFS_EXTENT_ITEM_KEY && 3495 key.offset == blocksize))) 3496 ret = 0; 3497 } 3498 3499 if (ret) { 3500 skinny = false; 3501 btrfs_release_path(path); 3502 goto again; 3503 } 3504 } 3505 if (ret) { 3506 ASSERT(ret == 1); 3507 btrfs_print_leaf(path->nodes[0]); 3508 btrfs_err(fs_info, 3509 "tree block extent item 
(%llu) is not found in extent tree",
			  bytenr);
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to check if the block uses full backrefs for pointers in it
 */
static int block_use_full_backref(struct reloc_control *rc,
				  struct extent_buffer *eb)
{
	u64 flags;
	int ret;

	if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
	    btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
		return 1;

	ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info,
				       eb->start, btrfs_header_level(eb), 1,
				       NULL, &flags);
	BUG_ON(ret);

	if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
		ret = 1;
	else
		ret = 0;
	return ret;
}

static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode,
				    u64 ino)
{
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (inode)
		goto truncate;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode) || is_bad_inode(inode)) {
		if (!IS_ERR(inode))
			iput(inode);
		return -ENOENT;
	}

truncate:
	ret = btrfs_check_trunc_cache_free_space(fs_info,
						 &fs_info->global_block_rsv);
	if (ret)
		goto out;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	iput(inode);
	return ret;
}

/*
 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
 * this function scans the fs tree to find blocks that reference the data
 * extent
 */
static int find_data_references(struct reloc_control *rc,
				struct btrfs_key *extent_key,
				struct extent_buffer *leaf,
				struct btrfs_extent_data_ref *ref,
				struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_path *path;
	struct tree_block *block;
	struct btrfs_root *root;
	struct btrfs_file_extent_item *fi;
	struct rb_node *rb_node;
	struct btrfs_key key;
	u64 ref_root;
	u64 ref_objectid;
	u64 ref_offset;
	u32 ref_count;
	u32 nritems;
	int err = 0;
	int added = 0;
	int counted;
	int ret;

	ref_root = btrfs_extent_data_ref_root(leaf, ref);
	ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
	ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
	ref_count = btrfs_extent_data_ref_count(leaf, ref);

	/*
	 * This is an extent belonging to the free space cache, let's just
	 * delete it and redo the search.
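	 * (Free space cache inodes live in the root tree, which is why
	 * ref_root is checked against BTRFS_ROOT_TREE_OBJECTID below.)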
3627 */ 3628 if (ref_root == BTRFS_ROOT_TREE_OBJECTID) { 3629 ret = delete_block_group_cache(fs_info, rc->block_group, 3630 NULL, ref_objectid); 3631 if (ret != -ENOENT) 3632 return ret; 3633 ret = 0; 3634 } 3635 3636 path = btrfs_alloc_path(); 3637 if (!path) 3638 return -ENOMEM; 3639 path->reada = READA_FORWARD; 3640 3641 root = read_fs_root(fs_info, ref_root); 3642 if (IS_ERR(root)) { 3643 err = PTR_ERR(root); 3644 goto out; 3645 } 3646 3647 key.objectid = ref_objectid; 3648 key.type = BTRFS_EXTENT_DATA_KEY; 3649 if (ref_offset > ((u64)-1 << 32)) 3650 key.offset = 0; 3651 else 3652 key.offset = ref_offset; 3653 3654 path->search_commit_root = 1; 3655 path->skip_locking = 1; 3656 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3657 if (ret < 0) { 3658 err = ret; 3659 goto out; 3660 } 3661 3662 leaf = path->nodes[0]; 3663 nritems = btrfs_header_nritems(leaf); 3664 /* 3665 * the references in tree blocks that use full backrefs 3666 * are not counted in 3667 */ 3668 if (block_use_full_backref(rc, leaf)) 3669 counted = 0; 3670 else 3671 counted = 1; 3672 rb_node = tree_search(blocks, leaf->start); 3673 if (rb_node) { 3674 if (counted) 3675 added = 1; 3676 else 3677 path->slots[0] = nritems; 3678 } 3679 3680 while (ref_count > 0) { 3681 while (path->slots[0] >= nritems) { 3682 ret = btrfs_next_leaf(root, path); 3683 if (ret < 0) { 3684 err = ret; 3685 goto out; 3686 } 3687 if (WARN_ON(ret > 0)) 3688 goto out; 3689 3690 leaf = path->nodes[0]; 3691 nritems = btrfs_header_nritems(leaf); 3692 added = 0; 3693 3694 if (block_use_full_backref(rc, leaf)) 3695 counted = 0; 3696 else 3697 counted = 1; 3698 rb_node = tree_search(blocks, leaf->start); 3699 if (rb_node) { 3700 if (counted) 3701 added = 1; 3702 else 3703 path->slots[0] = nritems; 3704 } 3705 } 3706 3707 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3708 if (WARN_ON(key.objectid != ref_objectid || 3709 key.type != BTRFS_EXTENT_DATA_KEY)) 3710 break; 3711 3712 fi = btrfs_item_ptr(leaf, path->slots[0], 3713 struct btrfs_file_extent_item); 3714 3715 if (btrfs_file_extent_type(leaf, fi) == 3716 BTRFS_FILE_EXTENT_INLINE) 3717 goto next; 3718 3719 if (btrfs_file_extent_disk_bytenr(leaf, fi) != 3720 extent_key->objectid) 3721 goto next; 3722 3723 key.offset -= btrfs_file_extent_offset(leaf, fi); 3724 if (key.offset != ref_offset) 3725 goto next; 3726 3727 if (counted) 3728 ref_count--; 3729 if (added) 3730 goto next; 3731 3732 if (!tree_block_processed(leaf->start, rc)) { 3733 block = kmalloc(sizeof(*block), GFP_NOFS); 3734 if (!block) { 3735 err = -ENOMEM; 3736 break; 3737 } 3738 block->bytenr = leaf->start; 3739 btrfs_item_key_to_cpu(leaf, &block->key, 0); 3740 block->level = 0; 3741 block->key_ready = 1; 3742 rb_node = tree_insert(blocks, block->bytenr, 3743 &block->rb_node); 3744 if (rb_node) 3745 backref_tree_panic(rb_node, -EEXIST, 3746 block->bytenr); 3747 } 3748 if (counted) 3749 added = 1; 3750 else 3751 path->slots[0] = nritems; 3752 next: 3753 path->slots[0]++; 3754 3755 } 3756 out: 3757 btrfs_free_path(path); 3758 return err; 3759 } 3760 3761 /* 3762 * helper to find all tree blocks that reference a given data extent 3763 */ 3764 static noinline_for_stack 3765 int add_data_references(struct reloc_control *rc, 3766 struct btrfs_key *extent_key, 3767 struct btrfs_path *path, 3768 struct rb_root *blocks) 3769 { 3770 struct btrfs_key key; 3771 struct extent_buffer *eb; 3772 struct btrfs_extent_data_ref *dref; 3773 struct btrfs_extent_inline_ref *iref; 3774 unsigned long ptr; 3775 unsigned long end; 3776 u32 blocksize = 
rc->extent_root->fs_info->nodesize; 3777 int ret = 0; 3778 int err = 0; 3779 3780 eb = path->nodes[0]; 3781 ptr = btrfs_item_ptr_offset(eb, path->slots[0]); 3782 end = ptr + btrfs_item_size_nr(eb, path->slots[0]); 3783 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3784 if (ptr + sizeof(struct btrfs_extent_item_v0) == end) 3785 ptr = end; 3786 else 3787 #endif 3788 ptr += sizeof(struct btrfs_extent_item); 3789 3790 while (ptr < end) { 3791 iref = (struct btrfs_extent_inline_ref *)ptr; 3792 key.type = btrfs_get_extent_inline_ref_type(eb, iref, 3793 BTRFS_REF_TYPE_DATA); 3794 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3795 key.offset = btrfs_extent_inline_ref_offset(eb, iref); 3796 ret = __add_tree_block(rc, key.offset, blocksize, 3797 blocks); 3798 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3799 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 3800 ret = find_data_references(rc, extent_key, 3801 eb, dref, blocks); 3802 } else { 3803 ret = -EINVAL; 3804 btrfs_err(rc->extent_root->fs_info, 3805 "extent %llu slot %d has an invalid inline ref type", 3806 eb->start, path->slots[0]); 3807 } 3808 if (ret) { 3809 err = ret; 3810 goto out; 3811 } 3812 ptr += btrfs_extent_inline_ref_size(key.type); 3813 } 3814 WARN_ON(ptr > end); 3815 3816 while (1) { 3817 cond_resched(); 3818 eb = path->nodes[0]; 3819 if (path->slots[0] >= btrfs_header_nritems(eb)) { 3820 ret = btrfs_next_leaf(rc->extent_root, path); 3821 if (ret < 0) { 3822 err = ret; 3823 break; 3824 } 3825 if (ret > 0) 3826 break; 3827 eb = path->nodes[0]; 3828 } 3829 3830 btrfs_item_key_to_cpu(eb, &key, path->slots[0]); 3831 if (key.objectid != extent_key->objectid) 3832 break; 3833 3834 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3835 if (key.type == BTRFS_SHARED_DATA_REF_KEY || 3836 key.type == BTRFS_EXTENT_REF_V0_KEY) { 3837 #else 3838 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY); 3839 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3840 #endif 3841 ret = __add_tree_block(rc, key.offset, blocksize, 3842 blocks); 3843 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3844 dref = btrfs_item_ptr(eb, path->slots[0], 3845 struct btrfs_extent_data_ref); 3846 ret = find_data_references(rc, extent_key, 3847 eb, dref, blocks); 3848 } else { 3849 ret = 0; 3850 } 3851 if (ret) { 3852 err = ret; 3853 break; 3854 } 3855 path->slots[0]++; 3856 } 3857 out: 3858 btrfs_release_path(path); 3859 if (err) 3860 free_block_list(blocks); 3861 return err; 3862 } 3863 3864 /* 3865 * helper to find next unprocessed extent 3866 */ 3867 static noinline_for_stack 3868 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, 3869 struct btrfs_key *extent_key) 3870 { 3871 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3872 struct btrfs_key key; 3873 struct extent_buffer *leaf; 3874 u64 start, end, last; 3875 int ret; 3876 3877 last = rc->block_group->key.objectid + rc->block_group->key.offset; 3878 while (1) { 3879 cond_resched(); 3880 if (rc->search_start >= last) { 3881 ret = 1; 3882 break; 3883 } 3884 3885 key.objectid = rc->search_start; 3886 key.type = BTRFS_EXTENT_ITEM_KEY; 3887 key.offset = 0; 3888 3889 path->search_commit_root = 1; 3890 path->skip_locking = 1; 3891 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 3892 0, 0); 3893 if (ret < 0) 3894 break; 3895 next: 3896 leaf = path->nodes[0]; 3897 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3898 ret = btrfs_next_leaf(rc->extent_root, path); 3899 if (ret != 0) 3900 break; 3901 leaf = path->nodes[0]; 3902 } 3903 3904 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3905 if 
(key.objectid >= last) { 3906 ret = 1; 3907 break; 3908 } 3909 3910 if (key.type != BTRFS_EXTENT_ITEM_KEY && 3911 key.type != BTRFS_METADATA_ITEM_KEY) { 3912 path->slots[0]++; 3913 goto next; 3914 } 3915 3916 if (key.type == BTRFS_EXTENT_ITEM_KEY && 3917 key.objectid + key.offset <= rc->search_start) { 3918 path->slots[0]++; 3919 goto next; 3920 } 3921 3922 if (key.type == BTRFS_METADATA_ITEM_KEY && 3923 key.objectid + fs_info->nodesize <= 3924 rc->search_start) { 3925 path->slots[0]++; 3926 goto next; 3927 } 3928 3929 ret = find_first_extent_bit(&rc->processed_blocks, 3930 key.objectid, &start, &end, 3931 EXTENT_DIRTY, NULL); 3932 3933 if (ret == 0 && start <= key.objectid) { 3934 btrfs_release_path(path); 3935 rc->search_start = end + 1; 3936 } else { 3937 if (key.type == BTRFS_EXTENT_ITEM_KEY) 3938 rc->search_start = key.objectid + key.offset; 3939 else 3940 rc->search_start = key.objectid + 3941 fs_info->nodesize; 3942 memcpy(extent_key, &key, sizeof(key)); 3943 return 0; 3944 } 3945 } 3946 btrfs_release_path(path); 3947 return ret; 3948 } 3949 3950 static void set_reloc_control(struct reloc_control *rc) 3951 { 3952 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3953 3954 mutex_lock(&fs_info->reloc_mutex); 3955 fs_info->reloc_ctl = rc; 3956 mutex_unlock(&fs_info->reloc_mutex); 3957 } 3958 3959 static void unset_reloc_control(struct reloc_control *rc) 3960 { 3961 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3962 3963 mutex_lock(&fs_info->reloc_mutex); 3964 fs_info->reloc_ctl = NULL; 3965 mutex_unlock(&fs_info->reloc_mutex); 3966 } 3967 3968 static int check_extent_flags(u64 flags) 3969 { 3970 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3971 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3972 return 1; 3973 if (!(flags & BTRFS_EXTENT_FLAG_DATA) && 3974 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3975 return 1; 3976 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3977 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 3978 return 1; 3979 return 0; 3980 } 3981 3982 static noinline_for_stack 3983 int prepare_to_relocate(struct reloc_control *rc) 3984 { 3985 struct btrfs_trans_handle *trans; 3986 int ret; 3987 3988 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info, 3989 BTRFS_BLOCK_RSV_TEMP); 3990 if (!rc->block_rsv) 3991 return -ENOMEM; 3992 3993 memset(&rc->cluster, 0, sizeof(rc->cluster)); 3994 rc->search_start = rc->block_group->key.objectid; 3995 rc->extents_found = 0; 3996 rc->nodes_relocated = 0; 3997 rc->merging_rsv_size = 0; 3998 rc->reserved_bytes = 0; 3999 rc->block_rsv->size = rc->extent_root->fs_info->nodesize * 4000 RELOCATION_RESERVED_NODES; 4001 ret = btrfs_block_rsv_refill(rc->extent_root, 4002 rc->block_rsv, rc->block_rsv->size, 4003 BTRFS_RESERVE_FLUSH_ALL); 4004 if (ret) 4005 return ret; 4006 4007 rc->create_reloc_tree = 1; 4008 set_reloc_control(rc); 4009 4010 trans = btrfs_join_transaction(rc->extent_root); 4011 if (IS_ERR(trans)) { 4012 unset_reloc_control(rc); 4013 /* 4014 * extent tree is not a ref_cow tree and has no reloc_root to 4015 * cleanup. And callers are responsible to free the above 4016 * block rsv. 
4017 */ 4018 return PTR_ERR(trans); 4019 } 4020 btrfs_commit_transaction(trans); 4021 return 0; 4022 } 4023 4024 static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 4025 { 4026 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 4027 struct rb_root blocks = RB_ROOT; 4028 struct btrfs_key key; 4029 struct btrfs_trans_handle *trans = NULL; 4030 struct btrfs_path *path; 4031 struct btrfs_extent_item *ei; 4032 u64 flags; 4033 u32 item_size; 4034 int ret; 4035 int err = 0; 4036 int progress = 0; 4037 4038 path = btrfs_alloc_path(); 4039 if (!path) 4040 return -ENOMEM; 4041 path->reada = READA_FORWARD; 4042 4043 ret = prepare_to_relocate(rc); 4044 if (ret) { 4045 err = ret; 4046 goto out_free; 4047 } 4048 4049 while (1) { 4050 rc->reserved_bytes = 0; 4051 ret = btrfs_block_rsv_refill(rc->extent_root, 4052 rc->block_rsv, rc->block_rsv->size, 4053 BTRFS_RESERVE_FLUSH_ALL); 4054 if (ret) { 4055 err = ret; 4056 break; 4057 } 4058 progress++; 4059 trans = btrfs_start_transaction(rc->extent_root, 0); 4060 if (IS_ERR(trans)) { 4061 err = PTR_ERR(trans); 4062 trans = NULL; 4063 break; 4064 } 4065 restart: 4066 if (update_backref_cache(trans, &rc->backref_cache)) { 4067 btrfs_end_transaction(trans); 4068 continue; 4069 } 4070 4071 ret = find_next_extent(rc, path, &key); 4072 if (ret < 0) 4073 err = ret; 4074 if (ret != 0) 4075 break; 4076 4077 rc->extents_found++; 4078 4079 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 4080 struct btrfs_extent_item); 4081 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 4082 if (item_size >= sizeof(*ei)) { 4083 flags = btrfs_extent_flags(path->nodes[0], ei); 4084 ret = check_extent_flags(flags); 4085 BUG_ON(ret); 4086 4087 } else { 4088 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 4089 u64 ref_owner; 4090 int path_change = 0; 4091 4092 BUG_ON(item_size != 4093 sizeof(struct btrfs_extent_item_v0)); 4094 ret = get_ref_objectid_v0(rc, path, &key, &ref_owner, 4095 &path_change); 4096 if (ret < 0) { 4097 err = ret; 4098 break; 4099 } 4100 if (ref_owner < BTRFS_FIRST_FREE_OBJECTID) 4101 flags = BTRFS_EXTENT_FLAG_TREE_BLOCK; 4102 else 4103 flags = BTRFS_EXTENT_FLAG_DATA; 4104 4105 if (path_change) { 4106 btrfs_release_path(path); 4107 4108 path->search_commit_root = 1; 4109 path->skip_locking = 1; 4110 ret = btrfs_search_slot(NULL, rc->extent_root, 4111 &key, path, 0, 0); 4112 if (ret < 0) { 4113 err = ret; 4114 break; 4115 } 4116 BUG_ON(ret > 0); 4117 } 4118 #else 4119 BUG(); 4120 #endif 4121 } 4122 4123 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 4124 ret = add_tree_block(rc, &key, path, &blocks); 4125 } else if (rc->stage == UPDATE_DATA_PTRS && 4126 (flags & BTRFS_EXTENT_FLAG_DATA)) { 4127 ret = add_data_references(rc, &key, path, &blocks); 4128 } else { 4129 btrfs_release_path(path); 4130 ret = 0; 4131 } 4132 if (ret < 0) { 4133 err = ret; 4134 break; 4135 } 4136 4137 if (!RB_EMPTY_ROOT(&blocks)) { 4138 ret = relocate_tree_blocks(trans, rc, &blocks); 4139 if (ret < 0) { 4140 /* 4141 * if we fail to relocate tree blocks, force to update 4142 * backref cache when committing transaction. 
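				 * (setting last_trans to transid - 1 makes
				 * update_backref_cache() see the cache as
				 * stale and rebuild it on the next pass)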
4143 */ 4144 rc->backref_cache.last_trans = trans->transid - 1; 4145 4146 if (ret != -EAGAIN) { 4147 err = ret; 4148 break; 4149 } 4150 rc->extents_found--; 4151 rc->search_start = key.objectid; 4152 } 4153 } 4154 4155 btrfs_end_transaction_throttle(trans); 4156 btrfs_btree_balance_dirty(fs_info); 4157 trans = NULL; 4158 4159 if (rc->stage == MOVE_DATA_EXTENTS && 4160 (flags & BTRFS_EXTENT_FLAG_DATA)) { 4161 rc->found_file_extent = 1; 4162 ret = relocate_data_extent(rc->data_inode, 4163 &key, &rc->cluster); 4164 if (ret < 0) { 4165 err = ret; 4166 break; 4167 } 4168 } 4169 } 4170 if (trans && progress && err == -ENOSPC) { 4171 ret = btrfs_force_chunk_alloc(trans, fs_info, 4172 rc->block_group->flags); 4173 if (ret == 1) { 4174 err = 0; 4175 progress = 0; 4176 goto restart; 4177 } 4178 } 4179 4180 btrfs_release_path(path); 4181 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); 4182 4183 if (trans) { 4184 btrfs_end_transaction_throttle(trans); 4185 btrfs_btree_balance_dirty(fs_info); 4186 } 4187 4188 if (!err) { 4189 ret = relocate_file_extent_cluster(rc->data_inode, 4190 &rc->cluster); 4191 if (ret < 0) 4192 err = ret; 4193 } 4194 4195 rc->create_reloc_tree = 0; 4196 set_reloc_control(rc); 4197 4198 backref_cache_cleanup(&rc->backref_cache); 4199 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1); 4200 4201 err = prepare_to_merge(rc, err); 4202 4203 merge_reloc_roots(rc); 4204 4205 rc->merge_reloc_tree = 0; 4206 unset_reloc_control(rc); 4207 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1); 4208 4209 /* get rid of pinned extents */ 4210 trans = btrfs_join_transaction(rc->extent_root); 4211 if (IS_ERR(trans)) { 4212 err = PTR_ERR(trans); 4213 goto out_free; 4214 } 4215 btrfs_commit_transaction(trans); 4216 out_free: 4217 btrfs_free_block_rsv(fs_info, rc->block_rsv); 4218 btrfs_free_path(path); 4219 return err; 4220 } 4221 4222 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 4223 struct btrfs_root *root, u64 objectid) 4224 { 4225 struct btrfs_path *path; 4226 struct btrfs_inode_item *item; 4227 struct extent_buffer *leaf; 4228 int ret; 4229 4230 path = btrfs_alloc_path(); 4231 if (!path) 4232 return -ENOMEM; 4233 4234 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 4235 if (ret) 4236 goto out; 4237 4238 leaf = path->nodes[0]; 4239 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 4240 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 4241 btrfs_set_inode_generation(leaf, item, 1); 4242 btrfs_set_inode_size(leaf, item, 0); 4243 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 4244 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 4245 BTRFS_INODE_PREALLOC); 4246 btrfs_mark_buffer_dirty(leaf); 4247 out: 4248 btrfs_free_path(path); 4249 return ret; 4250 } 4251 4252 /* 4253 * helper to create inode for data relocation. 

/*
 * helper to create an inode for data relocation.
 * the inode is in the data relocation tree and its link count is 0.
 */
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 objectid;
	int err = 0;

	root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
	if (IS_ERR(root))
		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	err = btrfs_find_free_objectid(root, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid);
	BUG_ON(err);

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
	BTRFS_I(inode)->index_cnt = group->key.objectid;

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
	if (err) {
		if (inode)
			iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}

static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
{
	struct reloc_control *rc;

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return NULL;

	INIT_LIST_HEAD(&rc->reloc_roots);
	backref_cache_init(&rc->backref_cache);
	mapping_tree_init(&rc->reloc_root_tree);
	extent_io_tree_init(&rc->processed_blocks, NULL);
	return rc;
}

/*
 * Print the block group being relocated
 */
static void describe_relocation(struct btrfs_fs_info *fs_info,
				struct btrfs_block_group_cache *block_group)
{
	char buf[128];		/* prefixed by a '|' that'll be dropped */
	u64 flags = block_group->flags;

	/* Shouldn't happen */
	if (!flags) {
		strcpy(buf, "|NONE");
	} else {
		char *bp = buf;

#define DESCRIBE_FLAG(f, d) \
		if (flags & BTRFS_BLOCK_GROUP_##f) { \
			bp += snprintf(bp, buf - bp + sizeof(buf), "|%s", d); \
			flags &= ~BTRFS_BLOCK_GROUP_##f; \
		}
		DESCRIBE_FLAG(DATA,     "data");
		DESCRIBE_FLAG(SYSTEM,   "system");
		DESCRIBE_FLAG(METADATA, "metadata");
		DESCRIBE_FLAG(RAID0,    "raid0");
		DESCRIBE_FLAG(RAID1,    "raid1");
		DESCRIBE_FLAG(DUP,      "dup");
		DESCRIBE_FLAG(RAID10,   "raid10");
		DESCRIBE_FLAG(RAID5,    "raid5");
		DESCRIBE_FLAG(RAID6,    "raid6");
		if (flags)
			snprintf(bp, buf - bp + sizeof(buf), "|0x%llx", flags);
#undef DESCRIBE_FLAG
	}

	btrfs_info(fs_info,
		   "relocating block group %llu flags %s",
		   block_group->key.objectid, buf + 1);
}
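
/*
 * Example (illustrative, with a made-up bytenr): for a block group with
 * flags == BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1,
 * describe_relocation() builds "|data|raid1" in buf and, skipping the
 * leading '|', logs something like:
 *
 *	BTRFS info (device sda1): relocating block group 13631488 flags data|raid1
 */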

/*
 * function to relocate all extents in a block group.
 */
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct reloc_control *rc;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	int rw = 0;
	int err = 0;

	rc = alloc_reloc_control(fs_info);
	if (!rc)
		return -ENOMEM;

	rc->extent_root = extent_root;

	rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!rc->block_group);

	ret = btrfs_inc_block_group_ro(fs_info, rc->block_group);
	if (ret) {
		err = ret;
		goto out;
	}
	rw = 1;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(fs_info, rc->block_group, path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, rc->block_group,
					       inode, 0);
	else
		ret = PTR_ERR(inode);

	if (ret && ret != -ENOENT) {
		err = ret;
		goto out;
	}

	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

	describe_relocation(fs_info, rc->block_group);

	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
	btrfs_wait_ordered_roots(fs_info, U64_MAX,
				 rc->block_group->key.objectid,
				 rc->block_group->key.offset);

	while (1) {
		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (rc->extents_found == 0)
			break;

		btrfs_info(fs_info, "found %llu extents", rc->extents_found);

		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
						       (u64)-1);
			if (ret) {
				err = ret;
				goto out;
			}
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
out:
	if (err && rw)
		btrfs_dec_block_group_ro(rc->block_group);
	iput(rc->data_inode);
	btrfs_put_block_group(rc->block_group);
	kfree(rc);
	return err;
}

static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret, err;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
	       sizeof(root->root_item.drop_progress));
	root->root_item.drop_level = 0;
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);

	err = btrfs_end_transaction(trans);
	if (err)
		return err;
	return ret;
}
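
/*
 * Note: setting root_refs to 0 in mark_garbage_root() above marks a reloc
 * root whose fs root no longer exists as deletable. btrfs_recover_relocation()
 * below leaves such roots on rc->reloc_roots so that merge_reloc_roots()
 * drops them instead of merging them.
 */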
 * this is important for keeping the sharing of tree blocks.
 */
int btrfs_recover_relocation(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_fs_root(root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = read_fs_root(fs_info,
					       reloc_root->root_key.offset);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	rc->extent_root = fs_info->extent_root;

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		err = PTR_ERR(trans);
		goto out_free;
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			goto out_free;
		}

		err = __add_reloc_root(reloc_root);
		BUG_ON(err < 0); /* -ENOMEM or logic error */
		fs_root->reloc_root = reloc_root;
	}

	err = btrfs_commit_transaction(trans);
	if (err)
		goto out_free;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	err = btrfs_commit_transaction(trans);
out_free:
	kfree(rc);
out:
	if (!list_empty(&reloc_roots))
		free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
		if (IS_ERR(fs_root))
			err = PTR_ERR(fs_root);
		else
			err = btrfs_orphan_cleanup(fs_root);
	}
	return err;
}
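
/*
 * Note: btrfs_recover_relocation() runs when the filesystem becomes
 * read-write (see open_ctree() during mount and btrfs_remount() on an
 * ro->rw transition), so half-merged reloc trees are dealt with before
 * user writes are allowed.
 */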

/*
 * helper to add ordered checksums for data relocation.
 *
 * cloning the existing checksums properly handles nodatasum extents and
 * saves the CPU time that recalculating the checksums would take.
 */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list, 0);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 *
		 * Worked example (made-up numbers): with disk_bytenr == 1M
		 * and ordered->start == 16M, a csum item found at
		 * sums->bytenr == 1M + 4K is rebased to
		 * new_bytenr == 16M + 4K.
		 */
		new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(inode, ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}

int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct reloc_control *rc;
	struct backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (buf == root->node)
			__update_reloc_root(root, cow->start);
	}

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		drop_node_buffer(node);
		extent_buffer_get(cow);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			__mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}
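
/*
 * Note: btrfs_reloc_cow_block() is the hook by which the generic COW path
 * (see __btrfs_cow_block() in ctree.c) feeds newly COWed buffers back into
 * the relocation code: reloc tree buffers update the backref cache above,
 * and leaves COWed for the first time during UPDATE_DATA_PTRS get their
 * file extent pointers rewritten via replace_file_extents().
 */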
 * called before creating a snapshot. it calculates the metadata reservation
 * required for relocating tree blocks in the snapshot.
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct reloc_control *rc;

	root = pending->root;
	if (!root->reloc_root)
		return;

	rc = root->fs_info->reloc_ctl;
	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * relocation is in the stage of merging trees. the space
	 * used by merging a reloc tree is twice the size of the
	 * relocated tree nodes in the worst case: half for COWing
	 * the reloc tree, half for COWing the fs tree. the space
	 * used by COWing the reloc tree will be freed after the
	 * tree is dropped. if we create a snapshot, COWing the fs
	 * tree may use more space than it frees, so we need to
	 * reserve extra space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}

/*
 * called after the snapshot is created. it migrates the block reservation
 * and creates a reloc root for the newly created snapshot.
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc;
	int ret;

	if (!root->reloc_root)
		return 0;

	rc = root->fs_info->reloc_ctl;
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, 1);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	new_root->reloc_root = reloc_root;

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}
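
/*
 * Worked example for the snapshot reservation above (made-up numbers):
 * with a 16KiB nodesize, relocating 1024 tree nodes gives
 * rc->nodes_relocated == 16MiB. A snapshot created while reloc trees are
 * being merged then reserves an extra 16MiB in btrfs_reloc_pre_snapshot(),
 * and btrfs_reloc_post_snapshot() migrates those 16MiB from the pending
 * snapshot's block_rsv into rc->block_rsv to pay for COWing the fs tree.
 */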