// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
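
/*
 * Example (illustrative): find_parent_nodes() below keeps one on-stack
 * instance of each tree and sorts every backref it encounters into one
 * of them:
 *
 *	struct preftrees preftrees = {
 *		.direct = PREFTREE_INIT,
 *		.indirect = PREFTREE_INIT,
 *		.indirect_missing_keys = PREFTREE_INIT
 *	};
 *
 * Indirect refs are resolved to parent block logical addresses and then
 * merged into the direct tree.
 */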

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}
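
/*
 * Example (illustrative): merging a delayed BTRFS_DROP_DELAYED_REF
 * (newref->count == -1) into an existing ref with ref->count == 1 calls
 * update_share_count(sc, 1, 0), decrementing sc->share_count because
 * the merged ref no longer contributes a positive reference.
 */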

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */

static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}
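
/*
 * Example (illustrative): an inline BTRFS_SHARED_BLOCK_REF_KEY backref
 * already names the parent block's logical address, so it goes straight
 * into the direct tree via add_direct_ref(), while an inline
 * BTRFS_TREE_BLOCK_REF_KEY backref only names a root and is queued with
 * add_indirect_ref() (with key == NULL, i.e. into indirect_missing_keys)
 * to be resolved later.
 */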

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
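
/*
 * Example (illustrative): if the same disk bytenr is referenced by file
 * extent items in two different leaves of the searched root (e.g. after
 * an in-file reflink), add_all_parents() adds both leaves' logical
 * addresses to @parents; resolve_indirect_refs() then turns the extra
 * parent into an additional direct ref of its own.
 */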

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes. This happens when qgroups does backref walks when
	 * adding new delayed refs. To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root
	 * usage here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * we can only tolerate ENOENT; otherwise we should catch
		 * the error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}
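
/*
 * Example (illustrative, hypothetical numbers): an indirect ref
 * (root 257, key (261 EXTENT_DATA 0), level 0) that resolves to a leaf
 * at logical 30539776 leaves the loop above as a direct ref with
 * ref->parent == 30539776, merged into preftrees->direct alongside any
 * on-disk shared refs for the same parent block.
 */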

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}
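
/*
 * Example (illustrative): a queued BTRFS_DROP_DELAYED_REF with
 * ref_mod == 1 is added above with count == -1; when prelim_ref_insert()
 * later merges it with the matching on-disk ref (count == 1), the
 * resulting ref->count of 0 drops the extent from the results (see the
 * ref->count checks in find_parent_nodes()).
 */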

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}
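
/*
 * Example (illustrative): keyed backrefs for one extent sit right after
 * its EXTENT_ITEM/METADATA_ITEM in the extent tree, with key.objectid ==
 * bytenr and key.type in the range TREE_BLOCK_REF..SHARED_DATA_REF, so
 * the loop above simply walks forward with btrfs_next_item() and stops
 * at the first key outside that range.
 */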

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and will
 * behave much like the trans == NULL case; the only difference is that it
 * will not use the commit root.
 * This special case exists for qgroup to search roots in
 * commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned. If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically. Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie,
							ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}
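
/*
 * Example (illustrative): iterate_extent_inodes() below shows the
 * intended pairing: btrfs_find_all_leafs() collects the leaves that
 * reference an extent (with the resolved inode list stashed in each
 * ulist node's aux field), and btrfs_find_all_roots_safe() is then run
 * on every returned leaf to discover the referencing roots.
 */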

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			*roots = NULL;
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		       struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}
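
/*
 * Example (illustrative): fiemap is the typical caller; it feeds each
 * file extent's disk bytenr to btrfs_check_shared() and sets
 * FIEMAP_EXTENT_SHARED on the reported extent when 1 is returned.
 */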

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next BTRFS_INODE_EXTREF_KEY item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				      BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_read(eb);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
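
/*
 * Example (illustrative): for refs resolving to the path "a/bb" and
 * size == 8, dest is filled backwards as "a/bb\0" occupying dest[3..7]
 * and dest + 3 is returned. With size == 4 the name no longer fits:
 * bytes_left goes negative, nothing more is written, and dest - 1 is
 * returned, telling the caller that 5 bytes would have been needed.
 */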

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG();
		return 0;
	}

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}
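
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * minimal iterate_extent_inodes_t callback. It is called with the inode
 * number, file offset and root id of every resolved reference, and a
 * non-zero return value stops the iteration (see iterate_leaf_refs()
 * above).
 */
static __maybe_unused int print_extent_ref(u64 inum, u64 offset, u64 root,
					   void *ctx)
{
	pr_info("inode %llu offset %llu root %llu\n", inum, offset, root);
	return 0;
}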

static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx,
			  bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
		    extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}
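
/*
 * Illustrative sketch only, kept out of the build: a minimal
 * iterate_extent_inodes_t callback.  It logs each (inode, offset, root)
 * triple referencing the extent and returns 0 so iteration continues;
 * a non-zero return would stop it.  The function name is ours.
 */
#if 0
static int example_log_extent_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct btrfs_fs_info *fs_info = ctx;

	btrfs_info(fs_info, "extent used by ino %llu offset %llu root %llu",
		   inum, offset, root);
	return 0;
}

/*
 * Possible call, searching the commit root only:
 *	iterate_extent_inodes(fs_info, bytenr, 0, 1,
 *			      example_log_extent_inode, fs_info, false);
 */
#endif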

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, search_commit_root,
				    iterate, ctx, ignore_offset);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid,
				fs_root->root_key.objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt paths available in
 * ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}
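
/*
 * Illustrative sketch only, kept out of the build: sizing a data
 * container.  The space usable for paths is what remains after the
 * header and the pointer slots in ->val[]; the counts below are made-up
 * numbers for the example, not an ABI requirement.
 */
#if 0
static int example_size_data_container(void)
{
	const u32 nr_paths = 16;
	const u32 bytes_per_path = 256;
	struct btrfs_data_container *data;

	data = init_data_container(sizeof(*data) +
			nr_paths * (sizeof(u64) + bytes_per_path));
	if (IS_ERR(data))
		return PTR_ERR(data);
	/* data->bytes_left now covers the val[] slots and the path bytes */
	kvfree(data);
	return 0;
}
#endif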

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}
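
/*
 * Illustrative sketch only, kept out of the build: the full life cycle
 * of an ipath lookup, roughly what the BTRFS_IOC_INO_PATHS ioctl does.
 * The function name and the 4096 byte buffer size are arbitrary choices
 * for the example.
 */
#if 0
static int example_print_paths(struct btrfs_root *fs_root, u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	u32 i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (!ret)
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			pr_info("path %u: %s\n", i,
				(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}
#endif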

struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only support iteration on tree backrefs for now.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, that we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(fs_info->extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				      path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
				path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Go to the next backref item of current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's inline ref or not by iter->cur_key.
 *
 * Return 0 if we get next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
							path->slots[0]);
	return 0;
}
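
/*
 * Illustrative sketch only, kept out of the build: the canonical loop
 * for walking all tree block backrefs of @bytenr with the iterator
 * above, as the relocation backref cache does.  The function name is
 * ours for the example.
 */
#if 0
static int example_walk_backrefs(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	if (ret < 0)
		goto out;

	while (1) {
		/* Inspect iter->cur_key (and iter->cur_ptr for inline refs) */
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;	/* no more backrefs for this bytenr */
	}
	ret = 0;
out:
	btrfs_backref_iter_free(iter);
	return ret;
}
#endif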

void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}

struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
	cache->nr_nodes++;
	node->level = level;
	node->bytenr = bytenr;

	return node;
}

struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

/*
 * Drop the backref node from cache, also cleaning up all its
 * upper edges and any uncached nodes in the path.
 *
 * This cleanup happens bottom up, thus the node should either
 * be the lowest node in the cache or a detached node.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			btrfs_backref_drop_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * Add the node to leaf node list if no other child block
		 * cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}

/*
 * Release all nodes/edges from current cache
 */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct btrfs_backref_node, lower);
		btrfs_backref_cleanup_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->pending_edge));
	ASSERT(list_empty(&cache->useless_node));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}
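
/*
 * Illustrative sketch only, kept out of the build: typical life cycle of
 * a backref cache.  It is embedded in a larger structure (e.g.
 * reloc_control), initialized once, populated with nodes and edges while
 * walking backrefs, and torn down in one call.  The function name is
 * ours for the example.
 */
#if 0
static int example_cache_lifecycle(struct btrfs_fs_info *fs_info,
				   u64 bytenr, int level)
{
	struct btrfs_backref_cache cache;
	struct btrfs_backref_node *node;

	btrfs_backref_init_cache(fs_info, &cache, 1);

	node = btrfs_backref_alloc_node(&cache, bytenr, level);
	if (!node) {
		btrfs_backref_release_cache(&cache);
		return -ENOMEM;
	}

	/*
	 * ... normally the node is linked into the cache via edges and
	 * rb_simple_insert(); a node left unlinked must be freed manually
	 * before the cache is released ...
	 */
	btrfs_backref_free_node(&cache, node);

	btrfs_backref_release_cache(&cache);
	return 0;
}
#endif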

/*
 * Handle direct tree backref
 *
 * Direct tree backref means, the backref item shows its parent bytenr
 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The converted backref key.
 *		For keyed backref, it's the item key.
 *		For inlined backref, objectid is the bytenr,
 *		type is btrfs_inline_ref_type, offset is
 *		btrfs_inline_ref_offset.
 */
static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
				      struct btrfs_key *ref_key,
				      struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *upper;
	struct rb_node *rb_node;

	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);

	/* Only reloc root uses backref pointing to itself */
	if (ref_key->objectid == ref_key->offset) {
		struct btrfs_root *root;

		cur->is_reloc_root = 1;
		/* Only reloc backref cache cares about a specific root */
		if (cache->is_reloc) {
			root = find_reloc_root(cache->fs_info, cur->bytenr);
			if (WARN_ON(!root))
				return -ENOENT;
			cur->root = root;
		} else {
			/*
			 * For generic purpose backref cache, reloc root node
			 * is useless.
			 */
			list_add(&cur->list, &cache->useless_node);
		}
		return 0;
	}

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
	if (!rb_node) {
		/* Parent node not yet cached */
		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
						 cur->level + 1);
		if (!upper) {
			btrfs_backref_free_edge(cache, edge);
			return -ENOMEM;
		}

		/*
		 * Backrefs for the upper level block aren't cached, add the
		 * block to pending list
		 */
		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		/* Parent node already cached */
		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		ASSERT(upper->checked);
		INIT_LIST_HEAD(&edge->list[UPPER]);
	}
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	return 0;
}

/*
 * Handle indirect tree backref
 *
 * Indirect tree backref means, we only know which tree the node belongs to.
 * We still need to do a tree search to find out the parents. This is for
 * TREE_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
 * @tree_key:	The first key of this tree block.
 * @path:	A clean (released) path, to avoid allocating a path every time
 *		the function gets called.
 */
static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
					struct btrfs_path *path,
					struct btrfs_key *ref_key,
					struct btrfs_key *tree_key,
					struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_node *lower;
	struct btrfs_backref_edge *edge;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct rb_node *rb_node;
	int level;
	bool need_check = true;
	int ret;

	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
	if (IS_ERR(root))
		return PTR_ERR(root);
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		cur->cowonly = 1;

	if (btrfs_root_level(&root->root_item) == cur->level) {
		/* Tree root */
		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
		/*
		 * For reloc backref cache, we may ignore reloc root.  But for
		 * general purpose backref cache, we can't rely on
		 * btrfs_should_ignore_reloc_root() as it may conflict with
		 * current running relocation and lead to missing root.
		 *
		 * For general purpose backref cache, reloc root detection is
		 * completely relying on direct backref (key->offset is parent
		 * bytenr), thus only do such check for reloc cache.
		 */
		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
			btrfs_put_root(root);
			list_add(&cur->list, &cache->useless_node);
		} else {
			cur->root = root;
		}
		return 0;
	}

	level = cur->level + 1;

	/* Search the tree to find parent blocks referring to the block */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
	path->lowest_level = 0;
	if (ret < 0) {
		btrfs_put_root(root);
		return ret;
	}
	if (ret > 0 && path->slots[level] > 0)
		path->slots[level]--;

	eb = path->nodes[level];
	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
		btrfs_err(fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
			  cur->bytenr, level - 1, root->root_key.objectid,
			  tree_key->objectid, tree_key->type, tree_key->offset);
		btrfs_put_root(root);
		ret = -ENOENT;
		goto out;
	}
	lower = cur;

	/* Add all nodes and edges in the path */
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level]) {
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       lower->bytenr);
			/* Same as previous should_ignore_reloc_root() call */
			if (btrfs_should_ignore_reloc_root(root) &&
			    cache->is_reloc) {
				btrfs_put_root(root);
				list_add(&lower->list, &cache->useless_node);
			} else {
				lower->root = root;
			}
			break;
		}

		edge = btrfs_backref_alloc_edge(cache);
		if (!edge) {
			btrfs_put_root(root);
			ret = -ENOMEM;
			goto out;
		}

		eb = path->nodes[level];
		rb_node = rb_simple_search(&cache->rb_root, eb->start);
		if (!rb_node) {
			upper = btrfs_backref_alloc_node(cache, eb->start,
							 lower->level + 1);
			if (!upper) {
				btrfs_put_root(root);
				btrfs_backref_free_edge(cache, edge);
				ret = -ENOMEM;
				goto out;
			}
			upper->owner = btrfs_header_owner(eb);
			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
				upper->cowonly = 1;

			/*
			 * If we know the block isn't shared we can avoid
			 * checking its backrefs.
			 */
			if (btrfs_block_can_be_shared(root, eb))
				upper->checked = 0;
			else
				upper->checked = 1;

			/*
			 * Add the block to pending list if we need to check
			 * its backrefs; we only do this once while walking up
			 * a tree as we will catch anything else later on.
			 */
			if (!upper->checked && need_check) {
				need_check = false;
				list_add_tail(&edge->list[UPPER],
					      &cache->pending_edge);
			} else {
				if (upper->checked)
					need_check = true;
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
		} else {
			upper = rb_entry(rb_node, struct btrfs_backref_node,
					 rb_node);
			ASSERT(upper->checked);
			INIT_LIST_HEAD(&edge->list[UPPER]);
			if (!upper->owner)
				upper->owner = btrfs_header_owner(eb);
		}
		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);

		if (rb_node) {
			btrfs_put_root(root);
			break;
		}
		lower = upper;
		upper = NULL;
	}
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * Add backref node @cur into @cache.
 *
 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
 * links aren't yet bi-directional; use btrfs_backref_finish_upper_links()
 * to finish the linkage.
 *
 * @path:	Released path for indirect tree backref lookup
 * @iter:	Released backref iter for extent tree search
 * @node_key:	The first key of the tree block
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;
	/*
	 * We skip the first btrfs_tree_block_info, as we don't use the key
	 * stored in it, but fetch it from the tree block
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref? This means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * The backref was added previously when processing backref of
		 * type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Add the upper level block to pending list if we need to
		 * check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Update key for inline backref */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
			continue;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			continue;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
						   cur);
		if (ret < 0)
			goto out;
	}
	ret = 0;
	cur->checked = 1;
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node to cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added in current build_backref_tree() haven't
		 * been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only node has non-COW-only parent */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}

void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache, so we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this guy's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		if (lower == node)
			node = NULL;
		btrfs_backref_free_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
}
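
/*
 * Illustrative sketch only, kept out of the build: the two-step protocol
 * for building a backref tree, modeled on relocation's
 * build_backref_tree().  Every new node queued on cache->pending_edge
 * must itself go through btrfs_backref_add_tree_node() before the upper
 * links are finalized; errors funnel into btrfs_backref_error_cleanup().
 * The function name is ours for the example.
 */
#if 0
static int example_build_backref_tree(struct btrfs_backref_cache *cache,
				      struct btrfs_path *path,
				      struct btrfs_backref_iter *iter,
				      struct btrfs_key *node_key,
				      struct btrfs_backref_node *cur)
{
	int ret;

	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, cur);
	if (ret < 0)
		goto error;

	/*
	 * ... pop edges from cache->pending_edge here and call
	 * btrfs_backref_add_tree_node() on each new upper node ...
	 */

	ret = btrfs_backref_finish_upper_links(cache, cur);
	if (ret < 0)
		goto error;
	return 0;
error:
	btrfs_backref_error_cleanup(cache, cur);
	return ret;
}
#endif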