/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
			      struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}
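/*
 * Worked example for the mapping above (all numbers are illustrative
 * assumptions, not taken from a real filesystem): a file extent item at
 * file offset key->offset == 100K with a data offset of 8K and num_bytes
 * of 64K uses the bytes [8K, 72K) of the on-disk extent. An
 * extent_item_pos of 20K lies inside that range, so
 * offset = 20K - 8K = 12K and the inode is recorded as referencing the
 * searched extent byte at file offset 100K + 12K = 112K. An
 * extent_item_pos of 4K falls outside the range and makes
 * check_extent_in_eb() return 1 (not an error, just "skip this item").
 */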
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
			     u64 extent_item_pos,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct __prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see __add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level,
			    u64 parent, u64 wanted_disk_byte, int count,
			    gfp_t gfp_mask)
{
	struct __prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key) {
		ref->key_for_search = *key;
		/*
		 * We can often find data backrefs with an offset that is too
		 * large (>= LLONG_MAX, maximum allowed file offset) due to
		 * underflows when subtracting a file's offset with the data
		 * offset of its corresponding extent data item. This can
		 * happen for example in the clone ioctl.
		 * So if we detect such case we set the search key's offset to
		 * zero to make sure we will find the matching file extent item
		 * at add_all_parents(), otherwise we will miss it because the
		 * offset taken from the backref is much larger than the offset
		 * of the file extent item. This can make us scan a very large
		 * number of file extent items, but at least it will not make
		 * us miss any.
		 * This is an ugly workaround for a behaviour that should have
		 * never existed, but it does and a fix for the clone ioctl
		 * would touch a lot of places, cause backwards incompatibility
		 * and would not fix the problem for extents cloned with older
		 * kernels.
		 */
		if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
		    ref->key_for_search.offset >= LLONG_MAX)
			ref->key_for_search.offset = 0;
	} else {
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
	}

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}
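/*
 * Illustrative sketch of the underflow described above, with made-up
 * numbers: cloning data at file offset 4K of one file, backed by a file
 * extent item with a 16K data offset into its extent, to file offset 0
 * of another file yields a data backref offset of 0 - 16K. In u64
 * arithmetic that wraps to 18446744073709535232 (>= LLONG_MAX), so the
 * clamp above resets key_for_search.offset to 0 instead of searching for
 * a file offset that no file extent item can legally have.
 */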
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, struct __prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   u64 total_refs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot == nritems. In that case, go to the next leaf before we
	 * continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		if (time_seq == (u64)-1)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < total_refs) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			count++;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == (u64)-1)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path, u64 time_seq,
				  struct __prelim_ref *ref,
				  struct ulist *parents,
				  const u64 *extent_item_pos, u64 total_refs)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	int index;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_get_fs_root(fs_info, &root_key, false);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = PTR_ERR(root);
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == (u64)-1)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		goto out;
	}

	path->lowest_level = level;
	if (time_seq == (u64)-1)
		ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
					0, 0);
	else
		ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
					    time_seq);

	/* root node has been locked, we can release @subvol_srcu safely here */
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, ref, level, time_seq,
			      extent_item_pos, total_refs);
out:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   struct btrfs_path *path, u64 time_seq,
				   struct list_head *head,
				   const u64 *extent_item_pos, u64 total_refs,
				   u64 root_objectid)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		if (root_objectid && ref->root_id != root_objectid) {
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
					     parents, extent_item_pos,
					     total_refs);
		/*
		 * we can only tolerate ENOENT; otherwise, we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			continue;
		} else if (err) {
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = node ?
			(struct extent_inode_elem *)(uintptr_t)node->aux : NULL;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = (struct extent_inode_elem *)
							(uintptr_t)node->aux;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}
out:
	ulist_free(parents);
	return ret;
}

static inline int ref_for_same_block(struct __prelim_ref *ref1,
				     struct __prelim_ref *ref2)
{
	if (ref1->level != ref2->level)
		return 0;
	if (ref1->root_id != ref2->root_id)
		return 0;
	if (ref1->key_for_search.type != ref2->key_for_search.type)
		return 0;
	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
		return 0;
	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
		return 0;
	if (ref1->parent != ref2->parent)
		return 0;

	return 1;
}
/*
 * read tree blocks and add keys where required.
 */
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
{
	struct __prelim_ref *ref;
	struct extent_buffer *eb;

	list_for_each_entry(ref, head, list) {
		if (ref->parent)
			continue;
		if (ref->key_for_search.type)
			continue;
		BUG_ON(!ref->wanted_disk_byte);
		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
				     0);
		if (IS_ERR(eb)) {
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return 0;
}

/*
 * merge backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
 *           additionally, we could even add a key range for the blocks we
 *           looked into to merge even more (-> replace unresolved refs by
 *           those having a parent).
 * mode = 2: merge identical parents
 */
static void __merge_refs(struct list_head *head, int mode)
{
	struct __prelim_ref *pos1;

	list_for_each_entry(pos1, head, list) {
		struct __prelim_ref *pos2 = pos1, *tmp;

		list_for_each_entry_safe_continue(pos2, tmp, head, list) {
			struct __prelim_ref *ref1 = pos1, *ref2 = pos2;
			struct extent_inode_elem *eie;

			if (!ref_for_same_block(ref1, ref2))
				continue;
			if (mode == 1) {
				if (!ref1->parent && ref2->parent)
					swap(ref1, ref2);
			} else {
				if (ref1->parent != ref2->parent)
					continue;
			}

			eie = ref1->inode_list;
			while (eie && eie->next)
				eie = eie->next;
			if (eie)
				eie->next = ref2->inode_list;
			else
				ref1->inode_list = ref2->inode_list;
			ref1->count += ref2->count;

			list_del(&ref2->list);
			kmem_cache_free(btrfs_prelim_ref_cache, ref2);
			cond_resched();
		}
	}
}
/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct list_head *prefs, u64 *total_refs,
			      u64 inum)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key op_key = {0};
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);

	spin_lock(&head->lock);
	list_for_each_entry(node, &head->ref_list, list) {
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		*total_refs += (node->ref_mod * sgn);
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, &op_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, 0, NULL,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (inum && ref->objectid != inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);
			ret = __add_prelim_ref(prefs, 0, NULL, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			break;
	}
	spin_unlock(&head->lock);
	return ret;
}
/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs,
			     u64 *total_refs, u64 inum)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	*total_refs += btrfs_extent_refs(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						*info_level + 1, offset,
						bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (inum && key.objectid != inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs, u64 inum)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						info_level + 1, key.offset,
						bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
						bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (inum && key.objectid != inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * NOTE: This can return values > 0
 *
 * If time_seq is set to (u64)-1, it will not search delayed_refs, and behaves
 * much like the trans == NULL case, the difference being only that it will
 * not use the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     u64 root_objectid, u64 inum)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;
	struct extent_inode_elem *eie = NULL;
	u64 total_refs = 0;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == (u64)-1)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != (u64)-1) {
#else
	if (trans && time_seq != (u64)-1) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed, &total_refs,
						 inum);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs,
						&total_refs, inum);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs, inum);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos, total_refs,
				      root_objectid);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		WARN_ON(ref->count < 0);
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (root_objectid && ref->root_id != root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info->extent_root,
						     ref->parent, 0);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				btrfs_tree_read_lock(eb);
				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}
/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs will be stored in the leafs ulist, which must be freed
 * with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr,
				time_seq, *leafs, NULL, extent_item_pos, 0, 0);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info, u64 bytenr,
				  u64 time_seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr,
					time_seq, tmp, *roots, NULL, 0, 0);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}
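/*
 * Illustrative sketch (not compiled as part of this file) of how
 * btrfs_find_all_roots() is meant to be consumed; "bytenr" naming the
 * extent's logical address is an assumption of the example:
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *	int ret;
 *
 *	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &roots);
 *	if (ret == 0) {
 *		ULIST_ITER_INIT(&uiter);
 *		while ((node = ulist_next(roots, &uiter)))
 *			pr_debug("extent %llu seen in root %llu\n",
 *				 bytenr, node->val);
 *		ulist_free(roots);
 *	}
 */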
/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * @trans: optional trans handle
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 root_objectid,
		       u64 inum, u64 bytenr)
{
	struct ulist *tmp = NULL;
	struct ulist *roots = NULL;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;

	tmp = ulist_alloc(GFP_NOFS);
	roots = ulist_alloc(GFP_NOFS);
	if (!tmp || !roots) {
		ulist_free(tmp);
		ulist_free(roots);
		return -ENOMEM;
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &elem);
	else
		down_read(&fs_info->commit_root_sem);
	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, root_objectid, inum);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}
	if (trans)
		btrfs_put_tree_mod_seq(fs_info, &elem);
	else
		up_read(&fs_info->commit_root_sem);
	ulist_free(tmp);
	ulist_free(roots);
	return ret;
}

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have a different objectid
		 * or type then there are no more to be found in the tree and
		 * we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}
/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
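/*
 * Illustrative sketch (not compiled as part of this file) of consuming
 * btrfs_ref_to_path() including the overflow convention documented above;
 * name_len/name_off/eb/parent are assumed to come from an inode ref item,
 * dest/size from the caller:
 *
 *	char *start;
 *
 *	start = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				  eb, parent, dest, size);
 *	if (IS_ERR(start))
 *		return PTR_ERR(start);
 *	if (start < dest) {
 *		// overflow: dest + size - start bytes would have been needed
 *		return -ENAMETOOLONG;
 *	}
 *	pr_debug("resolved path: %s\n", start);
 */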
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->extent_root->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		pr_debug("logical %llu is not within any extent\n", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG_ON(1);
		return 0;
	}

	return -EIO;
}
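/*
 * Illustrative sketch (not compiled as part of this file): resolving a
 * logical address to its extent item and rejecting metadata, mirroring
 * what iterate_inodes_from_logical() below does; "logical" and "path"
 * are assumed to be provided by the caller, and -EINVAL is what a caller
 * that only handles data extents might return:
 *
 *	struct btrfs_key found_key;
 *	u64 flags = 0;
 *	u64 extent_item_pos;
 *	int ret;
 *
 *	ret = extent_from_logical(fs_info, logical, path, &found_key,
 *				  &flags);
 *	btrfs_release_path(path);
 *	if (ret < 0)
 *		return ret;
 *	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 *		return -EINVAL;
 *	extent_item_pos = logical - found_key.objectid;
 */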
/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_key *key,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
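/*
 * Illustrative sketch (not compiled as part of this file) of the
 * iteration protocol described above: ptr starts at 0 and is advanced
 * internally until all tree block backrefs of one extent item have been
 * returned; eb/key/ei/item_size are assumed to describe that extent item
 * (cf. the scrub code, which consumes this helper the same way):
 *
 *	unsigned long ptr = 0;
 *	u64 root;
 *	u8 level;
 *	int ret;
 *
 *	do {
 *		ret = tree_backref_for_extent(&ptr, eb, &key, ei,
 *					      item_size, &root, &level);
 *		if (ret < 0)
 *			return ret;
 *		if (ret == 0)
 *			pr_debug("tree backref: root %llu level %u\n",
 *				 root, level);
 *	} while (ret == 0);
 */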
static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", extent_item_objectid,
			 eie->inum, eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			pr_debug("stopping iteration for %llu due to ret=%d\n",
				 extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	pr_debug("resolving all inodes for extent %llu\n",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	} else {
		down_read(&fs_info->commit_root_sem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
					     tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#llx\n", root_node->val, ref_node->val,
				 ref_node->aux);
			ret = iterate_leaf_refs((struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx);

	return ret;
}
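/*
 * Illustrative sketch (not compiled as part of this file) of an
 * iterate_extent_inodes_t callback; the context struct "count_ctx" and
 * all values passed in are assumptions of the example:
 *
 *	struct count_ctx {
 *		u64 hits;
 *	};
 *
 *	static int count_one_inode(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		struct count_ctx *c = ctx;
 *
 *		pr_debug("inode %llu offset %llu root %llu\n",
 *			 inum, offset, root);
 *		c->hits++;
 *		return 0;
 *	}
 *
 * A non-zero return from the callback stops the iteration. The callback
 * would be registered e.g. as:
 *
 *	ret = iterate_extent_inodes(fs_info, found_key.objectid,
 *				    extent_item_pos, 0, count_one_inode,
 *				    &my_ctx);
 */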
typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur, found_key.objectid,
				 fs_root->objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}
static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}
/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = vmalloc(alloc_bytes);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		vfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	vfree(ipath->fspath);
	kfree(ipath);
}
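/*
 * Illustrative sketch (not compiled as part of this file) of the full
 * ipath flow implemented above, patterned after the INO_PATHS ioctl; the
 * 4096 byte budget and "inum" are assumptions of the example:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath;
 *	int i, ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath)) {
 *		btrfs_free_path(path);
 *		return PTR_ERR(ipath);
 *	}
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; !ret && i < ipath->fspath->elem_cnt; ++i)
 *		pr_debug("path %d: %s\n", i, (char *)(unsigned long)
 *			 ipath->fspath->val[i]);
 *	free_ipath(ipath);
 *	btrfs_free_path(path);
 *	return ret;
 */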