/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
				struct btrfs_file_extent_item *fi,
				u64 extent_item_pos,
				struct extent_inode_elem **eie)
{
	u64 data_offset;
	u64 data_len;
	struct extent_inode_elem *e;

	data_offset = btrfs_file_extent_offset(eb, fi);
	data_len = btrfs_file_extent_num_bytes(eb, fi);

	if (extent_item_pos < data_offset ||
	    extent_item_pos >= data_offset + data_len)
		return 1;

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + (extent_item_pos - data_offset);
	*eie = e;

	return 0;
}

static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
				u64 extent_item_pos,
				struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see __add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
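
/*
 * Illustrative example (not taken from a real call site, names shortened for
 * clarity): for column 2 of the on-disk table, a keyed or inline
 * TREE_BLOCK_REF only carries the root to resolve, so the ref is queued as
 *
 *	__add_prelim_ref(prefs, root_objectid, NULL, level + 1, 0, bytenr, 1);
 *
 * and __add_missing_keys() later reads the block at bytenr to fill in
 * key_for_search before the indirect ref is resolved.
 */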

static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level,
			    u64 parent, u64 wanted_disk_byte, int count)
{
	struct __prelim_ref *ref;

	/* in case we're adding delayed refs, we're holding the refs spinlock */
	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
				struct ulist *parents, int level,
				struct btrfs_key *key_for_search, u64 time_seq,
				u64 wanted_disk_byte,
				const u64 *extent_item_pos)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL;
	u64 disk_byte;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		ret = btrfs_next_old_leaf(root, path, time_seq);

	while (!ret) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie);
				if (ret < 0)
					break;
			}
			if (!ret) {
				ret = ulist_add(parents, eb->start,
						(unsigned long)eie, GFP_NOFS);
				if (ret < 0)
					break;
				if (!extent_item_pos) {
					ret = btrfs_next_old_leaf(root, path,
							time_seq);
					continue;
				}
			}
		}
		ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
					int search_commit_root,
					u64 time_seq,
					struct __prelim_ref *ref,
					struct ulist *parents,
					const u64 *extent_item_pos)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	rcu_read_lock();
	root_level = btrfs_header_level(root->node);
	rcu_read_unlock();

	if (root_level + 1 == level)
		goto out;

	path->lowest_level = level;
	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 (unsigned long long)ref->root_id, level, ref->count, ret,
		 (unsigned long long)ref->key_for_search.objectid,
		 ref->key_for_search.type,
		 (unsigned long long)ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (!level) {
			WARN_ON(1);
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
				time_seq, ref->wanted_disk_byte,
				extent_item_pos);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   int search_commit_root, u64 time_seq,
				   struct list_head *head,
				   const u64 *extent_item_pos)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, search_commit_root,
					     time_seq, ref, parents,
					     extent_item_pos);
		if (err) {
			if (ret == 0)
				ret = err;
			continue;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list =
			node ? (struct extent_inode_elem *)node->aux : 0;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				break;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list =
					(struct extent_inode_elem *)node->aux;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}

	ulist_free(parents);
	return ret;
}

static inline int ref_for_same_block(struct __prelim_ref *ref1,
				     struct __prelim_ref *ref2)
{
	if (ref1->level != ref2->level)
		return 0;
	if (ref1->root_id != ref2->root_id)
		return 0;
	if (ref1->key_for_search.type != ref2->key_for_search.type)
		return 0;
	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
		return 0;
	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
		return 0;
	if (ref1->parent != ref2->parent)
		return 0;

	return 1;
}

/*
 * read tree blocks and add keys where required.
 */
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
{
	struct list_head *pos;
	struct extent_buffer *eb;

	list_for_each(pos, head) {
		struct __prelim_ref *ref;
		ref = list_entry(pos, struct __prelim_ref, list);

		if (ref->parent)
			continue;
		if (ref->key_for_search.type)
			continue;
		BUG_ON(!ref->wanted_disk_byte);
		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
				     fs_info->tree_root->leafsize, 0);
		BUG_ON(!eb);
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return 0;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
 *           additionally, we could even add a key range for the blocks we
 *           looked into to merge even more (-> replace unresolved refs by
 *           those having a parent).
 * mode = 2: merge identical parents
 */
static int __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;
			struct __prelim_ref *xchg;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (!ref_for_same_block(ref1, ref2))
					continue;
				if (!ref1->parent && ref2->parent) {
					xchg = ref1;
					ref1 = ref2;
					ref2 = xchg;
				}
				ref1->count += ref2->count;
			} else {
				if (ref1->parent != ref2->parent)
					continue;
				ref1->count += ref2->count;
			}
			list_del(&ref2->list);
			kfree(ref2);
		}

	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	struct btrfs_key key;
	struct btrfs_key op_key = {0};
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);

	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, &op_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, NULL,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return 0;
}

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						*info_level + 1, offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						info_level + 1, key.offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
						bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed);
			mutex_unlock(&head->mutex);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    key.type == BTRFS_EXTENT_ITEM_KEY) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 1);
	if (ret)
		goto out;

	ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
				      &prefs, extent_item_pos);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 2);
	if (ret)
		goto out;

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		if (ref->count < 0)
			WARN_ON(1);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		if (ref->count && ref->parent) {
			struct extent_inode_elem *eie = NULL;
			if (extent_item_pos && !ref->inode_list) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
							info_level);
				eb = read_tree_block(fs_info->extent_root,
						     ref->parent, bsz, 0);
				BUG_ON(!eb);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				ref->inode_list = eie;
				free_extent_buffer(eb);
			}
			ret = ulist_add_merge(refs, ref->parent,
					      (unsigned long)ref->inode_list,
					      (unsigned long *)&eie, GFP_NOFS);
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			BUG_ON(ret < 0);
		}
		kfree(ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}

static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct extent_inode_elem *eie_next;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)node->aux;
		for (; eie; eie = eie_next) {
			eie_next = eie->next;
			kfree(eie);
		}
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller
 * must free each list element). The leafs will be stored in the leafs ulist,
 * which must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr,
				time_seq, *leafs, tmp, extent_item_pos);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
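
/*
 * Minimal usage sketch (illustrative only, not an existing caller; error
 * handling trimmed): resolve all roots referencing an extent and walk the
 * resulting ulist with the iterator API used throughout this file.
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_node *node;
 *	struct ulist_iterator uiter;
 *
 *	if (!btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, &roots)) {
 *		ULIST_ITER_INIT(&uiter);
 *		while ((node = ulist_next(roots, &uiter)))
 *			pr_debug("extent %llu referenced from root %llu\n",
 *				 (unsigned long long)bytenr,
 *				 (unsigned long long)node->val);
 *		ulist_free(roots);
 *	}
 */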
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr,
					time_seq, tmp, *roots, NULL);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
	}

	ulist_free(tmp);
	return 0;
}


static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
				&key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
				found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path.
 * elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
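
/*
 * Sketch of the overflow check described above (hypothetical caller; buffer
 * name and size chosen for illustration): when the returned pointer lies
 * before dest, the path did not fit and the difference gives the number of
 * bytes that were missing.
 *
 *	char buf[4096];
 *	char *name = btrfs_iref_to_path(fs_root, path, iref, eb, parent,
 *					buf, sizeof(buf));
 *	if (!IS_ERR(name) && name < buf)
 *		bytes_missing = buf - name;
 */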
char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			 struct btrfs_inode_ref *iref,
			 struct extent_buffer *eb_in, u64 parent,
			 char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
						(unsigned long)(iref + 1), len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
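
/*
 * Illustrative sketch of a caller (handle_tree_block and handle_data_extent
 * are hypothetical helpers; see iterate_inodes_from_logical() below for a
 * real user): the positive return value is a flag that tells data and tree
 * block extents apart.
 *
 *	ret = extent_from_logical(fs_info, logical, path, &found_key);
 *	if (ret < 0)
 *		return ret;
 *	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 *		handle_tree_block(&found_key);
 *	else if (ret & BTRFS_EXTENT_FLAG_DATA)
 *		handle_data_extent(&found_key);
 */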
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
					0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical) {
		pr_debug("logical %llu is not within any extent\n",
			 (unsigned long long)logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 (unsigned long long)logical,
		 (unsigned long long)(logical - found_key->objectid),
		 (unsigned long long)found_key->objectid,
		 (unsigned long long)found_key->offset,
		 (unsigned long long)flags, item_size);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				struct btrfs_extent_inline_ref **out_eiref,
				int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_extent_item *ei, u32 item_size,
			    u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
						&eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
				u64 root, u64 extent_item_objectid,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", extent_item_objectid,
			 eie->inum, eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			pr_debug("stopping iteration for %llu due to ret=%d\n",
				 extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
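
/*
 * Sketch of a hypothetical iterate_extent_inodes_t callback
 * (count_inode_refs and the counter passed via ctx are made up for
 * illustration): it receives (inum, offset, root) for every reference found
 * and returns 0 to keep the iteration going.
 *
 *	static int count_inode_refs(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		(*(u64 *)ctx)++;
 *		return 0;
 *	}
 *
 *	u64 nrefs = 0;
 *	ret = iterate_extent_inodes(fs_info, extent_item_objectid,
 *				    extent_item_pos, 1, count_inode_refs,
 *				    &nrefs);
 */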
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct btrfs_trans_handle *trans;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = {};
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	pr_debug("resolving all inodes for extent %llu\n",
			extent_item_objectid);

	if (search_commit_root) {
		trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
	} else {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
					   tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#lx\n", root_node->val, ref_node->val,
				 ref_node->aux);
			ret = iterate_leaf_refs(
				(struct extent_inode_elem *)ref_node->aux,
				root_node->val, extent_item_objectid,
				iterate, ctx);
		}
		ulist_free(roots);
		roots = NULL;
	}

	free_leaf_list(refs);
	ulist_free(roots);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path,
				  &found_key);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		path->leave_spinning = 1;
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
					&found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur,
				 (unsigned long long)found_key.objectid,
				 (unsigned long long)fs_root->objectid);
			ret = iterate(parent, iref, eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
				struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
				    inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		pr_debug("path resolved: %s\n", fspath);
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		pr_debug("missed path, not enough space. missing bytes: %lu, "
			 "constructed so far: %s\n",
			 (unsigned long)(fspath_min - fspath), fspath_min);
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
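
/*
 * Usage sketch (illustrative only; assumes an ipath obtained from
 * init_ipath(), see below): after paths_from_inode() returns 0, the resolved
 * paths can be read back from ipath->fspath->val[].
 *
 *	if (!paths_from_inode(inum, ipath)) {
 *		int i;
 *
 *		for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 *			pr_debug("path %d: %s\n", i,
 *				 (char *)(unsigned long)ipath->fspath->val[i]);
 *	}
 */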
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
				inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kfree(ipath->fspath);
	kfree(ipath);
}