/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
				struct btrfs_file_extent_item *fi,
				u64 extent_item_pos,
				struct extent_inode_elem **eie)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
				u64 extent_item_pos,
				struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct __prelim_ref),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_prelim_ref_exit(void)
{
	if (btrfs_prelim_ref_cache)
		kmem_cache_destroy(btrfs_prelim_ref_cache);
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see __add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */

static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level,
			    u64 parent, u64 wanted_disk_byte, int count,
			    gfp_t gfp_mask)
{
	struct __prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, struct __prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   u64 total_refs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		ret = btrfs_next_old_leaf(root, path, time_seq);

	while (!ret && count < total_refs) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			count++;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge(parents, eb->start,
					      (uintptr_t)eie,
					      (u64 *)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path, u64 time_seq,
				  struct __prelim_ref *ref,
				  struct ulist *parents,
				  const u64 *extent_item_pos, u64 total_refs)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	int index;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = PTR_ERR(root);
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		goto out;
	}

	path->lowest_level = level;
	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);

	/* root node has been locked, we can release @subvol_srcu safely here */
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, ref, level, time_seq,
			      extent_item_pos, total_refs);
out:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   struct btrfs_path *path, u64 time_seq,
				   struct list_head *head,
				   const u64 *extent_item_pos, u64 total_refs)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
					     parents, extent_item_pos,
					     total_refs);
		/*
		 * we can only tolerate ENOENT, otherwise we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			continue;
		} else if (err) {
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = node ?
			(struct extent_inode_elem *)(uintptr_t)node->aux : NULL;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = (struct extent_inode_elem *)
							(uintptr_t)node->aux;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}
out:
	ulist_free(parents);
	return ret;
}

static inline int ref_for_same_block(struct __prelim_ref *ref1,
				     struct __prelim_ref *ref2)
{
	if (ref1->level != ref2->level)
		return 0;
	if (ref1->root_id != ref2->root_id)
		return 0;
	if (ref1->key_for_search.type != ref2->key_for_search.type)
		return 0;
	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
		return 0;
	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
		return 0;
	if (ref1->parent != ref2->parent)
		return 0;

	return 1;
}

/*
 * read tree blocks and add keys where required.
 */
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
{
	struct list_head *pos;
	struct extent_buffer *eb;

	list_for_each(pos, head) {
		struct __prelim_ref *ref;
		ref = list_entry(pos, struct __prelim_ref, list);

		if (ref->parent)
			continue;
		if (ref->key_for_search.type)
			continue;
		BUG_ON(!ref->wanted_disk_byte);
		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
				     fs_info->tree_root->leafsize, 0);
		if (!eb || !extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return 0;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
 *           additionally, we could even add a key range for the blocks we
 *           looked into to merge even more (-> replace unresolved refs by
 *           those having a parent).
 * mode = 2: merge identical parents
 */
static void __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;
			struct __prelim_ref *xchg;
			struct extent_inode_elem *eie;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (!ref_for_same_block(ref1, ref2))
					continue;
				if (!ref1->parent && ref2->parent) {
					xchg = ref1;
					ref1 = ref2;
					ref2 = xchg;
				}
			} else {
				if (ref1->parent != ref2->parent)
					continue;
			}

			eie = ref1->inode_list;
			while (eie && eie->next)
				eie = eie->next;
			if (eie)
				eie->next = ref2->inode_list;
			else
				ref1->inode_list = ref2->inode_list;
			ref1->count += ref2->count;

			list_del(&ref2->list);
			kmem_cache_free(btrfs_prelim_ref_cache, ref2);
		}

	}
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller or equal that seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct list_head *prefs, u64 *total_refs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	struct btrfs_key key;
	struct btrfs_key op_key = {0};
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);

	spin_lock(&head->lock);
	n = rb_first(&head->ref_root);
	while (n) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		n = rb_next(n);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		*total_refs += (node->ref_mod * sgn);
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, &op_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, NULL,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			break;
	}
	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs,
			     u64 *total_refs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	*total_refs += btrfs_extent_refs(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						*info_level + 1, offset,
						bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						info_level + 1, key.offset,
						bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
						bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;
	struct extent_inode_elem *eie = NULL;
	u64 total_refs = 0;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed, &total_refs);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs,
						&total_refs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos, total_refs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		WARN_ON(ref->count < 0);
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
						       info_level);
				eb = read_tree_block(fs_info->extent_root,
						     ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge(refs, ref->parent,
					      (uintptr_t)ref->inode_list,
					      (u64 *)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr,
				time_seq, *leafs, NULL, extent_item_pos);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info, u64 bytenr,
				  u64 time_seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr,
					time_seq, tmp, *roots, NULL);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			struct btrfs_path *path)
{
	struct btrfs_key key;
	return btrfs_find_item(fs_root, path, inum, ioff,
			BTRFS_INODE_ITEM_KEY, &key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				struct btrfs_key *found_key)
{
	return btrfs_find_item(fs_root, path, inum, ioff,
			BTRFS_INODE_REF_KEY, found_key);
}

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->extent_root->leafsize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		pr_debug("logical %llu is not within any extent\n", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG_ON(1);
		return 0;
	}

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_extent_item *ei, u32 item_size,
			    u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
				u64 root, u64 extent_item_objectid,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", extent_item_objectid,
			 eie->inum, eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			pr_debug("stopping iteration for %llu due to ret=%d\n",
				 extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = {};
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	pr_debug("resolving all inodes for extent %llu\n",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	} else {
		down_read(&fs_info->commit_root_sem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
					     tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#llx\n", root_node->val, ref_node->val,
				 ref_node->aux);
			ret = iterate_leaf_refs((struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
				     &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur, found_key.objectid,
				 fs_root->objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(leaf, extref);
			cur_offset += sizeof(*extref);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = vmalloc(alloc_bytes);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		/* fspath was allocated with vmalloc(), so free it with vfree() */
		vfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	vfree(ipath->fspath);
	kfree(ipath);
}
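
/*
 * Illustrative usage sketch (not compiled, not part of the API documentation):
 * a caller that wants every filesystem path for some inode number "inum"
 * (a placeholder here) in "fs_root" would typically combine the helpers above
 * roughly like this, similar in spirit to the ino-to-path ioctl:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath)) {
 *		ret = PTR_ERR(ipath);
 *	} else {
 *		ret = paths_from_inode(inum, ipath);
 *		if (!ret) {
 *			// ipath->fspath->val[0 .. elem_cnt - 1] now hold the
 *			// 0-terminated paths; elem_missed and bytes_missing
 *			// tell the caller whether 4096 bytes were enough.
 *		}
 *		free_ipath(ipath);
 *	}
 *	btrfs_free_path(path);
 */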