/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
			      struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
			     u64 extent_item_pos,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct __prelim_ref),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_prelim_ref_exit(void)
{
	if (btrfs_prelim_ref_cache)
		kmem_cache_destroy(btrfs_prelim_ref_cache);
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see __add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
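
/*
 * as an example for the on-disk table above: an inline
 * BTRFS_TREE_BLOCK_REF_KEY is an indirect tree ref (column 2). it gives us
 * the root to resolve in and the tree block logical, but neither a parent
 * nor a key. __add_missing_keys() later reads the first key of that block
 * so that __resolve_indirect_ref() can search the root for it.
 */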

static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level,
			    u64 parent, u64 wanted_disk_byte, int count,
			    gfp_t gfp_mask)
{
	struct __prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, struct __prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   u64 total_refs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		ret = btrfs_next_old_leaf(root, path, time_seq);

	while (!ret && count < total_refs) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			count++;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge(parents, eb->start,
					      (uintptr_t)eie,
					      (u64 *)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path, u64 time_seq,
				  struct __prelim_ref *ref,
				  struct ulist *parents,
				  const u64 *extent_item_pos, u64 total_refs)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	int index;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);
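	/*
	 * the subvol_srcu read lock keeps the fs root we look up below from
	 * going away until we have locked its tree (see the unlock comments
	 * further down)
	 */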

	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = PTR_ERR(root);
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		goto out;
	}

	path->lowest_level = level;
	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);

	/* root node has been locked, we can release @subvol_srcu safely here */
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, ref, level, time_seq,
			      extent_item_pos, total_refs);
out:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   struct btrfs_path *path, u64 time_seq,
				   struct list_head *head,
				   const u64 *extent_item_pos, u64 total_refs)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
					     parents, extent_item_pos,
					     total_refs);
		/*
		 * we can only tolerate ENOENT; otherwise, we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			continue;
		} else if (err) {
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = node ?
			(struct extent_inode_elem *)(uintptr_t)node->aux : NULL;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = (struct extent_inode_elem *)
							(uintptr_t)node->aux;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}
out:
	ulist_free(parents);
	return ret;
}

static inline int ref_for_same_block(struct __prelim_ref *ref1,
				     struct __prelim_ref *ref2)
{
	if (ref1->level != ref2->level)
		return 0;
	if (ref1->root_id != ref2->root_id)
		return 0;
	if (ref1->key_for_search.type != ref2->key_for_search.type)
		return 0;
	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
		return 0;
	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
		return 0;
	if (ref1->parent != ref2->parent)
		return 0;

	return 1;
}

/*
 * read tree blocks and add keys where required.
 */
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
{
	struct list_head *pos;
	struct extent_buffer *eb;

	list_for_each(pos, head) {
		struct __prelim_ref *ref;
		ref = list_entry(pos, struct __prelim_ref, list);

		if (ref->parent)
			continue;
		if (ref->key_for_search.type)
			continue;
		BUG_ON(!ref->wanted_disk_byte);
		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
				     fs_info->tree_root->leafsize, 0);
		if (!eb || !extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return 0;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 *           FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
 *                  additionally, we could even add a key range for the blocks we
 *                  looked into to merge even more (-> replace unresolved refs by those
 *                  having a parent).
 * mode = 2: merge identical parents
 */
static void __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;
			struct __prelim_ref *xchg;
			struct extent_inode_elem *eie;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (!ref_for_same_block(ref1, ref2))
					continue;
				if (!ref1->parent && ref2->parent) {
					xchg = ref1;
					ref1 = ref2;
					ref2 = xchg;
				}
			} else {
				if (ref1->parent != ref2->parent)
					continue;
			}

			eie = ref1->inode_list;
			while (eie && eie->next)
				eie = eie->next;
			if (eie)
				eie->next = ref2->inode_list;
			else
				ref1->inode_list = ref2->inode_list;
			ref1->count += ref2->count;

			list_del(&ref2->list);
			kmem_cache_free(btrfs_prelim_ref_cache, ref2);
		}

	}
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to that seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct list_head *prefs, u64 *total_refs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	struct btrfs_key key;
	struct btrfs_key op_key = {0};
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);

	spin_lock(&head->lock);
	n = rb_first(&head->ref_root);
	while (n) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		n = rb_next(n);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		*total_refs += (node->ref_mod * sgn);
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, &op_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, NULL,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			break;
	}
	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs,
			     u64 *total_refs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	*total_refs += btrfs_extent_refs(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						*info_level + 1, offset,
						bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						info_level + 1, key.offset,
						bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
						bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;
	struct extent_inode_elem *eie = NULL;
	u64 total_refs = 0;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY)) {
#else
	if (trans) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed, &total_refs);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs,
						&total_refs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos, total_refs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		WARN_ON(ref->count < 0);
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
							ref->level);
				eb = read_tree_block(fs_info->extent_root,
						     ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge(refs, ref->parent,
					      (uintptr_t)ref->inode_list,
					      (u64 *)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
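
	/*
	 * on success, prefs (and prefs_delayed, which was spliced into it) is
	 * empty at this point, so the cleanup loops below only matter for the
	 * error paths that jump to out
	 */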

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs will be stored in the leafs ulist, which must be freed
 * with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr,
				time_seq, *leafs, NULL, extent_item_pos);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
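
/*
 * the tmp ulist below doubles as the worklist: each find_parent_nodes() call
 * appends the parent bytenrs it finds to tmp, and ulist_next() also visits
 * entries added during the iteration, so the loop terminates once no new
 * parents show up
 */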
static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info, u64 bytenr,
				  u64 time_seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr,
					time_seq, tmp, *roots, NULL);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			struct btrfs_path *path)
{
	struct btrfs_key key;
	return btrfs_find_item(fs_root, path, inum, ioff,
			BTRFS_INODE_ITEM_KEY, &key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				struct btrfs_key *found_key)
{
	return btrfs_find_item(fs_root, path, inum, ioff,
			BTRFS_INODE_REF_KEY, found_key);
}

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next BTRFS_INODE_EXTREF_KEY item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
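
/*
 * the search key below uses offset (u64)-1 so that btrfs_search_slot() lands
 * past the last item for that objectid; btrfs_previous_extent_item() then
 * steps back to the closest extent/metadata item starting at or before
 * @logical, and the range check rejects it if @logical is not covered by
 * that extent
 */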
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->extent_root->leafsize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		pr_debug("logical %llu is not within any extent\n", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG_ON(1);
		return 0;
	}

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
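
/*
 * note the on-disk layout handled on the first call: for an EXTENT_ITEM tree
 * block a btrfs_tree_block_info sits between the extent item and the first
 * inline ref, while a skinny METADATA_ITEM has its inline refs directly
 * after the extent item
 */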
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_key *key,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
				u64 root, u64 extent_item_objectid,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", extent_item_objectid,
			 eie->inum, eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			pr_debug("stopping iteration for %llu due to ret=%d\n",
				 extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
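
/*
 * minimal usage sketch (hypothetical callback, for illustration only):
 *
 *	static int my_inode_cb(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		pr_info("inode %llu offset %llu root %llu\n", inum, offset, root);
 *		return 0;
 *	}
 *
 *	ret = iterate_extent_inodes(fs_info, extent_bytenr, extent_offset,
 *				    0, my_inode_cb, NULL);
 *
 * the 0 is search_commit_root; a non-zero return from the callback stops the
 * iteration. extent_bytenr and extent_offset typically come from
 * extent_from_logical(), as done in iterate_inodes_from_logical() below.
 */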
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = {};
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	pr_debug("resolving all inodes for extent %llu\n",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	} else {
		down_read(&fs_info->commit_root_sem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
					     tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#llx\n", root_node->val, ref_node->val,
				 ref_node->aux);
			ret = iterate_leaf_refs((struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
				     &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur, found_key.objectid,
				 fs_root->objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		/* read from the cloned eb, the path has been released above */
		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = vmalloc(alloc_bytes);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		/* fspath was vmalloc'ed by init_data_container, use vfree */
		vfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	vfree(ipath->fspath);
	kfree(ipath);
}