xref: /openbmc/linux/fs/btrfs/backref.c (revision 680ef72a)
1 /*
2  * Copyright (C) 2011 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/mm.h>
20 #include <linux/rbtree.h>
21 #include <trace/events/btrfs.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "backref.h"
25 #include "ulist.h"
26 #include "transaction.h"
27 #include "delayed-ref.h"
28 #include "locking.h"
29 
30 /* Just an arbitrary number so we can be sure this happened */
31 #define BACKREF_FOUND_SHARED 6
32 
33 struct extent_inode_elem {
34 	u64 inum;
35 	u64 offset;
36 	struct extent_inode_elem *next;
37 };
38 
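/*
 * Check whether the file extent item @fi in leaf @eb covers @extent_item_pos
 * and, if so, prepend an (inum, file offset) element to the @eie list.  The
 * position check is skipped for @ignore_offset and for compressed or
 * otherwise encoded extents.  Returns 1 when the position lies outside this
 * file extent, 0 on success and -ENOMEM if the element cannot be allocated.
 */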
39 static int check_extent_in_eb(const struct btrfs_key *key,
40 			      const struct extent_buffer *eb,
41 			      const struct btrfs_file_extent_item *fi,
42 			      u64 extent_item_pos,
43 			      struct extent_inode_elem **eie,
44 			      bool ignore_offset)
45 {
46 	u64 offset = 0;
47 	struct extent_inode_elem *e;
48 
49 	if (!ignore_offset &&
50 	    !btrfs_file_extent_compression(eb, fi) &&
51 	    !btrfs_file_extent_encryption(eb, fi) &&
52 	    !btrfs_file_extent_other_encoding(eb, fi)) {
53 		u64 data_offset;
54 		u64 data_len;
55 
56 		data_offset = btrfs_file_extent_offset(eb, fi);
57 		data_len = btrfs_file_extent_num_bytes(eb, fi);
58 
59 		if (extent_item_pos < data_offset ||
60 		    extent_item_pos >= data_offset + data_len)
61 			return 1;
62 		offset = extent_item_pos - data_offset;
63 	}
64 
65 	e = kmalloc(sizeof(*e), GFP_NOFS);
66 	if (!e)
67 		return -ENOMEM;
68 
69 	e->next = *eie;
70 	e->inum = key->objectid;
71 	e->offset = key->offset + offset;
72 	*eie = e;
73 
74 	return 0;
75 }
76 
77 static void free_inode_elem_list(struct extent_inode_elem *eie)
78 {
79 	struct extent_inode_elem *eie_next;
80 
81 	for (; eie; eie = eie_next) {
82 		eie_next = eie->next;
83 		kfree(eie);
84 	}
85 }
86 
87 static int find_extent_in_eb(const struct extent_buffer *eb,
88 			     u64 wanted_disk_byte, u64 extent_item_pos,
89 			     struct extent_inode_elem **eie,
90 			     bool ignore_offset)
91 {
92 	u64 disk_byte;
93 	struct btrfs_key key;
94 	struct btrfs_file_extent_item *fi;
95 	int slot;
96 	int nritems;
97 	int extent_type;
98 	int ret;
99 
100 	/*
101 	 * From the shared data ref, we only have the leaf but we need
102 	 * the key. Thus, we must look at all items and check whether we
103 	 * find one (or more) with a reference to our extent item.
104 	 */
105 	nritems = btrfs_header_nritems(eb);
106 	for (slot = 0; slot < nritems; ++slot) {
107 		btrfs_item_key_to_cpu(eb, &key, slot);
108 		if (key.type != BTRFS_EXTENT_DATA_KEY)
109 			continue;
110 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
111 		extent_type = btrfs_file_extent_type(eb, fi);
112 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
113 			continue;
114 		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
115 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
116 		if (disk_byte != wanted_disk_byte)
117 			continue;
118 
119 		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
120 		if (ret < 0)
121 			return ret;
122 	}
123 
124 	return 0;
125 }
126 
127 struct preftree {
128 	struct rb_root root;
129 	unsigned int count;
130 };
131 
132 #define PREFTREE_INIT	{ .root = RB_ROOT, .count = 0 }
133 
134 struct preftrees {
135 	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
136 	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
137 	struct preftree indirect_missing_keys;
138 };
139 
140 /*
141  * Checks for a shared extent during backref search.
142  *
143  * The share_count tracks prelim_refs (direct and indirect) having a
144  * ref->count >0:
145  *  - incremented when a ref->count transitions to >0
146  *  - decremented when a ref->count transitions to <1
147  */
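/*
 * For example, a ref merged in with its count going 0 -> 2 increments
 * share_count; a later merge taking the same ref from 2 -> 0 (e.g. after a
 * BTRFS_DROP_DELAYED_REF) decrements it again, see update_share_count().
 */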
148 struct share_check {
149 	u64 root_objectid;
150 	u64 inum;
151 	int share_count;
152 };
153 
154 static inline int extent_is_shared(struct share_check *sc)
155 {
156 	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
157 }
158 
159 static struct kmem_cache *btrfs_prelim_ref_cache;
160 
161 int __init btrfs_prelim_ref_init(void)
162 {
163 	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
164 					sizeof(struct prelim_ref),
165 					0,
166 					SLAB_MEM_SPREAD,
167 					NULL);
168 	if (!btrfs_prelim_ref_cache)
169 		return -ENOMEM;
170 	return 0;
171 }
172 
173 void btrfs_prelim_ref_exit(void)
174 {
175 	kmem_cache_destroy(btrfs_prelim_ref_cache);
176 }
177 
178 static void free_pref(struct prelim_ref *ref)
179 {
180 	kmem_cache_free(btrfs_prelim_ref_cache, ref);
181 }
182 
183 /*
184  * Return 0 when both refs are for the same block (and can be merged).
185  * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
186  * indicates a 'higher' block.
187  */
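/*
 * For example, a level 0 data ref always sorts before a level 1 tree ref,
 * regardless of root_id or key, because the level is compared first.
 */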
188 static int prelim_ref_compare(struct prelim_ref *ref1,
189 			      struct prelim_ref *ref2)
190 {
191 	if (ref1->level < ref2->level)
192 		return -1;
193 	if (ref1->level > ref2->level)
194 		return 1;
195 	if (ref1->root_id < ref2->root_id)
196 		return -1;
197 	if (ref1->root_id > ref2->root_id)
198 		return 1;
199 	if (ref1->key_for_search.type < ref2->key_for_search.type)
200 		return -1;
201 	if (ref1->key_for_search.type > ref2->key_for_search.type)
202 		return 1;
203 	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
204 		return -1;
205 	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
206 		return 1;
207 	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
208 		return -1;
209 	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
210 		return 1;
211 	if (ref1->parent < ref2->parent)
212 		return -1;
213 	if (ref1->parent > ref2->parent)
214 		return 1;
215 
216 	return 0;
217 }
218 
219 static void update_share_count(struct share_check *sc, int oldcount, int newcount)
220 {
221 	if ((!sc) || (oldcount == 0 && newcount < 1))
222 		return;
223 
224 	if (oldcount > 0 && newcount < 1)
225 		sc->share_count--;
226 	else if (oldcount < 1 && newcount > 0)
227 		sc->share_count++;
228 }
229 
230 /*
231  * Add @newref to the @root rbtree, merging identical refs.
232  *
233  * Callers should assume that newref has been freed after calling.
234  */
235 static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
236 			      struct preftree *preftree,
237 			      struct prelim_ref *newref,
238 			      struct share_check *sc)
239 {
240 	struct rb_root *root;
241 	struct rb_node **p;
242 	struct rb_node *parent = NULL;
243 	struct prelim_ref *ref;
244 	int result;
245 
246 	root = &preftree->root;
247 	p = &root->rb_node;
248 
249 	while (*p) {
250 		parent = *p;
251 		ref = rb_entry(parent, struct prelim_ref, rbnode);
252 		result = prelim_ref_compare(ref, newref);
253 		if (result < 0) {
254 			p = &(*p)->rb_left;
255 		} else if (result > 0) {
256 			p = &(*p)->rb_right;
257 		} else {
258 			/* Identical refs, merge them and free @newref */
259 			struct extent_inode_elem *eie = ref->inode_list;
260 
261 			while (eie && eie->next)
262 				eie = eie->next;
263 
264 			if (!eie)
265 				ref->inode_list = newref->inode_list;
266 			else
267 				eie->next = newref->inode_list;
268 			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
269 						     preftree->count);
270 			/*
271 			 * A delayed ref can have newref->count < 0.
272 			 * The ref->count is updated to follow any
273 			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
274 			 */
275 			update_share_count(sc, ref->count,
276 					   ref->count + newref->count);
277 			ref->count += newref->count;
278 			free_pref(newref);
279 			return;
280 		}
281 	}
282 
283 	update_share_count(sc, 0, newref->count);
284 	preftree->count++;
285 	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
286 	rb_link_node(&newref->rbnode, parent, p);
287 	rb_insert_color(&newref->rbnode, root);
288 }
289 
290 /*
291  * Release the entire tree.  We don't care about internal consistency so
292  * just free everything and then reset the tree root.
293  */
294 static void prelim_release(struct preftree *preftree)
295 {
296 	struct prelim_ref *ref, *next_ref;
297 
298 	rbtree_postorder_for_each_entry_safe(ref, next_ref, &preftree->root,
299 					     rbnode)
300 		free_pref(ref);
301 
302 	preftree->root = RB_ROOT;
303 	preftree->count = 0;
304 }
305 
306 /*
307  * the rules for all callers of this function are:
308  * - obtaining the parent is the goal
309  * - if you add a key, you must know that it is a correct key
310  * - if you cannot add the parent or a correct key, then we will look into the
311  *   block later to set a correct key
312  *
313  * delayed refs
314  * ============
315  *        backref type | shared | indirect | shared | indirect
316  * information         |   tree |     tree |   data |     data
317  * --------------------+--------+----------+--------+----------
318  *      parent logical |    y   |     -    |    -   |     -
319  *      key to resolve |    -   |     y    |    y   |     y
320  *  tree block logical |    -   |     -    |    -   |     -
321  *  root for resolving |    y   |     y    |    y   |     y
322  *
323  * - column 1:       we have the parent -> done
324  * - column 2, 3, 4: we use the key to find the parent
325  *
326  * on disk refs (inline or keyed)
327  * ==============================
328  *        backref type | shared | indirect | shared | indirect
329  * information         |   tree |     tree |   data |     data
330  * --------------------+--------+----------+--------+----------
331  *      parent logical |    y   |     -    |    y   |     -
332  *      key to resolve |    -   |     -    |    -   |     y
333  *  tree block logical |    y   |     y    |    y   |     y
334  *  root for resolving |    -   |     y    |    y   |     y
335  *
336  * - column 1, 3: we have the parent -> done
337  * - column 2:    we take the first key from the block to find the parent
338  *                (see add_missing_keys)
339  * - column 4:    we use the key to find the parent
340  *
341  * additional information that's available but not required to find the parent
342  * block might help in merging entries to gain some speed.
343  */
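/*
 * Example for column 2 of the on-disk table: an inline
 * BTRFS_TREE_BLOCK_REF_KEY gives us the tree block logical and the root,
 * but neither the parent nor a key.  Such a ref is queued on the
 * indirect_missing_keys tree and add_missing_keys() later reads the block
 * to fill in key_for_search before the ref is resolved.
 */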
344 static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
345 			  struct preftree *preftree, u64 root_id,
346 			  const struct btrfs_key *key, int level, u64 parent,
347 			  u64 wanted_disk_byte, int count,
348 			  struct share_check *sc, gfp_t gfp_mask)
349 {
350 	struct prelim_ref *ref;
351 
352 	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
353 		return 0;
354 
355 	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
356 	if (!ref)
357 		return -ENOMEM;
358 
359 	ref->root_id = root_id;
360 	if (key) {
361 		ref->key_for_search = *key;
362 		/*
363 		 * We can often find data backrefs with an offset that is too
364 		 * large (>= LLONG_MAX, maximum allowed file offset) due to
365 		 * underflows when subtracting the data offset of its corresponding
366 		 * extent data item from a file's offset. This can
367 		 * happen for example in the clone ioctl.
368 		 * So if we detect such a case we set the search key's offset to
369 		 * zero to make sure we will find the matching file extent item
370 		 * at add_all_parents(), otherwise we will miss it because the
371 		 * offset taken from the backref is much larger than the offset
372 		 * of the file extent item. This can make us scan a very large
373 		 * number of file extent items, but at least it will not make
374 		 * us miss any.
375 		 * This is an ugly workaround for a behaviour that should have
376 		 * never existed, but it does and a fix for the clone ioctl
377 		 * would touch a lot of places, cause backwards incompatibility
378 		 * and would not fix the problem for extents cloned with older
379 		 * kernels.
380 		 */
381 		if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
382 		    ref->key_for_search.offset >= LLONG_MAX)
383 			ref->key_for_search.offset = 0;
384 	} else {
385 		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
386 	}
387 
388 	ref->inode_list = NULL;
389 	ref->level = level;
390 	ref->count = count;
391 	ref->parent = parent;
392 	ref->wanted_disk_byte = wanted_disk_byte;
393 	prelim_ref_insert(fs_info, preftree, ref, sc);
394 	return extent_is_shared(sc);
395 }
396 
397 /* direct refs use root == 0, key == NULL */
398 static int add_direct_ref(const struct btrfs_fs_info *fs_info,
399 			  struct preftrees *preftrees, int level, u64 parent,
400 			  u64 wanted_disk_byte, int count,
401 			  struct share_check *sc, gfp_t gfp_mask)
402 {
403 	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
404 			      parent, wanted_disk_byte, count, sc, gfp_mask);
405 }
406 
407 /* indirect refs use parent == 0 */
408 static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
409 			    struct preftrees *preftrees, u64 root_id,
410 			    const struct btrfs_key *key, int level,
411 			    u64 wanted_disk_byte, int count,
412 			    struct share_check *sc, gfp_t gfp_mask)
413 {
414 	struct preftree *tree = &preftrees->indirect;
415 
416 	if (!key)
417 		tree = &preftrees->indirect_missing_keys;
418 	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
419 			      wanted_disk_byte, count, sc, gfp_mask);
420 }
421 
422 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
423 			   struct ulist *parents, struct prelim_ref *ref,
424 			   int level, u64 time_seq, const u64 *extent_item_pos,
425 			   u64 total_refs, bool ignore_offset)
426 {
427 	int ret = 0;
428 	int slot;
429 	struct extent_buffer *eb;
430 	struct btrfs_key key;
431 	struct btrfs_key *key_for_search = &ref->key_for_search;
432 	struct btrfs_file_extent_item *fi;
433 	struct extent_inode_elem *eie = NULL, *old = NULL;
434 	u64 disk_byte;
435 	u64 wanted_disk_byte = ref->wanted_disk_byte;
436 	u64 count = 0;
437 
438 	if (level != 0) {
439 		eb = path->nodes[level];
440 		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
441 		if (ret < 0)
442 			return ret;
443 		return 0;
444 	}
445 
446 	/*
447 	 * We normally enter this function with the path already pointing to
448 	 * the first item to check. But sometimes, we may enter it with
449 	 * slot==nritems. In that case, go to the next leaf before we continue.
450 	 */
451 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
452 		if (time_seq == SEQ_LAST)
453 			ret = btrfs_next_leaf(root, path);
454 		else
455 			ret = btrfs_next_old_leaf(root, path, time_seq);
456 	}
457 
458 	while (!ret && count < total_refs) {
459 		eb = path->nodes[0];
460 		slot = path->slots[0];
461 
462 		btrfs_item_key_to_cpu(eb, &key, slot);
463 
464 		if (key.objectid != key_for_search->objectid ||
465 		    key.type != BTRFS_EXTENT_DATA_KEY)
466 			break;
467 
468 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
469 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
470 
471 		if (disk_byte == wanted_disk_byte) {
472 			eie = NULL;
473 			old = NULL;
474 			count++;
475 			if (extent_item_pos) {
476 				ret = check_extent_in_eb(&key, eb, fi,
477 						*extent_item_pos,
478 						&eie, ignore_offset);
479 				if (ret < 0)
480 					break;
481 			}
482 			if (ret > 0)
483 				goto next;
484 			ret = ulist_add_merge_ptr(parents, eb->start,
485 						  eie, (void **)&old, GFP_NOFS);
486 			if (ret < 0)
487 				break;
488 			if (!ret && extent_item_pos) {
489 				while (old->next)
490 					old = old->next;
491 				old->next = eie;
492 			}
493 			eie = NULL;
494 		}
495 next:
496 		if (time_seq == SEQ_LAST)
497 			ret = btrfs_next_item(root, path);
498 		else
499 			ret = btrfs_next_old_item(root, path, time_seq);
500 	}
501 
502 	if (ret > 0)
503 		ret = 0;
504 	else if (ret < 0)
505 		free_inode_elem_list(eie);
506 	return ret;
507 }
508 
509 /*
510  * resolve an indirect backref in the form (root_id, key, level)
511  * to a logical address
512  */
513 static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
514 				struct btrfs_path *path, u64 time_seq,
515 				struct prelim_ref *ref, struct ulist *parents,
516 				const u64 *extent_item_pos, u64 total_refs,
517 				bool ignore_offset)
518 {
519 	struct btrfs_root *root;
520 	struct btrfs_key root_key;
521 	struct extent_buffer *eb;
522 	int ret = 0;
523 	int root_level;
524 	int level = ref->level;
525 	int index;
526 
527 	root_key.objectid = ref->root_id;
528 	root_key.type = BTRFS_ROOT_ITEM_KEY;
529 	root_key.offset = (u64)-1;
530 
531 	index = srcu_read_lock(&fs_info->subvol_srcu);
532 
533 	root = btrfs_get_fs_root(fs_info, &root_key, false);
534 	if (IS_ERR(root)) {
535 		srcu_read_unlock(&fs_info->subvol_srcu, index);
536 		ret = PTR_ERR(root);
537 		goto out;
538 	}
539 
540 	if (btrfs_is_testing(fs_info)) {
541 		srcu_read_unlock(&fs_info->subvol_srcu, index);
542 		ret = -ENOENT;
543 		goto out;
544 	}
545 
546 	if (path->search_commit_root)
547 		root_level = btrfs_header_level(root->commit_root);
548 	else if (time_seq == SEQ_LAST)
549 		root_level = btrfs_header_level(root->node);
550 	else
551 		root_level = btrfs_old_root_level(root, time_seq);
552 
553 	if (root_level + 1 == level) {
554 		srcu_read_unlock(&fs_info->subvol_srcu, index);
555 		goto out;
556 	}
557 
558 	path->lowest_level = level;
559 	if (time_seq == SEQ_LAST)
560 		ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
561 					0, 0);
562 	else
563 		ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
564 					    time_seq);
565 
566 	/* root node has been locked, we can release @subvol_srcu safely here */
567 	srcu_read_unlock(&fs_info->subvol_srcu, index);
568 
569 	btrfs_debug(fs_info,
570 		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
571 		 ref->root_id, level, ref->count, ret,
572 		 ref->key_for_search.objectid, ref->key_for_search.type,
573 		 ref->key_for_search.offset);
574 	if (ret < 0)
575 		goto out;
576 
577 	eb = path->nodes[level];
578 	while (!eb) {
579 		if (WARN_ON(!level)) {
580 			ret = 1;
581 			goto out;
582 		}
583 		level--;
584 		eb = path->nodes[level];
585 	}
586 
587 	ret = add_all_parents(root, path, parents, ref, level, time_seq,
588 			      extent_item_pos, total_refs, ignore_offset);
589 out:
590 	path->lowest_level = 0;
591 	btrfs_release_path(path);
592 	return ret;
593 }
594 
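/*
 * The aux value of a ulist node stores the extent_inode_elem list that
 * check_extent_in_eb() built for that parent; convert it back to a pointer.
 */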
595 static struct extent_inode_elem *
596 unode_aux_to_inode_list(struct ulist_node *node)
597 {
598 	if (!node)
599 		return NULL;
600 	return (struct extent_inode_elem *)(uintptr_t)node->aux;
601 }
602 
603 /*
604  * We maintain three separate rbtrees: one for direct refs, one for
605  * indirect refs which have a key, and one for indirect refs which do not
606  * have a key. Each tree does merge on insertion.
607  *
608  * Once all of the references are located, we iterate over the tree of
609  * indirect refs with missing keys. An appropriate key is located and
610  * the ref is moved onto the tree for indirect refs. After all missing
611  * keys are thus located, we iterate over the indirect ref tree, resolve
612  * each reference, and then insert the resolved reference onto the
613  * direct tree (merging there too).
614  *
615  * New backrefs (i.e., for parent nodes) are added to the appropriate
616  * rbtree as they are encountered. The new backrefs are subsequently
617  * resolved as above.
618  */
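/*
 * The overall flow in find_parent_nodes() is therefore:
 * add_delayed_refs()/add_inline_refs()/add_keyed_refs() populate the trees,
 * add_missing_keys() drains indirect_missing_keys into the indirect tree,
 * resolve_indirect_refs() (below) drains the indirect tree into the direct
 * tree, and a final walk over the direct tree fills the refs/roots ulists.
 */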
619 static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
620 				 struct btrfs_path *path, u64 time_seq,
621 				 struct preftrees *preftrees,
622 				 const u64 *extent_item_pos, u64 total_refs,
623 				 struct share_check *sc, bool ignore_offset)
624 {
625 	int err;
626 	int ret = 0;
627 	struct ulist *parents;
628 	struct ulist_node *node;
629 	struct ulist_iterator uiter;
630 	struct rb_node *rnode;
631 
632 	parents = ulist_alloc(GFP_NOFS);
633 	if (!parents)
634 		return -ENOMEM;
635 
636 	/*
637 	 * We could trade memory usage for performance here by iterating
638 	 * the tree, allocating new refs for each insertion, and then
639 	 * freeing the entire indirect tree when we're done.  In some test
640 	 * cases, the tree can grow quite large (~200k objects).
641 	 */
642 	while ((rnode = rb_first(&preftrees->indirect.root))) {
643 		struct prelim_ref *ref;
644 
645 		ref = rb_entry(rnode, struct prelim_ref, rbnode);
646 		if (WARN(ref->parent,
647 			 "BUG: direct ref found in indirect tree")) {
648 			ret = -EINVAL;
649 			goto out;
650 		}
651 
652 		rb_erase(&ref->rbnode, &preftrees->indirect.root);
653 		preftrees->indirect.count--;
654 
655 		if (ref->count == 0) {
656 			free_pref(ref);
657 			continue;
658 		}
659 
660 		if (sc && sc->root_objectid &&
661 		    ref->root_id != sc->root_objectid) {
662 			free_pref(ref);
663 			ret = BACKREF_FOUND_SHARED;
664 			goto out;
665 		}
666 		err = resolve_indirect_ref(fs_info, path, time_seq, ref,
667 					   parents, extent_item_pos,
668 					   total_refs, ignore_offset);
669 		/*
670 		 * We can only tolerate -ENOENT; otherwise, we should catch the
671 		 * error and return directly.
672 		 */
673 		if (err == -ENOENT) {
674 			prelim_ref_insert(fs_info, &preftrees->direct, ref,
675 					  NULL);
676 			continue;
677 		} else if (err) {
678 			free_pref(ref);
679 			ret = err;
680 			goto out;
681 		}
682 
683 		/* we put the first parent into the ref at hand */
684 		ULIST_ITER_INIT(&uiter);
685 		node = ulist_next(parents, &uiter);
686 		ref->parent = node ? node->val : 0;
687 		ref->inode_list = unode_aux_to_inode_list(node);
688 
689 		/* Add a prelim_ref(s) for any other parent(s). */
690 		while ((node = ulist_next(parents, &uiter))) {
691 			struct prelim_ref *new_ref;
692 
693 			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
694 						   GFP_NOFS);
695 			if (!new_ref) {
696 				free_pref(ref);
697 				ret = -ENOMEM;
698 				goto out;
699 			}
700 			memcpy(new_ref, ref, sizeof(*ref));
701 			new_ref->parent = node->val;
702 			new_ref->inode_list = unode_aux_to_inode_list(node);
703 			prelim_ref_insert(fs_info, &preftrees->direct,
704 					  new_ref, NULL);
705 		}
706 
707 		/*
708 		 * Now it's a direct ref, put it in the direct tree. We must
709 		 * do this last because the ref could be merged/freed here.
710 		 */
711 		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
712 
713 		ulist_reinit(parents);
714 		cond_resched();
715 	}
716 out:
717 	ulist_free(parents);
718 	return ret;
719 }
720 
721 /*
722  * read tree blocks and add keys where required.
723  */
724 static int add_missing_keys(struct btrfs_fs_info *fs_info,
725 			    struct preftrees *preftrees)
726 {
727 	struct prelim_ref *ref;
728 	struct extent_buffer *eb;
729 	struct preftree *tree = &preftrees->indirect_missing_keys;
730 	struct rb_node *node;
731 
732 	while ((node = rb_first(&tree->root))) {
733 		ref = rb_entry(node, struct prelim_ref, rbnode);
734 		rb_erase(node, &tree->root);
735 
736 		BUG_ON(ref->parent);	/* should not be a direct ref */
737 		BUG_ON(ref->key_for_search.type);
738 		BUG_ON(!ref->wanted_disk_byte);
739 
740 		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0);
741 		if (IS_ERR(eb)) {
742 			free_pref(ref);
743 			return PTR_ERR(eb);
744 		} else if (!extent_buffer_uptodate(eb)) {
745 			free_pref(ref);
746 			free_extent_buffer(eb);
747 			return -EIO;
748 		}
749 		btrfs_tree_read_lock(eb);
750 		if (btrfs_header_level(eb) == 0)
751 			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
752 		else
753 			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
754 		btrfs_tree_read_unlock(eb);
755 		free_extent_buffer(eb);
756 		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
757 		cond_resched();
758 	}
759 	return 0;
760 }
761 
762 /*
763  * Add all currently queued delayed refs from this head whose seq nr is
764  * smaller than or equal to @seq to the preftrees.
765  */
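/*
 * Note on counts: a BTRFS_ADD_DELAYED_REF contributes +ref_mod and a
 * BTRFS_DROP_DELAYED_REF contributes -ref_mod, so a merged prelim_ref may
 * temporarily have a negative count until the matching on-disk refs are
 * added on top of it.
 */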
766 static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
767 			    struct btrfs_delayed_ref_head *head, u64 seq,
768 			    struct preftrees *preftrees, u64 *total_refs,
769 			    struct share_check *sc)
770 {
771 	struct btrfs_delayed_ref_node *node;
772 	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
773 	struct btrfs_key key;
774 	struct btrfs_key tmp_op_key;
775 	struct btrfs_key *op_key = NULL;
776 	struct rb_node *n;
777 	int count;
778 	int ret = 0;
779 
780 	if (extent_op && extent_op->update_key) {
781 		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
782 		op_key = &tmp_op_key;
783 	}
784 
785 	spin_lock(&head->lock);
786 	for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
787 		node = rb_entry(n, struct btrfs_delayed_ref_node,
788 				ref_node);
789 		if (node->seq > seq)
790 			continue;
791 
792 		switch (node->action) {
793 		case BTRFS_ADD_DELAYED_EXTENT:
794 		case BTRFS_UPDATE_DELAYED_HEAD:
795 			WARN_ON(1);
796 			continue;
797 		case BTRFS_ADD_DELAYED_REF:
798 			count = node->ref_mod;
799 			break;
800 		case BTRFS_DROP_DELAYED_REF:
801 			count = node->ref_mod * -1;
802 			break;
803 		default:
804 			BUG_ON(1);
805 		}
806 		*total_refs += count;
807 		switch (node->type) {
808 		case BTRFS_TREE_BLOCK_REF_KEY: {
809 			/* NORMAL INDIRECT METADATA backref */
810 			struct btrfs_delayed_tree_ref *ref;
811 
812 			ref = btrfs_delayed_node_to_tree_ref(node);
813 			ret = add_indirect_ref(fs_info, preftrees, ref->root,
814 					       &tmp_op_key, ref->level + 1,
815 					       node->bytenr, count, sc,
816 					       GFP_ATOMIC);
817 			break;
818 		}
819 		case BTRFS_SHARED_BLOCK_REF_KEY: {
820 			/* SHARED DIRECT METADATA backref */
821 			struct btrfs_delayed_tree_ref *ref;
822 
823 			ref = btrfs_delayed_node_to_tree_ref(node);
824 
825 			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
826 					     ref->parent, node->bytenr, count,
827 					     sc, GFP_ATOMIC);
828 			break;
829 		}
830 		case BTRFS_EXTENT_DATA_REF_KEY: {
831 			/* NORMAL INDIRECT DATA backref */
832 			struct btrfs_delayed_data_ref *ref;
833 			ref = btrfs_delayed_node_to_data_ref(node);
834 
835 			key.objectid = ref->objectid;
836 			key.type = BTRFS_EXTENT_DATA_KEY;
837 			key.offset = ref->offset;
838 
839 			/*
840 			 * Found an inum that doesn't match our known inum, so we
841 			 * know it's shared.
842 			 */
843 			if (sc && sc->inum && ref->objectid != sc->inum) {
844 				ret = BACKREF_FOUND_SHARED;
845 				goto out;
846 			}
847 
848 			ret = add_indirect_ref(fs_info, preftrees, ref->root,
849 					       &key, 0, node->bytenr, count, sc,
850 					       GFP_ATOMIC);
851 			break;
852 		}
853 		case BTRFS_SHARED_DATA_REF_KEY: {
854 			/* SHARED DIRECT FULL backref */
855 			struct btrfs_delayed_data_ref *ref;
856 
857 			ref = btrfs_delayed_node_to_data_ref(node);
858 
859 			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
860 					     node->bytenr, count, sc,
861 					     GFP_ATOMIC);
862 			break;
863 		}
864 		default:
865 			WARN_ON(1);
866 		}
867 		/*
868 		 * We must ignore BACKREF_FOUND_SHARED until all delayed
869 		 * refs have been checked.
870 		 */
871 		if (ret && (ret != BACKREF_FOUND_SHARED))
872 			break;
873 	}
874 	if (!ret)
875 		ret = extent_is_shared(sc);
876 out:
877 	spin_unlock(&head->lock);
878 	return ret;
879 }
880 
881 /*
882  * add all inline backrefs for bytenr to the list
883  *
884  * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
885  */
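/*
 * Inline refs follow the btrfs_extent_item (and, for non-skinny tree block
 * items, the btrfs_tree_block_info) back to back until the end of the item;
 * each entry is a btrfs_extent_inline_ref whose size depends on its type.
 */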
886 static int add_inline_refs(const struct btrfs_fs_info *fs_info,
887 			   struct btrfs_path *path, u64 bytenr,
888 			   int *info_level, struct preftrees *preftrees,
889 			   u64 *total_refs, struct share_check *sc)
890 {
891 	int ret = 0;
892 	int slot;
893 	struct extent_buffer *leaf;
894 	struct btrfs_key key;
895 	struct btrfs_key found_key;
896 	unsigned long ptr;
897 	unsigned long end;
898 	struct btrfs_extent_item *ei;
899 	u64 flags;
900 	u64 item_size;
901 
902 	/*
903 	 * enumerate all inline refs
904 	 */
905 	leaf = path->nodes[0];
906 	slot = path->slots[0];
907 
908 	item_size = btrfs_item_size_nr(leaf, slot);
909 	BUG_ON(item_size < sizeof(*ei));
910 
911 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
912 	flags = btrfs_extent_flags(leaf, ei);
913 	*total_refs += btrfs_extent_refs(leaf, ei);
914 	btrfs_item_key_to_cpu(leaf, &found_key, slot);
915 
916 	ptr = (unsigned long)(ei + 1);
917 	end = (unsigned long)ei + item_size;
918 
919 	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
920 	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
921 		struct btrfs_tree_block_info *info;
922 
923 		info = (struct btrfs_tree_block_info *)ptr;
924 		*info_level = btrfs_tree_block_level(leaf, info);
925 		ptr += sizeof(struct btrfs_tree_block_info);
926 		BUG_ON(ptr > end);
927 	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
928 		*info_level = found_key.offset;
929 	} else {
930 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
931 	}
932 
933 	while (ptr < end) {
934 		struct btrfs_extent_inline_ref *iref;
935 		u64 offset;
936 		int type;
937 
938 		iref = (struct btrfs_extent_inline_ref *)ptr;
939 		type = btrfs_get_extent_inline_ref_type(leaf, iref,
940 							BTRFS_REF_TYPE_ANY);
941 		if (type == BTRFS_REF_TYPE_INVALID)
942 			return -EINVAL;
943 
944 		offset = btrfs_extent_inline_ref_offset(leaf, iref);
945 
946 		switch (type) {
947 		case BTRFS_SHARED_BLOCK_REF_KEY:
948 			ret = add_direct_ref(fs_info, preftrees,
949 					     *info_level + 1, offset,
950 					     bytenr, 1, NULL, GFP_NOFS);
951 			break;
952 		case BTRFS_SHARED_DATA_REF_KEY: {
953 			struct btrfs_shared_data_ref *sdref;
954 			int count;
955 
956 			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
957 			count = btrfs_shared_data_ref_count(leaf, sdref);
958 
959 			ret = add_direct_ref(fs_info, preftrees, 0, offset,
960 					     bytenr, count, sc, GFP_NOFS);
961 			break;
962 		}
963 		case BTRFS_TREE_BLOCK_REF_KEY:
964 			ret = add_indirect_ref(fs_info, preftrees, offset,
965 					       NULL, *info_level + 1,
966 					       bytenr, 1, NULL, GFP_NOFS);
967 			break;
968 		case BTRFS_EXTENT_DATA_REF_KEY: {
969 			struct btrfs_extent_data_ref *dref;
970 			int count;
971 			u64 root;
972 
973 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
974 			count = btrfs_extent_data_ref_count(leaf, dref);
975 			key.objectid = btrfs_extent_data_ref_objectid(leaf,
976 								      dref);
977 			key.type = BTRFS_EXTENT_DATA_KEY;
978 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
979 
980 			if (sc && sc->inum && key.objectid != sc->inum) {
981 				ret = BACKREF_FOUND_SHARED;
982 				break;
983 			}
984 
985 			root = btrfs_extent_data_ref_root(leaf, dref);
986 
987 			ret = add_indirect_ref(fs_info, preftrees, root,
988 					       &key, 0, bytenr, count,
989 					       sc, GFP_NOFS);
990 			break;
991 		}
992 		default:
993 			WARN_ON(1);
994 		}
995 		if (ret)
996 			return ret;
997 		ptr += btrfs_extent_inline_ref_size(type);
998 	}
999 
1000 	return 0;
1001 }
1002 
1003 /*
1004  * add all non-inline backrefs for bytenr to the list
1005  *
1006  * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1007  */
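/*
 * Keyed refs are separate items in the extent tree that immediately follow
 * the EXTENT_ITEM/METADATA_ITEM, with key.objectid == bytenr and key.type
 * naming the backref kind, which is why this simply walks forward with
 * btrfs_next_item() until the objectid changes.
 */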
1008 static int add_keyed_refs(struct btrfs_fs_info *fs_info,
1009 			  struct btrfs_path *path, u64 bytenr,
1010 			  int info_level, struct preftrees *preftrees,
1011 			  struct share_check *sc)
1012 {
1013 	struct btrfs_root *extent_root = fs_info->extent_root;
1014 	int ret;
1015 	int slot;
1016 	struct extent_buffer *leaf;
1017 	struct btrfs_key key;
1018 
1019 	while (1) {
1020 		ret = btrfs_next_item(extent_root, path);
1021 		if (ret < 0)
1022 			break;
1023 		if (ret) {
1024 			ret = 0;
1025 			break;
1026 		}
1027 
1028 		slot = path->slots[0];
1029 		leaf = path->nodes[0];
1030 		btrfs_item_key_to_cpu(leaf, &key, slot);
1031 
1032 		if (key.objectid != bytenr)
1033 			break;
1034 		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1035 			continue;
1036 		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1037 			break;
1038 
1039 		switch (key.type) {
1040 		case BTRFS_SHARED_BLOCK_REF_KEY:
1041 			/* SHARED DIRECT METADATA backref */
1042 			ret = add_direct_ref(fs_info, preftrees,
1043 					     info_level + 1, key.offset,
1044 					     bytenr, 1, NULL, GFP_NOFS);
1045 			break;
1046 		case BTRFS_SHARED_DATA_REF_KEY: {
1047 			/* SHARED DIRECT FULL backref */
1048 			struct btrfs_shared_data_ref *sdref;
1049 			int count;
1050 
1051 			sdref = btrfs_item_ptr(leaf, slot,
1052 					      struct btrfs_shared_data_ref);
1053 			count = btrfs_shared_data_ref_count(leaf, sdref);
1054 			ret = add_direct_ref(fs_info, preftrees, 0,
1055 					     key.offset, bytenr, count,
1056 					     sc, GFP_NOFS);
1057 			break;
1058 		}
1059 		case BTRFS_TREE_BLOCK_REF_KEY:
1060 			/* NORMAL INDIRECT METADATA backref */
1061 			ret = add_indirect_ref(fs_info, preftrees, key.offset,
1062 					       NULL, info_level + 1, bytenr,
1063 					       1, NULL, GFP_NOFS);
1064 			break;
1065 		case BTRFS_EXTENT_DATA_REF_KEY: {
1066 			/* NORMAL INDIRECT DATA backref */
1067 			struct btrfs_extent_data_ref *dref;
1068 			int count;
1069 			u64 root;
1070 
1071 			dref = btrfs_item_ptr(leaf, slot,
1072 					      struct btrfs_extent_data_ref);
1073 			count = btrfs_extent_data_ref_count(leaf, dref);
1074 			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1075 								      dref);
1076 			key.type = BTRFS_EXTENT_DATA_KEY;
1077 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1078 
1079 			if (sc && sc->inum && key.objectid != sc->inum) {
1080 				ret = BACKREF_FOUND_SHARED;
1081 				break;
1082 			}
1083 
1084 			root = btrfs_extent_data_ref_root(leaf, dref);
1085 			ret = add_indirect_ref(fs_info, preftrees, root,
1086 					       &key, 0, bytenr, count,
1087 					       sc, GFP_NOFS);
1088 			break;
1089 		}
1090 		default:
1091 			WARN_ON(1);
1092 		}
1093 		if (ret)
1094 			return ret;
1095 
1096 	}
1097 
1098 	return ret;
1099 }
1100 
1101 /*
1102  * This adds all existing backrefs (inline backrefs, keyed backrefs and delayed
1103  * refs) for the given bytenr to the refs list, merges duplicates and resolves
1104  * indirect refs to their parent bytenr.
1105  * When roots are found, they're added to the roots list.
1106  *
1107  * If time_seq is set to SEQ_LAST, it will not search delayed_refs and will
1108  * behave much like the trans == NULL case; the only difference is that it
1109  * does not search the commit root.
1110  * This special case is for qgroup to search roots in commit_transaction().
1111  *
1112  * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1113  * shared extent is detected.
1114  *
1115  * Otherwise this returns 0 for success and <0 for an error.
1116  *
1117  * If ignore_offset is set to false, only extent refs whose offsets match
1118  * extent_item_pos are returned.  If true, every extent ref is returned
1119  * and extent_item_pos is ignored.
1120  *
1121  * FIXME some caching might speed things up
1122  */
1123 static int find_parent_nodes(struct btrfs_trans_handle *trans,
1124 			     struct btrfs_fs_info *fs_info, u64 bytenr,
1125 			     u64 time_seq, struct ulist *refs,
1126 			     struct ulist *roots, const u64 *extent_item_pos,
1127 			     struct share_check *sc, bool ignore_offset)
1128 {
1129 	struct btrfs_key key;
1130 	struct btrfs_path *path;
1131 	struct btrfs_delayed_ref_root *delayed_refs = NULL;
1132 	struct btrfs_delayed_ref_head *head;
1133 	int info_level = 0;
1134 	int ret;
1135 	struct prelim_ref *ref;
1136 	struct rb_node *node;
1137 	struct extent_inode_elem *eie = NULL;
1138 	/* total of both direct AND indirect refs! */
1139 	u64 total_refs = 0;
1140 	struct preftrees preftrees = {
1141 		.direct = PREFTREE_INIT,
1142 		.indirect = PREFTREE_INIT,
1143 		.indirect_missing_keys = PREFTREE_INIT
1144 	};
1145 
1146 	key.objectid = bytenr;
1147 	key.offset = (u64)-1;
1148 	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1149 		key.type = BTRFS_METADATA_ITEM_KEY;
1150 	else
1151 		key.type = BTRFS_EXTENT_ITEM_KEY;
1152 
1153 	path = btrfs_alloc_path();
1154 	if (!path)
1155 		return -ENOMEM;
1156 	if (!trans) {
1157 		path->search_commit_root = 1;
1158 		path->skip_locking = 1;
1159 	}
1160 
1161 	if (time_seq == SEQ_LAST)
1162 		path->skip_locking = 1;
1163 
1164 	/*
1165 	 * grab both a lock on the path and a lock on the delayed ref head.
1166 	 * We need both to get a consistent picture of how the refs look
1167 	 * at a specified point in time
1168 	 */
1169 again:
1170 	head = NULL;
1171 
1172 	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1173 	if (ret < 0)
1174 		goto out;
1175 	BUG_ON(ret == 0);
1176 
1177 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1178 	if (trans && likely(trans->type != __TRANS_DUMMY) &&
1179 	    time_seq != SEQ_LAST) {
1180 #else
1181 	if (trans && time_seq != SEQ_LAST) {
1182 #endif
1183 		/*
1184 		 * look if there are updates for this ref queued and lock the
1185 		 * head
1186 		 */
1187 		delayed_refs = &trans->transaction->delayed_refs;
1188 		spin_lock(&delayed_refs->lock);
1189 		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
1190 		if (head) {
1191 			if (!mutex_trylock(&head->mutex)) {
1192 				refcount_inc(&head->refs);
1193 				spin_unlock(&delayed_refs->lock);
1194 
1195 				btrfs_release_path(path);
1196 
1197 				/*
1198 				 * Mutex was contended, block until it's
1199 				 * released and try again
1200 				 */
1201 				mutex_lock(&head->mutex);
1202 				mutex_unlock(&head->mutex);
1203 				btrfs_put_delayed_ref_head(head);
1204 				goto again;
1205 			}
1206 			spin_unlock(&delayed_refs->lock);
1207 			ret = add_delayed_refs(fs_info, head, time_seq,
1208 					       &preftrees, &total_refs, sc);
1209 			mutex_unlock(&head->mutex);
1210 			if (ret)
1211 				goto out;
1212 		} else {
1213 			spin_unlock(&delayed_refs->lock);
1214 		}
1215 	}
1216 
1217 	if (path->slots[0]) {
1218 		struct extent_buffer *leaf;
1219 		int slot;
1220 
1221 		path->slots[0]--;
1222 		leaf = path->nodes[0];
1223 		slot = path->slots[0];
1224 		btrfs_item_key_to_cpu(leaf, &key, slot);
1225 		if (key.objectid == bytenr &&
1226 		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
1227 		     key.type == BTRFS_METADATA_ITEM_KEY)) {
1228 			ret = add_inline_refs(fs_info, path, bytenr,
1229 					      &info_level, &preftrees,
1230 					      &total_refs, sc);
1231 			if (ret)
1232 				goto out;
1233 			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
1234 					     &preftrees, sc);
1235 			if (ret)
1236 				goto out;
1237 		}
1238 	}
1239 
1240 	btrfs_release_path(path);
1241 
1242 	ret = add_missing_keys(fs_info, &preftrees);
1243 	if (ret)
1244 		goto out;
1245 
1246 	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root));
1247 
1248 	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
1249 				    extent_item_pos, total_refs, sc, ignore_offset);
1250 	if (ret)
1251 		goto out;
1252 
1253 	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root));
1254 
1255 	/*
1256 	 * This walks the tree of merged and resolved refs. Tree blocks are
1257 	 * read in as needed. Unique entries are added to the ulist, and
1258 	 * the list of found roots is updated.
1259 	 *
1260 	 * We release the entire tree in one go before returning.
1261 	 */
1262 	node = rb_first(&preftrees.direct.root);
1263 	while (node) {
1264 		ref = rb_entry(node, struct prelim_ref, rbnode);
1265 		node = rb_next(&ref->rbnode);
1266 		WARN_ON(ref->count < 0);
1267 		if (roots && ref->count && ref->root_id && ref->parent == 0) {
1268 			if (sc && sc->root_objectid &&
1269 			    ref->root_id != sc->root_objectid) {
1270 				ret = BACKREF_FOUND_SHARED;
1271 				goto out;
1272 			}
1273 
1274 			/* no parent == root of tree */
1275 			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1276 			if (ret < 0)
1277 				goto out;
1278 		}
1279 		if (ref->count && ref->parent) {
1280 			if (extent_item_pos && !ref->inode_list &&
1281 			    ref->level == 0) {
1282 				struct extent_buffer *eb;
1283 
1284 				eb = read_tree_block(fs_info, ref->parent, 0);
1285 				if (IS_ERR(eb)) {
1286 					ret = PTR_ERR(eb);
1287 					goto out;
1288 				} else if (!extent_buffer_uptodate(eb)) {
1289 					free_extent_buffer(eb);
1290 					ret = -EIO;
1291 					goto out;
1292 				}
1293 				btrfs_tree_read_lock(eb);
1294 				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1295 				ret = find_extent_in_eb(eb, bytenr,
1296 							*extent_item_pos, &eie, ignore_offset);
1297 				btrfs_tree_read_unlock_blocking(eb);
1298 				free_extent_buffer(eb);
1299 				if (ret < 0)
1300 					goto out;
1301 				ref->inode_list = eie;
1302 			}
1303 			ret = ulist_add_merge_ptr(refs, ref->parent,
1304 						  ref->inode_list,
1305 						  (void **)&eie, GFP_NOFS);
1306 			if (ret < 0)
1307 				goto out;
1308 			if (!ret && extent_item_pos) {
1309 				/*
1310 				 * we've recorded that parent, so we must extend
1311 				 * its inode list here
1312 				 */
1313 				BUG_ON(!eie);
1314 				while (eie->next)
1315 					eie = eie->next;
1316 				eie->next = ref->inode_list;
1317 			}
1318 			eie = NULL;
1319 		}
1320 		cond_resched();
1321 	}
1322 
1323 out:
1324 	btrfs_free_path(path);
1325 
1326 	prelim_release(&preftrees.direct);
1327 	prelim_release(&preftrees.indirect);
1328 	prelim_release(&preftrees.indirect_missing_keys);
1329 
1330 	if (ret < 0)
1331 		free_inode_elem_list(eie);
1332 	return ret;
1333 }
1334 
1335 static void free_leaf_list(struct ulist *blocks)
1336 {
1337 	struct ulist_node *node = NULL;
1338 	struct extent_inode_elem *eie;
1339 	struct ulist_iterator uiter;
1340 
1341 	ULIST_ITER_INIT(&uiter);
1342 	while ((node = ulist_next(blocks, &uiter))) {
1343 		if (!node->aux)
1344 			continue;
1345 		eie = unode_aux_to_inode_list(node);
1346 		free_inode_elem_list(eie);
1347 		node->aux = 0;
1348 	}
1349 
1350 	ulist_free(blocks);
1351 }
1352 
1353 /*
1354  * Finds all leaves with a reference to the specified combination of bytenr and
1355  * offset. The leaves will be stored in the leafs ulist, which must be freed
1356  * with ulist_free (or free_leaf_list to also release the inode lists hanging
1357  * off each node).
1358  *
1359  * returns 0 on success, <0 on error
1360  */
1361 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1362 				struct btrfs_fs_info *fs_info, u64 bytenr,
1363 				u64 time_seq, struct ulist **leafs,
1364 				const u64 *extent_item_pos, bool ignore_offset)
1365 {
1366 	int ret;
1367 
1368 	*leafs = ulist_alloc(GFP_NOFS);
1369 	if (!*leafs)
1370 		return -ENOMEM;
1371 
1372 	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1373 				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
1374 	if (ret < 0 && ret != -ENOENT) {
1375 		free_leaf_list(*leafs);
1376 		return ret;
1377 	}
1378 
1379 	return 0;
1380 }
1381 
1382 /*
1383  * walk all backrefs for a given extent to find all roots that reference this
1384  * extent. Walking a backref means finding all extents that reference this
1385  * extent and in turn walk the backrefs of those, too. Naturally this is a
1386  * recursive process, but here it is implemented in an iterative fashion: We
1387  * find all referencing extents for the extent in question and put them on a
1388  * list. In turn, we find all referencing extents for those, further appending
1389  * to the list. The way we iterate the list allows adding more elements after
1390  * the current while iterating. The process stops when we reach the end of the
1391  * list. Found roots are added to the roots list.
1392  *
1393  * returns 0 on success, < 0 on error.
1394  */
1395 static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1396 				     struct btrfs_fs_info *fs_info, u64 bytenr,
1397 				     u64 time_seq, struct ulist **roots,
1398 				     bool ignore_offset)
1399 {
1400 	struct ulist *tmp;
1401 	struct ulist_node *node = NULL;
1402 	struct ulist_iterator uiter;
1403 	int ret;
1404 
1405 	tmp = ulist_alloc(GFP_NOFS);
1406 	if (!tmp)
1407 		return -ENOMEM;
1408 	*roots = ulist_alloc(GFP_NOFS);
1409 	if (!*roots) {
1410 		ulist_free(tmp);
1411 		return -ENOMEM;
1412 	}
1413 
1414 	ULIST_ITER_INIT(&uiter);
1415 	while (1) {
1416 		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1417 					tmp, *roots, NULL, NULL, ignore_offset);
1418 		if (ret < 0 && ret != -ENOENT) {
1419 			ulist_free(tmp);
1420 			ulist_free(*roots);
1421 			return ret;
1422 		}
1423 		node = ulist_next(tmp, &uiter);
1424 		if (!node)
1425 			break;
1426 		bytenr = node->val;
1427 		cond_resched();
1428 	}
1429 
1430 	ulist_free(tmp);
1431 	return 0;
1432 }
1433 
1434 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1435 			 struct btrfs_fs_info *fs_info, u64 bytenr,
1436 			 u64 time_seq, struct ulist **roots,
1437 			 bool ignore_offset)
1438 {
1439 	int ret;
1440 
1441 	if (!trans)
1442 		down_read(&fs_info->commit_root_sem);
1443 	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1444 					time_seq, roots, ignore_offset);
1445 	if (!trans)
1446 		up_read(&fs_info->commit_root_sem);
1447 	return ret;
1448 }
1449 
1450 /**
1451  * btrfs_check_shared - tell us whether an extent is shared
1452  *
1453  * btrfs_check_shared uses the backref walking code but will short
1454  * circuit as soon as it finds a root or inode that doesn't match the
1455  * one passed in. This provides a significant performance benefit for
1456  * callers (such as fiemap) which want to know whether the extent is
1457  * shared but do not need a ref count.
1458  *
1459  * This attempts to allocate a transaction in order to account for
1460  * delayed refs, but continues on even when the alloc fails.
1461  *
1462  * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1463  */
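/*
 * The share_check below is seeded with the caller's root and inode number;
 * find_parent_nodes() returns BACKREF_FOUND_SHARED as soon as it sees a
 * backref that resolves to a different root or inum, or more than one live
 * ref overall.
 */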
1464 int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
1465 {
1466 	struct btrfs_fs_info *fs_info = root->fs_info;
1467 	struct btrfs_trans_handle *trans;
1468 	struct ulist *tmp = NULL;
1469 	struct ulist *roots = NULL;
1470 	struct ulist_iterator uiter;
1471 	struct ulist_node *node;
1472 	struct seq_list elem = SEQ_LIST_INIT(elem);
1473 	int ret = 0;
1474 	struct share_check shared = {
1475 		.root_objectid = root->objectid,
1476 		.inum = inum,
1477 		.share_count = 0,
1478 	};
1479 
1480 	tmp = ulist_alloc(GFP_NOFS);
1481 	roots = ulist_alloc(GFP_NOFS);
1482 	if (!tmp || !roots) {
1483 		ulist_free(tmp);
1484 		ulist_free(roots);
1485 		return -ENOMEM;
1486 	}
1487 
1488 	trans = btrfs_join_transaction(root);
1489 	if (IS_ERR(trans)) {
1490 		trans = NULL;
1491 		down_read(&fs_info->commit_root_sem);
1492 	} else {
1493 		btrfs_get_tree_mod_seq(fs_info, &elem);
1494 	}
1495 
1496 	ULIST_ITER_INIT(&uiter);
1497 	while (1) {
1498 		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1499 					roots, NULL, &shared, false);
1500 		if (ret == BACKREF_FOUND_SHARED) {
1501 			/* this is the only condition under which we return 1 */
1502 			ret = 1;
1503 			break;
1504 		}
1505 		if (ret < 0 && ret != -ENOENT)
1506 			break;
1507 		ret = 0;
1508 		node = ulist_next(tmp, &uiter);
1509 		if (!node)
1510 			break;
1511 		bytenr = node->val;
1512 		cond_resched();
1513 	}
1514 
1515 	if (trans) {
1516 		btrfs_put_tree_mod_seq(fs_info, &elem);
1517 		btrfs_end_transaction(trans);
1518 	} else {
1519 		up_read(&fs_info->commit_root_sem);
1520 	}
1521 	ulist_free(tmp);
1522 	ulist_free(roots);
1523 	return ret;
1524 }
1525 
1526 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1527 			  u64 start_off, struct btrfs_path *path,
1528 			  struct btrfs_inode_extref **ret_extref,
1529 			  u64 *found_off)
1530 {
1531 	int ret, slot;
1532 	struct btrfs_key key;
1533 	struct btrfs_key found_key;
1534 	struct btrfs_inode_extref *extref;
1535 	const struct extent_buffer *leaf;
1536 	unsigned long ptr;
1537 
1538 	key.objectid = inode_objectid;
1539 	key.type = BTRFS_INODE_EXTREF_KEY;
1540 	key.offset = start_off;
1541 
1542 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1543 	if (ret < 0)
1544 		return ret;
1545 
1546 	while (1) {
1547 		leaf = path->nodes[0];
1548 		slot = path->slots[0];
1549 		if (slot >= btrfs_header_nritems(leaf)) {
1550 			/*
1551 			 * If the item at offset is not found,
1552 			 * btrfs_search_slot will point us to the slot
1553 			 * where it should be inserted. In our case
1554 			 * that will be the slot directly before the
1555 			 * next BTRFS_INODE_EXTREF_KEY item. In the case
1556 			 * that we're pointing to the last slot in a
1557 			 * leaf, we must move one leaf over.
1558 			 */
1559 			ret = btrfs_next_leaf(root, path);
1560 			if (ret) {
1561 				if (ret >= 1)
1562 					ret = -ENOENT;
1563 				break;
1564 			}
1565 			continue;
1566 		}
1567 
1568 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1569 
1570 		/*
1571 		 * Check that we're still looking at an extended ref key for
1572 		 * this particular objectid. If we have a different
1573 		 * objectid or type then there are no more to be found
1574 		 * in the tree and we can exit.
1575 		 */
1576 		ret = -ENOENT;
1577 		if (found_key.objectid != inode_objectid)
1578 			break;
1579 		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1580 			break;
1581 
1582 		ret = 0;
1583 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1584 		extref = (struct btrfs_inode_extref *)ptr;
1585 		*ret_extref = extref;
1586 		if (found_off)
1587 			*found_off = found_key.offset;
1588 		break;
1589 	}
1590 
1591 	return ret;
1592 }
1593 
1594 /*
1595  * This iterates to turn a name (from an iref/extref) into a full filesystem
1596  * path. Elements of the path are separated by '/' and the path is guaranteed
1597  * to be 0-terminated. The path is only given within the current file system.
1598  * Therefore, it never starts with a '/'. The caller is responsible for
1599  * providing "size" bytes in "dest". The dest buffer will be filled backwards.
1600  * Finally, the start point of the resulting string is returned. This pointer
1601  * is normally within dest.
1602  * In case the path buffer would overflow, the pointer is decremented further
1603  * as if output was written to the buffer, though no more output is actually
1604  * generated. That way, the caller can determine how much space would be
1605  * required for the path to fit into the buffer. In that case, the returned
1606  * value will be smaller than dest. Callers must check this!
1607  */
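/*
 * Example with size == 10 and a resolved path "a/b/c": the name is written
 * backwards from dest[9], the returned pointer is dest + 4 and dest[4..9]
 * holds "a/b/c\0".  With size == 4 the same path returns dest - 2,
 * signalling that the buffer is two bytes too small.
 */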
1608 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1609 			u32 name_len, unsigned long name_off,
1610 			struct extent_buffer *eb_in, u64 parent,
1611 			char *dest, u32 size)
1612 {
1613 	int slot;
1614 	u64 next_inum;
1615 	int ret;
1616 	s64 bytes_left = ((s64)size) - 1;
1617 	struct extent_buffer *eb = eb_in;
1618 	struct btrfs_key found_key;
1619 	int leave_spinning = path->leave_spinning;
1620 	struct btrfs_inode_ref *iref;
1621 
1622 	if (bytes_left >= 0)
1623 		dest[bytes_left] = '\0';
1624 
1625 	path->leave_spinning = 1;
1626 	while (1) {
1627 		bytes_left -= name_len;
1628 		if (bytes_left >= 0)
1629 			read_extent_buffer(eb, dest + bytes_left,
1630 					   name_off, name_len);
1631 		if (eb != eb_in) {
1632 			if (!path->skip_locking)
1633 				btrfs_tree_read_unlock_blocking(eb);
1634 			free_extent_buffer(eb);
1635 		}
1636 		ret = btrfs_find_item(fs_root, path, parent, 0,
1637 				BTRFS_INODE_REF_KEY, &found_key);
1638 		if (ret > 0)
1639 			ret = -ENOENT;
1640 		if (ret)
1641 			break;
1642 
1643 		next_inum = found_key.offset;
1644 
1645 		/* regular exit ahead */
1646 		if (parent == next_inum)
1647 			break;
1648 
1649 		slot = path->slots[0];
1650 		eb = path->nodes[0];
1651 		/* make sure we can use eb after releasing the path */
1652 		if (eb != eb_in) {
1653 			if (!path->skip_locking)
1654 				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1655 			path->nodes[0] = NULL;
1656 			path->locks[0] = 0;
1657 		}
1658 		btrfs_release_path(path);
1659 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1660 
1661 		name_len = btrfs_inode_ref_name_len(eb, iref);
1662 		name_off = (unsigned long)(iref + 1);
1663 
1664 		parent = next_inum;
1665 		--bytes_left;
1666 		if (bytes_left >= 0)
1667 			dest[bytes_left] = '/';
1668 	}
1669 
1670 	btrfs_release_path(path);
1671 	path->leave_spinning = leave_spinning;
1672 
1673 	if (ret)
1674 		return ERR_PTR(ret);
1675 
1676 	return dest + bytes_left;
1677 }
1678 
1679 /*
1680  * This makes the path point to (logical EXTENT_ITEM *).
1681  * Returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1682  * tree blocks and <0 on error.
1683  */
1684 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1685 			struct btrfs_path *path, struct btrfs_key *found_key,
1686 			u64 *flags_ret)
1687 {
1688 	int ret;
1689 	u64 flags;
1690 	u64 size = 0;
1691 	u32 item_size;
1692 	const struct extent_buffer *eb;
1693 	struct btrfs_extent_item *ei;
1694 	struct btrfs_key key;
1695 
1696 	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1697 		key.type = BTRFS_METADATA_ITEM_KEY;
1698 	else
1699 		key.type = BTRFS_EXTENT_ITEM_KEY;
1700 	key.objectid = logical;
1701 	key.offset = (u64)-1;
1702 
1703 	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1704 	if (ret < 0)
1705 		return ret;
1706 
1707 	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1708 	if (ret) {
1709 		if (ret > 0)
1710 			ret = -ENOENT;
1711 		return ret;
1712 	}
1713 	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1714 	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1715 		size = fs_info->nodesize;
1716 	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1717 		size = found_key->offset;
1718 
1719 	if (found_key->objectid > logical ||
1720 	    found_key->objectid + size <= logical) {
1721 		btrfs_debug(fs_info,
1722 			"logical %llu is not within any extent", logical);
1723 		return -ENOENT;
1724 	}
1725 
1726 	eb = path->nodes[0];
1727 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
1728 	BUG_ON(item_size < sizeof(*ei));
1729 
1730 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1731 	flags = btrfs_extent_flags(eb, ei);
1732 
1733 	btrfs_debug(fs_info,
1734 		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1735 		 logical, logical - found_key->objectid, found_key->objectid,
1736 		 found_key->offset, flags, item_size);
1737 
1738 	WARN_ON(!flags_ret);
1739 	if (flags_ret) {
1740 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1741 			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1742 		else if (flags & BTRFS_EXTENT_FLAG_DATA)
1743 			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
1744 		else
1745 			BUG_ON(1);
1746 		return 0;
1747 	}
1748 
1749 	return -EIO;
1750 }
1751 
1752 /*
1753  * Helper function to iterate extent inline refs. ptr must point to a 0 value
1754  * for the first call and may be modified; it is used to track state.
1755  * If more refs exist, 0 is returned and the next call to
1756  * get_extent_inline_ref must pass the modified ptr parameter to get the
1757  * next ref. After the last ref was processed, 1 is returned.
1758  * Returns <0 on error.
1759  */
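/*
 * Typical usage mirrors tree_backref_for_extent() below: start with
 * *ptr == 0, call repeatedly while 0 is returned, and stop on 1 (done)
 * or a negative error.
 */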
1760 static int get_extent_inline_ref(unsigned long *ptr,
1761 				 const struct extent_buffer *eb,
1762 				 const struct btrfs_key *key,
1763 				 const struct btrfs_extent_item *ei,
1764 				 u32 item_size,
1765 				 struct btrfs_extent_inline_ref **out_eiref,
1766 				 int *out_type)
1767 {
1768 	unsigned long end;
1769 	u64 flags;
1770 	struct btrfs_tree_block_info *info;
1771 
1772 	if (!*ptr) {
1773 		/* first call */
1774 		flags = btrfs_extent_flags(eb, ei);
1775 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1776 			if (key->type == BTRFS_METADATA_ITEM_KEY) {
1777 				/* a skinny metadata extent */
1778 				*out_eiref =
1779 				     (struct btrfs_extent_inline_ref *)(ei + 1);
1780 			} else {
1781 				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1782 				info = (struct btrfs_tree_block_info *)(ei + 1);
1783 				*out_eiref =
1784 				   (struct btrfs_extent_inline_ref *)(info + 1);
1785 			}
1786 		} else {
1787 			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1788 		}
1789 		*ptr = (unsigned long)*out_eiref;
1790 		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1791 			return -ENOENT;
1792 	}
1793 
1794 	end = (unsigned long)ei + item_size;
1795 	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1796 	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1797 						     BTRFS_REF_TYPE_ANY);
1798 	if (*out_type == BTRFS_REF_TYPE_INVALID)
1799 		return -EINVAL;
1800 
1801 	*ptr += btrfs_extent_inline_ref_size(*out_type);
1802 	WARN_ON(*ptr > end);
1803 	if (*ptr == end)
1804 		return 1; /* last */
1805 
1806 	return 0;
1807 }
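
/*
 * Illustrative caller pattern for get_extent_inline_ref(), a sketch only;
 * tree_backref_for_extent() below is the in-file user it mirrors:
 *
 *	unsigned long ptr = 0;
 *	struct btrfs_extent_inline_ref *eiref;
 *	int type;
 *	int ret;
 *
 *	do {
 *		ret = get_extent_inline_ref(&ptr, eb, key, ei, item_size,
 *					    &eiref, &type);
 *		if (ret < 0)
 *			return ret;
 *		(inspect type and eiref here)
 *	} while (ret == 0);	(ret == 1 means the ref just returned was the last one)
 */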
1808 
1809 /*
1810  * reads the tree block backref for an extent. tree level and root are returned
1811  * through out_level and out_root. ptr must point to a 0 value for the first
1812  * call and may be modified (see get_extent_inline_ref comment).
1813  * returns 0 if data was provided, 1 if there was no more data to provide or
1814  * returns 0 if data was provided, 1 if there was no more data to provide, or
1815  */
1816 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1817 			    struct btrfs_key *key, struct btrfs_extent_item *ei,
1818 			    u32 item_size, u64 *out_root, u8 *out_level)
1819 {
1820 	int ret;
1821 	int type;
1822 	struct btrfs_extent_inline_ref *eiref;
1823 
1824 	if (*ptr == (unsigned long)-1)
1825 		return 1;
1826 
1827 	while (1) {
1828 		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1829 					      &eiref, &type);
1830 		if (ret < 0)
1831 			return ret;
1832 
1833 		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1834 		    type == BTRFS_SHARED_BLOCK_REF_KEY)
1835 			break;
1836 
1837 		if (ret == 1)
1838 			return 1;
1839 	}
1840 
1841 	/* we can treat both ref types equally here */
1842 	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1843 
1844 	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1845 		struct btrfs_tree_block_info *info;
1846 
1847 		info = (struct btrfs_tree_block_info *)(ei + 1);
1848 		*out_level = btrfs_tree_block_level(eb, info);
1849 	} else {
1850 		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1851 		*out_level = (u8)key->offset;
1852 	}
1853 
1854 	if (ret == 1)
1855 		*ptr = (unsigned long)-1;
1856 
1857 	return 0;
1858 }
1859 
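/*
 * Walk the extent_inode_elem list that was collected for a single leaf and
 * invoke the user supplied iterate() callback once per (inode, offset, root)
 * triple. A non-zero return value from the callback stops the walk and is
 * propagated to the caller.
 */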
1860 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1861 			     struct extent_inode_elem *inode_list,
1862 			     u64 root, u64 extent_item_objectid,
1863 			     iterate_extent_inodes_t *iterate, void *ctx)
1864 {
1865 	struct extent_inode_elem *eie;
1866 	int ret = 0;
1867 
1868 	for (eie = inode_list; eie; eie = eie->next) {
1869 		btrfs_debug(fs_info,
1870 			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
1871 			    extent_item_objectid, eie->inum,
1872 			    eie->offset, root);
1873 		ret = iterate(eie->inum, eie->offset, root, ctx);
1874 		if (ret) {
1875 			btrfs_debug(fs_info,
1876 				    "stopping iteration for %llu due to ret=%d",
1877 				    extent_item_objectid, ret);
1878 			break;
1879 		}
1880 	}
1881 
1882 	return ret;
1883 }
1884 
1885 /*
1886  * calls iterate() for every inode that references the extent identified by
1887  * the given parameters.
1888  * when the iterator function returns a non-zero value, iteration stops.
1889  */
1890 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1891 				u64 extent_item_objectid, u64 extent_item_pos,
1892 				int search_commit_root,
1893 				iterate_extent_inodes_t *iterate, void *ctx,
1894 				bool ignore_offset)
1895 {
1896 	int ret;
1897 	struct btrfs_trans_handle *trans = NULL;
1898 	struct ulist *refs = NULL;
1899 	struct ulist *roots = NULL;
1900 	struct ulist_node *ref_node = NULL;
1901 	struct ulist_node *root_node = NULL;
1902 	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
1903 	struct ulist_iterator ref_uiter;
1904 	struct ulist_iterator root_uiter;
1905 
1906 	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
1907 			extent_item_objectid);
1908 
1909 	if (!search_commit_root) {
1910 		trans = btrfs_join_transaction(fs_info->extent_root);
1911 		if (IS_ERR(trans))
1912 			return PTR_ERR(trans);
1913 		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1914 	} else {
1915 		down_read(&fs_info->commit_root_sem);
1916 	}
1917 
1918 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1919 				   tree_mod_seq_elem.seq, &refs,
1920 				   &extent_item_pos, ignore_offset);
1921 	if (ret)
1922 		goto out;
1923 
1924 	ULIST_ITER_INIT(&ref_uiter);
1925 	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1926 		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
1927 						tree_mod_seq_elem.seq, &roots,
1928 						ignore_offset);
1929 		if (ret)
1930 			break;
1931 		ULIST_ITER_INIT(&root_uiter);
1932 		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1933 			btrfs_debug(fs_info,
1934 				    "root %llu references leaf %llu, data list %#llx",
1935 				    root_node->val, ref_node->val,
1936 				    ref_node->aux);
1937 			ret = iterate_leaf_refs(fs_info,
1938 						(struct extent_inode_elem *)
1939 						(uintptr_t)ref_node->aux,
1940 						root_node->val,
1941 						extent_item_objectid,
1942 						iterate, ctx);
1943 		}
1944 		ulist_free(roots);
1945 	}
1946 
1947 	free_leaf_list(refs);
1948 out:
1949 	if (!search_commit_root) {
1950 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1951 		btrfs_end_transaction(trans);
1952 	} else {
1953 		up_read(&fs_info->commit_root_sem);
1954 	}
1955 
1956 	return ret;
1957 }
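
/*
 * Example iterate_extent_inodes_t callback, an illustrative sketch only (it
 * is not part of this file). As used by iterate_leaf_refs() above, the
 * callback receives the inode number, the file offset, the root id and the
 * caller's ctx pointer, and returns non-zero to stop the iteration early:
 *
 *	static int count_extent_users(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		u64 *count = ctx;
 *
 *		(*count)++;
 *		return 0;
 *	}
 */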
1958 
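/*
 * Resolve the extent item that covers the given logical address and call
 * iterate() for every inode referencing it. Tree block (metadata) extents
 * are rejected with -EINVAL; data extents are handed on to
 * iterate_extent_inodes().
 */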
1959 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1960 				struct btrfs_path *path,
1961 				iterate_extent_inodes_t *iterate, void *ctx,
1962 				bool ignore_offset)
1963 {
1964 	int ret;
1965 	u64 extent_item_pos;
1966 	u64 flags = 0;
1967 	struct btrfs_key found_key;
1968 	int search_commit_root = path->search_commit_root;
1969 
1970 	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
1971 	btrfs_release_path(path);
1972 	if (ret < 0)
1973 		return ret;
1974 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1975 		return -EINVAL;
1976 
1977 	extent_item_pos = logical - found_key.objectid;
1978 	ret = iterate_extent_inodes(fs_info, found_key.objectid,
1979 					extent_item_pos, search_commit_root,
1980 					iterate, ctx, ignore_offset);
1981 
1982 	return ret;
1983 }
1984 
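/*
 * Callback type for the inode ref iterators below: parent is the objectid of
 * the directory holding the name, name_len and name_off locate the name
 * inside the given extent buffer, and ctx is the caller's opaque pointer.
 * Returning a non-zero value stops the iteration.
 */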
1985 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
1986 			      struct extent_buffer *eb, void *ctx);
1987 
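/*
 * Iterate all BTRFS_INODE_REF_KEY items of the given inode and call
 * iterate() once for each name they contain. The leaf is cloned and the
 * path released before the callback runs, so the callback is free to use
 * the path itself.
 */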
1988 static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
1989 			      struct btrfs_path *path,
1990 			      iterate_irefs_t *iterate, void *ctx)
1991 {
1992 	int ret = 0;
1993 	int slot;
1994 	u32 cur;
1995 	u32 len;
1996 	u32 name_len;
1997 	u64 parent = 0;
1998 	int found = 0;
1999 	struct extent_buffer *eb;
2000 	struct btrfs_item *item;
2001 	struct btrfs_inode_ref *iref;
2002 	struct btrfs_key found_key;
2003 
2004 	while (!ret) {
2005 		ret = btrfs_find_item(fs_root, path, inum,
2006 				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2007 				&found_key);
2008 
2009 		if (ret < 0)
2010 			break;
2011 		if (ret) {
2012 			ret = found ? 0 : -ENOENT;
2013 			break;
2014 		}
2015 		++found;
2016 
2017 		parent = found_key.offset;
2018 		slot = path->slots[0];
2019 		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2020 		if (!eb) {
2021 			ret = -ENOMEM;
2022 			break;
2023 		}
2024 		extent_buffer_get(eb);
2025 		btrfs_tree_read_lock(eb);
2026 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
2027 		btrfs_release_path(path);
2028 
2029 		item = btrfs_item_nr(slot);
2030 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2031 
2032 		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2033 			name_len = btrfs_inode_ref_name_len(eb, iref);
2034 			/* path must be released before calling iterate()! */
2035 			btrfs_debug(fs_root->fs_info,
2036 				"following ref at offset %u for inode %llu in tree %llu",
2037 				cur, found_key.objectid, fs_root->objectid);
2038 			ret = iterate(parent, name_len,
2039 				      (unsigned long)(iref + 1), eb, ctx);
2040 			if (ret)
2041 				break;
2042 			len = sizeof(*iref) + name_len;
2043 			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2044 		}
2045 		btrfs_tree_read_unlock_blocking(eb);
2046 		free_extent_buffer(eb);
2047 	}
2048 
2049 	btrfs_release_path(path);
2050 
2051 	return ret;
2052 }
2053 
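/*
 * Same as iterate_inode_refs(), but for the BTRFS_INODE_EXTREF_KEY items
 * (extended references, used once the regular inode ref item overflows).
 */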
2054 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2055 				 struct btrfs_path *path,
2056 				 iterate_irefs_t *iterate, void *ctx)
2057 {
2058 	int ret;
2059 	int slot;
2060 	u64 offset = 0;
2061 	u64 parent;
2062 	int found = 0;
2063 	struct extent_buffer *eb;
2064 	struct btrfs_inode_extref *extref;
2065 	u32 item_size;
2066 	u32 cur_offset;
2067 	unsigned long ptr;
2068 
2069 	while (1) {
2070 		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2071 					    &offset);
2072 		if (ret < 0)
2073 			break;
2074 		if (ret) {
2075 			ret = found ? 0 : -ENOENT;
2076 			break;
2077 		}
2078 		++found;
2079 
2080 		slot = path->slots[0];
2081 		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2082 		if (!eb) {
2083 			ret = -ENOMEM;
2084 			break;
2085 		}
2086 		extent_buffer_get(eb);
2087 
2088 		btrfs_tree_read_lock(eb);
2089 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
2090 		btrfs_release_path(path);
2091 
2092 		item_size = btrfs_item_size_nr(eb, slot);
2093 		ptr = btrfs_item_ptr_offset(eb, slot);
2094 		cur_offset = 0;
2095 
2096 		while (cur_offset < item_size) {
2097 			u32 name_len;
2098 
2099 			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2100 			parent = btrfs_inode_extref_parent(eb, extref);
2101 			name_len = btrfs_inode_extref_name_len(eb, extref);
2102 			ret = iterate(parent, name_len,
2103 				      (unsigned long)&extref->name, eb, ctx);
2104 			if (ret)
2105 				break;
2106 
2107 			cur_offset += btrfs_inode_extref_name_len(eb, extref);
2108 			cur_offset += sizeof(*extref);
2109 		}
2110 		btrfs_tree_read_unlock_blocking(eb);
2111 		free_extent_buffer(eb);
2112 
2113 		offset++;
2114 	}
2115 
2116 	btrfs_release_path(path);
2117 
2118 	return ret;
2119 }
2120 
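/*
 * Call iterate() for every name (regular and extended refs) of the inode.
 * A missing ref item of one kind is not treated as an error as long as at
 * least one reference was found.
 */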
2121 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2122 			 struct btrfs_path *path, iterate_irefs_t *iterate,
2123 			 void *ctx)
2124 {
2125 	int ret;
2126 	int found_refs = 0;
2127 
2128 	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2129 	if (!ret)
2130 		++found_refs;
2131 	else if (ret != -ENOENT)
2132 		return ret;
2133 
2134 	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2135 	if (ret == -ENOENT && found_refs)
2136 		return 0;
2137 
2138 	return ret;
2139 }
2140 
2141 /*
2142  * returns 0 if the path could be dumped (possibly truncated)
2143  * returns <0 in case of an error
2144  */
2145 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2146 			 struct extent_buffer *eb, void *ctx)
2147 {
2148 	struct inode_fs_paths *ipath = ctx;
2149 	char *fspath;
2150 	char *fspath_min;
2151 	int i = ipath->fspath->elem_cnt;
2152 	const int s_ptr = sizeof(char *);
2153 	u32 bytes_left;
2154 
2155 	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2156 					ipath->fspath->bytes_left - s_ptr : 0;
2157 
2158 	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2159 	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2160 				   name_off, eb, inum, fspath_min, bytes_left);
2161 	if (IS_ERR(fspath))
2162 		return PTR_ERR(fspath);
2163 
2164 	if (fspath > fspath_min) {
2165 		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2166 		++ipath->fspath->elem_cnt;
2167 		ipath->fspath->bytes_left = fspath - fspath_min;
2168 	} else {
2169 		++ipath->fspath->elem_missed;
2170 		ipath->fspath->bytes_missing += fspath_min - fspath;
2171 		ipath->fspath->bytes_left = 0;
2172 	}
2173 
2174 	return 0;
2175 }
2176 
2177 /*
2178  * this dumps all file system paths to the inode into the ipath struct, provided
2179  * is has been created large enough. each path is zero-terminated and accessed
2180  * it has been created large enough. each path is zero-terminated and accessed
2181  * when it returns, there are ipath->fspath->elem_cnt number of paths available
2182  * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2183  * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2184  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2185  * have been needed to return all paths.
2186  */
2187 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2188 {
2189 	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2190 			     inode_to_path, ipath);
2191 }
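
/*
 * Typical use of the ipath API, an illustrative sketch only (one in-kernel
 * user is the BTRFS_IOC_INO_PATHS ioctl); size, fs_root, path and inum are
 * placeholders provided by the caller:
 *
 *	ipath = init_ipath(size, fs_root, path);
 *	if (IS_ERR(ipath))
 *		return PTR_ERR(ipath);
 *	ret = paths_from_inode(inum, ipath);
 *	if (ret == 0)
 *		for (i = 0; i < ipath->fspath->elem_cnt; i++)
 *			pr_info("path: %s\n",
 *				(char *)(unsigned long)ipath->fspath->val[i]);
 *	free_ipath(ipath);
 */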
2192 
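/*
 * Allocate a btrfs_data_container of at least total_bytes. bytes_left is set
 * to the space that remains behind the header; if total_bytes is smaller
 * than the header itself, the shortfall is recorded in bytes_missing instead.
 */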
2193 struct btrfs_data_container *init_data_container(u32 total_bytes)
2194 {
2195 	struct btrfs_data_container *data;
2196 	size_t alloc_bytes;
2197 
2198 	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2199 	data = kvmalloc(alloc_bytes, GFP_KERNEL);
2200 	if (!data)
2201 		return ERR_PTR(-ENOMEM);
2202 
2203 	if (total_bytes >= sizeof(*data)) {
2204 		data->bytes_left = total_bytes - sizeof(*data);
2205 		data->bytes_missing = 0;
2206 	} else {
2207 		data->bytes_missing = sizeof(*data) - total_bytes;
2208 		data->bytes_left = 0;
2209 	}
2210 
2211 	data->elem_cnt = 0;
2212 	data->elem_missed = 0;
2213 
2214 	return data;
2215 }
2216 
2217 /*
2218  * allocates space to return multiple file system paths for an inode.
2219  * total_bytes to allocate are passed; note that the space usable for actual
2220  * path information will be total_bytes - sizeof(struct btrfs_data_container).
2221  * the returned pointer must be freed with free_ipath() in the end.
2222  */
2223 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2224 					struct btrfs_path *path)
2225 {
2226 	struct inode_fs_paths *ifp;
2227 	struct btrfs_data_container *fspath;
2228 
2229 	fspath = init_data_container(total_bytes);
2230 	if (IS_ERR(fspath))
2231 		return (void *)fspath;
2232 
2233 	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2234 	if (!ifp) {
2235 		kvfree(fspath);
2236 		return ERR_PTR(-ENOMEM);
2237 	}
2238 
2239 	ifp->btrfs_path = path;
2240 	ifp->fspath = fspath;
2241 	ifp->fs_root = fs_root;
2242 
2243 	return ifp;
2244 }
2245 
2246 void free_ipath(struct inode_fs_paths *ipath)
2247 {
2248 	if (!ipath)
2249 		return;
2250 	kvfree(ipath->fspath);
2251 	kfree(ipath);
2252 }
2253