xref: /openbmc/linux/fs/btrfs/backref.c (revision cd4d09ec)
1 /*
2  * Copyright (C) 2011 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 
19 #include <linux/vmalloc.h>
20 #include "ctree.h"
21 #include "disk-io.h"
22 #include "backref.h"
23 #include "ulist.h"
24 #include "transaction.h"
25 #include "delayed-ref.h"
26 #include "locking.h"
27 
28 /* Just an arbitrary number so we can be sure this happened */
29 #define BACKREF_FOUND_SHARED 6
30 
31 struct extent_inode_elem {
32 	u64 inum;
33 	u64 offset;
34 	struct extent_inode_elem *next;
35 };
36 
37 static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
38 				struct btrfs_file_extent_item *fi,
39 				u64 extent_item_pos,
40 				struct extent_inode_elem **eie)
41 {
42 	u64 offset = 0;
43 	struct extent_inode_elem *e;
44 
45 	if (!btrfs_file_extent_compression(eb, fi) &&
46 	    !btrfs_file_extent_encryption(eb, fi) &&
47 	    !btrfs_file_extent_other_encoding(eb, fi)) {
48 		u64 data_offset;
49 		u64 data_len;
50 
51 		data_offset = btrfs_file_extent_offset(eb, fi);
52 		data_len = btrfs_file_extent_num_bytes(eb, fi);
53 
54 		if (extent_item_pos < data_offset ||
55 		    extent_item_pos >= data_offset + data_len)
56 			return 1;
57 		offset = extent_item_pos - data_offset;
58 	}
59 
60 	e = kmalloc(sizeof(*e), GFP_NOFS);
61 	if (!e)
62 		return -ENOMEM;
63 
64 	e->next = *eie;
65 	e->inum = key->objectid;
66 	e->offset = key->offset + offset;
67 	*eie = e;
68 
69 	return 0;
70 }
71 
72 static void free_inode_elem_list(struct extent_inode_elem *eie)
73 {
74 	struct extent_inode_elem *eie_next;
75 
76 	for (; eie; eie = eie_next) {
77 		eie_next = eie->next;
78 		kfree(eie);
79 	}
80 }
81 
82 static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
83 				u64 extent_item_pos,
84 				struct extent_inode_elem **eie)
85 {
86 	u64 disk_byte;
87 	struct btrfs_key key;
88 	struct btrfs_file_extent_item *fi;
89 	int slot;
90 	int nritems;
91 	int extent_type;
92 	int ret;
93 
94 	/*
95 	 * from the shared data ref, we only have the leaf but we need
96 	 * the key. thus, we must look into all items and check whether we
97 	 * find one (or more) with a reference to our extent item.
98 	 */
99 	nritems = btrfs_header_nritems(eb);
100 	for (slot = 0; slot < nritems; ++slot) {
101 		btrfs_item_key_to_cpu(eb, &key, slot);
102 		if (key.type != BTRFS_EXTENT_DATA_KEY)
103 			continue;
104 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
105 		extent_type = btrfs_file_extent_type(eb, fi);
106 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
107 			continue;
108 		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
109 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
110 		if (disk_byte != wanted_disk_byte)
111 			continue;
112 
113 		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
114 		if (ret < 0)
115 			return ret;
116 	}
117 
118 	return 0;
119 }
120 
121 /*
122  * this structure records all encountered refs on the way up to the root
123  */
124 struct __prelim_ref {
125 	struct list_head list;
126 	u64 root_id;
127 	struct btrfs_key key_for_search;
128 	int level;
129 	int count;
130 	struct extent_inode_elem *inode_list;
131 	u64 parent;
132 	u64 wanted_disk_byte;
133 };
134 
135 static struct kmem_cache *btrfs_prelim_ref_cache;
136 
137 int __init btrfs_prelim_ref_init(void)
138 {
139 	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
140 					sizeof(struct __prelim_ref),
141 					0,
142 					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
143 					NULL);
144 	if (!btrfs_prelim_ref_cache)
145 		return -ENOMEM;
146 	return 0;
147 }
148 
149 void btrfs_prelim_ref_exit(void)
150 {
151 	if (btrfs_prelim_ref_cache)
152 		kmem_cache_destroy(btrfs_prelim_ref_cache);
153 }
154 
155 /*
156  * the rules for all callers of this function are:
157  * - obtaining the parent is the goal
158  * - if you add a key, you must know that it is a correct key
159  * - if you cannot add the parent or a correct key, then we will look into the
160  *   block later to set a correct key
161  *
162  * delayed refs
163  * ============
164  *        backref type | shared | indirect | shared | indirect
165  * information         |   tree |     tree |   data |     data
166  * --------------------+--------+----------+--------+----------
167  *      parent logical |    y   |     -    |    -   |     -
168  *      key to resolve |    -   |     y    |    y   |     y
169  *  tree block logical |    -   |     -    |    -   |     -
170  *  root for resolving |    y   |     y    |    y   |     y
171  *
172  * - column 1:       we've the parent -> done
173  * - column 2, 3, 4: we use the key to find the parent
174  *
175  * on disk refs (inline or keyed)
176  * ==============================
177  *        backref type | shared | indirect | shared | indirect
178  * information         |   tree |     tree |   data |     data
179  * --------------------+--------+----------+--------+----------
180  *      parent logical |    y   |     -    |    y   |     -
181  *      key to resolve |    -   |     -    |    -   |     y
182  *  tree block logical |    y   |     y    |    y   |     y
183  *  root for resolving |    -   |     y    |    y   |     y
184  *
185  * - column 1, 3: we've the parent -> done
186  * - column 2:    we take the first key from the block to find the parent
187  *                (see __add_missing_keys)
188  * - column 4:    we use the key to find the parent
189  *
190  * additional information that's available but not required to find the parent
191  * block might help in merging entries to gain some speed.
192  */
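
/*
 * Worked example (illustrative sketch, values chosen for demonstration
 * only): an on-disk EXTENT_DATA_REF (an indirect data backref, the last
 * column above) carrying root 5, objectid 258 and offset 0 is queued
 * roughly as
 *
 *	key.objectid = 258;
 *	key.type = BTRFS_EXTENT_DATA_KEY;
 *	key.offset = 0;
 *	__add_prelim_ref(prefs, 5, &key, 0, 0, bytenr, count, GFP_NOFS);
 *
 * i.e. no parent is known yet; root and key are later resolved to a parent
 * leaf by __resolve_indirect_ref(). A SHARED_DATA_REF, by contrast, already
 * carries the parent logical address and needs no key.
 */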
193 
194 static int __add_prelim_ref(struct list_head *head, u64 root_id,
195 			    struct btrfs_key *key, int level,
196 			    u64 parent, u64 wanted_disk_byte, int count,
197 			    gfp_t gfp_mask)
198 {
199 	struct __prelim_ref *ref;
200 
201 	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
202 		return 0;
203 
204 	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
205 	if (!ref)
206 		return -ENOMEM;
207 
208 	ref->root_id = root_id;
209 	if (key) {
210 		ref->key_for_search = *key;
211 		/*
212 		 * We can often find data backrefs with an offset that is too
213 		 * large (>= LLONG_MAX, maximum allowed file offset) due to
214 		 * underflows when subtracting a file's offset with the data
215 		 * offset of its corresponding extent data item. This can
216 		 * happen for example in the clone ioctl.
217 		 * So if we detect such a case we set the search key's offset to
218 		 * zero to make sure we will find the matching file extent item
219 		 * at add_all_parents(), otherwise we will miss it because the
220 		 * offset taken from the backref is much larger than the offset
221 		 * of the file extent item. This can make us scan a very large
222 		 * number of file extent items, but at least it will not make
223 		 * us miss any.
224 		 * This is an ugly workaround for a behaviour that should have
225 		 * never existed, but it does and a fix for the clone ioctl
226 		 * would touch a lot of places, cause backwards incompatibility
227 		 * and would not fix the problem for extents cloned with older
228 		 * kernels.
229 		 */
230 		if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
231 		    ref->key_for_search.offset >= LLONG_MAX)
232 			ref->key_for_search.offset = 0;
233 	} else {
234 		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
235 	}
236 
237 	ref->inode_list = NULL;
238 	ref->level = level;
239 	ref->count = count;
240 	ref->parent = parent;
241 	ref->wanted_disk_byte = wanted_disk_byte;
242 	list_add_tail(&ref->list, head);
243 
244 	return 0;
245 }
246 
247 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
248 			   struct ulist *parents, struct __prelim_ref *ref,
249 			   int level, u64 time_seq, const u64 *extent_item_pos,
250 			   u64 total_refs)
251 {
252 	int ret = 0;
253 	int slot;
254 	struct extent_buffer *eb;
255 	struct btrfs_key key;
256 	struct btrfs_key *key_for_search = &ref->key_for_search;
257 	struct btrfs_file_extent_item *fi;
258 	struct extent_inode_elem *eie = NULL, *old = NULL;
259 	u64 disk_byte;
260 	u64 wanted_disk_byte = ref->wanted_disk_byte;
261 	u64 count = 0;
262 
263 	if (level != 0) {
264 		eb = path->nodes[level];
265 		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
266 		if (ret < 0)
267 			return ret;
268 		return 0;
269 	}
270 
271 	/*
272 	 * We normally enter this function with the path already pointing to
273 	 * the first item to check. But sometimes, we may enter it with
274 	 * slot==nritems. In that case, go to the next leaf before we continue.
275 	 */
276 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
277 		if (time_seq == (u64)-1)
278 			ret = btrfs_next_leaf(root, path);
279 		else
280 			ret = btrfs_next_old_leaf(root, path, time_seq);
281 	}
282 
283 	while (!ret && count < total_refs) {
284 		eb = path->nodes[0];
285 		slot = path->slots[0];
286 
287 		btrfs_item_key_to_cpu(eb, &key, slot);
288 
289 		if (key.objectid != key_for_search->objectid ||
290 		    key.type != BTRFS_EXTENT_DATA_KEY)
291 			break;
292 
293 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
294 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
295 
296 		if (disk_byte == wanted_disk_byte) {
297 			eie = NULL;
298 			old = NULL;
299 			count++;
300 			if (extent_item_pos) {
301 				ret = check_extent_in_eb(&key, eb, fi,
302 						*extent_item_pos,
303 						&eie);
304 				if (ret < 0)
305 					break;
306 			}
307 			if (ret > 0)
308 				goto next;
309 			ret = ulist_add_merge_ptr(parents, eb->start,
310 						  eie, (void **)&old, GFP_NOFS);
311 			if (ret < 0)
312 				break;
313 			if (!ret && extent_item_pos) {
314 				while (old->next)
315 					old = old->next;
316 				old->next = eie;
317 			}
318 			eie = NULL;
319 		}
320 next:
321 		if (time_seq == (u64)-1)
322 			ret = btrfs_next_item(root, path);
323 		else
324 			ret = btrfs_next_old_item(root, path, time_seq);
325 	}
326 
327 	if (ret > 0)
328 		ret = 0;
329 	else if (ret < 0)
330 		free_inode_elem_list(eie);
331 	return ret;
332 }
333 
334 /*
335  * resolve an indirect backref in the form (root_id, key, level)
336  * to a logical address
337  */
338 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
339 				  struct btrfs_path *path, u64 time_seq,
340 				  struct __prelim_ref *ref,
341 				  struct ulist *parents,
342 				  const u64 *extent_item_pos, u64 total_refs)
343 {
344 	struct btrfs_root *root;
345 	struct btrfs_key root_key;
346 	struct extent_buffer *eb;
347 	int ret = 0;
348 	int root_level;
349 	int level = ref->level;
350 	int index;
351 
352 	root_key.objectid = ref->root_id;
353 	root_key.type = BTRFS_ROOT_ITEM_KEY;
354 	root_key.offset = (u64)-1;
355 
356 	index = srcu_read_lock(&fs_info->subvol_srcu);
357 
358 	root = btrfs_get_fs_root(fs_info, &root_key, false);
359 	if (IS_ERR(root)) {
360 		srcu_read_unlock(&fs_info->subvol_srcu, index);
361 		ret = PTR_ERR(root);
362 		goto out;
363 	}
364 
365 	if (btrfs_test_is_dummy_root(root)) {
366 		srcu_read_unlock(&fs_info->subvol_srcu, index);
367 		ret = -ENOENT;
368 		goto out;
369 	}
370 
371 	if (path->search_commit_root)
372 		root_level = btrfs_header_level(root->commit_root);
373 	else if (time_seq == (u64)-1)
374 		root_level = btrfs_header_level(root->node);
375 	else
376 		root_level = btrfs_old_root_level(root, time_seq);
377 
378 	if (root_level + 1 == level) {
379 		srcu_read_unlock(&fs_info->subvol_srcu, index);
380 		goto out;
381 	}
382 
383 	path->lowest_level = level;
384 	if (time_seq == (u64)-1)
385 		ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
386 					0, 0);
387 	else
388 		ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
389 					    time_seq);
390 
391 	/* root node has been locked, we can release @subvol_srcu safely here */
392 	srcu_read_unlock(&fs_info->subvol_srcu, index);
393 
394 	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
395 		 "%d for key (%llu %u %llu)\n",
396 		 ref->root_id, level, ref->count, ret,
397 		 ref->key_for_search.objectid, ref->key_for_search.type,
398 		 ref->key_for_search.offset);
399 	if (ret < 0)
400 		goto out;
401 
402 	eb = path->nodes[level];
403 	while (!eb) {
404 		if (WARN_ON(!level)) {
405 			ret = 1;
406 			goto out;
407 		}
408 		level--;
409 		eb = path->nodes[level];
410 	}
411 
412 	ret = add_all_parents(root, path, parents, ref, level, time_seq,
413 			      extent_item_pos, total_refs);
414 out:
415 	path->lowest_level = 0;
416 	btrfs_release_path(path);
417 	return ret;
418 }
419 
420 /*
421  * resolve all indirect backrefs from the list
422  */
423 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
424 				   struct btrfs_path *path, u64 time_seq,
425 				   struct list_head *head,
426 				   const u64 *extent_item_pos, u64 total_refs,
427 				   u64 root_objectid)
428 {
429 	int err;
430 	int ret = 0;
431 	struct __prelim_ref *ref;
432 	struct __prelim_ref *ref_safe;
433 	struct __prelim_ref *new_ref;
434 	struct ulist *parents;
435 	struct ulist_node *node;
436 	struct ulist_iterator uiter;
437 
438 	parents = ulist_alloc(GFP_NOFS);
439 	if (!parents)
440 		return -ENOMEM;
441 
442 	/*
443 	 * _safe allows us to insert directly after the current item without
444 	 * iterating over the newly inserted items.
445 	 * we're also allowed to re-assign ref during iteration.
446 	 */
447 	list_for_each_entry_safe(ref, ref_safe, head, list) {
448 		if (ref->parent)	/* already direct */
449 			continue;
450 		if (ref->count == 0)
451 			continue;
452 		if (root_objectid && ref->root_id != root_objectid) {
453 			ret = BACKREF_FOUND_SHARED;
454 			goto out;
455 		}
456 		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
457 					     parents, extent_item_pos,
458 					     total_refs);
459 		/*
460 		 * we can only tolerate ENOENT; otherwise, we should catch the
461 		 * error and return directly.
462 		 */
463 		if (err == -ENOENT) {
464 			continue;
465 		} else if (err) {
466 			ret = err;
467 			goto out;
468 		}
469 
470 		/* we put the first parent into the ref at hand */
471 		ULIST_ITER_INIT(&uiter);
472 		node = ulist_next(parents, &uiter);
473 		ref->parent = node ? node->val : 0;
474 		ref->inode_list = node ?
475 			(struct extent_inode_elem *)(uintptr_t)node->aux : NULL;
476 
477 		/* additional parents require new refs being added here */
478 		while ((node = ulist_next(parents, &uiter))) {
479 			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
480 						   GFP_NOFS);
481 			if (!new_ref) {
482 				ret = -ENOMEM;
483 				goto out;
484 			}
485 			memcpy(new_ref, ref, sizeof(*ref));
486 			new_ref->parent = node->val;
487 			new_ref->inode_list = (struct extent_inode_elem *)
488 							(uintptr_t)node->aux;
489 			list_add(&new_ref->list, &ref->list);
490 		}
491 		ulist_reinit(parents);
492 	}
493 out:
494 	ulist_free(parents);
495 	return ret;
496 }
497 
498 static inline int ref_for_same_block(struct __prelim_ref *ref1,
499 				     struct __prelim_ref *ref2)
500 {
501 	if (ref1->level != ref2->level)
502 		return 0;
503 	if (ref1->root_id != ref2->root_id)
504 		return 0;
505 	if (ref1->key_for_search.type != ref2->key_for_search.type)
506 		return 0;
507 	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
508 		return 0;
509 	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
510 		return 0;
511 	if (ref1->parent != ref2->parent)
512 		return 0;
513 
514 	return 1;
515 }
516 
517 /*
518  * read tree blocks and add keys where required.
519  */
520 static int __add_missing_keys(struct btrfs_fs_info *fs_info,
521 			      struct list_head *head)
522 {
523 	struct __prelim_ref *ref;
524 	struct extent_buffer *eb;
525 
526 	list_for_each_entry(ref, head, list) {
527 		if (ref->parent)
528 			continue;
529 		if (ref->key_for_search.type)
530 			continue;
531 		BUG_ON(!ref->wanted_disk_byte);
532 		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
533 				     0);
534 		if (IS_ERR(eb)) {
535 			return PTR_ERR(eb);
536 		} else if (!extent_buffer_uptodate(eb)) {
537 			free_extent_buffer(eb);
538 			return -EIO;
539 		}
540 		btrfs_tree_read_lock(eb);
541 		if (btrfs_header_level(eb) == 0)
542 			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
543 		else
544 			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
545 		btrfs_tree_read_unlock(eb);
546 		free_extent_buffer(eb);
547 	}
548 	return 0;
549 }
550 
551 /*
552  * merge backrefs and adjust counts accordingly
553  *
554  * mode = 1: merge identical keys, if key is set
555  *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
556  *           additionally, we could even add a key range for the blocks we
557  *           looked into to merge even more (-> replace unresolved refs by those
558  *           having a parent).
559  * mode = 2: merge identical parents
560  */
561 static void __merge_refs(struct list_head *head, int mode)
562 {
563 	struct __prelim_ref *pos1;
564 
565 	list_for_each_entry(pos1, head, list) {
566 		struct __prelim_ref *pos2 = pos1, *tmp;
567 
568 		list_for_each_entry_safe_continue(pos2, tmp, head, list) {
569 			struct __prelim_ref *xchg, *ref1 = pos1, *ref2 = pos2;
570 			struct extent_inode_elem *eie;
571 
572 			if (!ref_for_same_block(ref1, ref2))
573 				continue;
574 			if (mode == 1) {
575 				if (!ref1->parent && ref2->parent) {
576 					xchg = ref1;
577 					ref1 = ref2;
578 					ref2 = xchg;
579 				}
580 			} else {
581 				if (ref1->parent != ref2->parent)
582 					continue;
583 			}
584 
585 			eie = ref1->inode_list;
586 			while (eie && eie->next)
587 				eie = eie->next;
588 			if (eie)
589 				eie->next = ref2->inode_list;
590 			else
591 				ref1->inode_list = ref2->inode_list;
592 			ref1->count += ref2->count;
593 
594 			list_del(&ref2->list);
595 			kmem_cache_free(btrfs_prelim_ref_cache, ref2);
596 		}
597 
598 	}
599 }
600 
601 /*
602  * add to the list all currently queued delayed refs from this head whose
603  * seq nr is smaller than or equal to seq
604  */
605 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
606 			      struct list_head *prefs, u64 *total_refs,
607 			      u64 inum)
608 {
609 	struct btrfs_delayed_ref_node *node;
610 	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
611 	struct btrfs_key key;
612 	struct btrfs_key op_key = {0};
613 	int sgn;
614 	int ret = 0;
615 
616 	if (extent_op && extent_op->update_key)
617 		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
618 
619 	spin_lock(&head->lock);
620 	list_for_each_entry(node, &head->ref_list, list) {
621 		if (node->seq > seq)
622 			continue;
623 
624 		switch (node->action) {
625 		case BTRFS_ADD_DELAYED_EXTENT:
626 		case BTRFS_UPDATE_DELAYED_HEAD:
627 			WARN_ON(1);
628 			continue;
629 		case BTRFS_ADD_DELAYED_REF:
630 			sgn = 1;
631 			break;
632 		case BTRFS_DROP_DELAYED_REF:
633 			sgn = -1;
634 			break;
635 		default:
636 			BUG_ON(1);
637 		}
638 		*total_refs += (node->ref_mod * sgn);
639 		switch (node->type) {
640 		case BTRFS_TREE_BLOCK_REF_KEY: {
641 			struct btrfs_delayed_tree_ref *ref;
642 
643 			ref = btrfs_delayed_node_to_tree_ref(node);
644 			ret = __add_prelim_ref(prefs, ref->root, &op_key,
645 					       ref->level + 1, 0, node->bytenr,
646 					       node->ref_mod * sgn, GFP_ATOMIC);
647 			break;
648 		}
649 		case BTRFS_SHARED_BLOCK_REF_KEY: {
650 			struct btrfs_delayed_tree_ref *ref;
651 
652 			ref = btrfs_delayed_node_to_tree_ref(node);
653 			ret = __add_prelim_ref(prefs, 0, NULL,
654 					       ref->level + 1, ref->parent,
655 					       node->bytenr,
656 					       node->ref_mod * sgn, GFP_ATOMIC);
657 			break;
658 		}
659 		case BTRFS_EXTENT_DATA_REF_KEY: {
660 			struct btrfs_delayed_data_ref *ref;
661 			ref = btrfs_delayed_node_to_data_ref(node);
662 
663 			key.objectid = ref->objectid;
664 			key.type = BTRFS_EXTENT_DATA_KEY;
665 			key.offset = ref->offset;
666 
667 			/*
668 			 * Found an inum that doesn't match our known inum, so
669 			 * we know it's shared.
670 			 */
671 			if (inum && ref->objectid != inum) {
672 				ret = BACKREF_FOUND_SHARED;
673 				break;
674 			}
675 
676 			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
677 					       node->bytenr,
678 					       node->ref_mod * sgn, GFP_ATOMIC);
679 			break;
680 		}
681 		case BTRFS_SHARED_DATA_REF_KEY: {
682 			struct btrfs_delayed_data_ref *ref;
683 
684 			ref = btrfs_delayed_node_to_data_ref(node);
685 			ret = __add_prelim_ref(prefs, 0, NULL, 0,
686 					       ref->parent, node->bytenr,
687 					       node->ref_mod * sgn, GFP_ATOMIC);
688 			break;
689 		}
690 		default:
691 			WARN_ON(1);
692 		}
693 		if (ret)
694 			break;
695 	}
696 	spin_unlock(&head->lock);
697 	return ret;
698 }
699 
700 /*
701  * add all inline backrefs for bytenr to the list
702  */
703 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
704 			     struct btrfs_path *path, u64 bytenr,
705 			     int *info_level, struct list_head *prefs,
706 			     u64 *total_refs, u64 inum)
707 {
708 	int ret = 0;
709 	int slot;
710 	struct extent_buffer *leaf;
711 	struct btrfs_key key;
712 	struct btrfs_key found_key;
713 	unsigned long ptr;
714 	unsigned long end;
715 	struct btrfs_extent_item *ei;
716 	u64 flags;
717 	u64 item_size;
718 
719 	/*
720 	 * enumerate all inline refs
721 	 */
722 	leaf = path->nodes[0];
723 	slot = path->slots[0];
724 
725 	item_size = btrfs_item_size_nr(leaf, slot);
726 	BUG_ON(item_size < sizeof(*ei));
727 
728 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
729 	flags = btrfs_extent_flags(leaf, ei);
730 	*total_refs += btrfs_extent_refs(leaf, ei);
731 	btrfs_item_key_to_cpu(leaf, &found_key, slot);
732 
733 	ptr = (unsigned long)(ei + 1);
734 	end = (unsigned long)ei + item_size;
735 
736 	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
737 	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
738 		struct btrfs_tree_block_info *info;
739 
740 		info = (struct btrfs_tree_block_info *)ptr;
741 		*info_level = btrfs_tree_block_level(leaf, info);
742 		ptr += sizeof(struct btrfs_tree_block_info);
743 		BUG_ON(ptr > end);
744 	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
745 		*info_level = found_key.offset;
746 	} else {
747 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
748 	}
749 
750 	while (ptr < end) {
751 		struct btrfs_extent_inline_ref *iref;
752 		u64 offset;
753 		int type;
754 
755 		iref = (struct btrfs_extent_inline_ref *)ptr;
756 		type = btrfs_extent_inline_ref_type(leaf, iref);
757 		offset = btrfs_extent_inline_ref_offset(leaf, iref);
758 
759 		switch (type) {
760 		case BTRFS_SHARED_BLOCK_REF_KEY:
761 			ret = __add_prelim_ref(prefs, 0, NULL,
762 						*info_level + 1, offset,
763 						bytenr, 1, GFP_NOFS);
764 			break;
765 		case BTRFS_SHARED_DATA_REF_KEY: {
766 			struct btrfs_shared_data_ref *sdref;
767 			int count;
768 
769 			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
770 			count = btrfs_shared_data_ref_count(leaf, sdref);
771 			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
772 					       bytenr, count, GFP_NOFS);
773 			break;
774 		}
775 		case BTRFS_TREE_BLOCK_REF_KEY:
776 			ret = __add_prelim_ref(prefs, offset, NULL,
777 					       *info_level + 1, 0,
778 					       bytenr, 1, GFP_NOFS);
779 			break;
780 		case BTRFS_EXTENT_DATA_REF_KEY: {
781 			struct btrfs_extent_data_ref *dref;
782 			int count;
783 			u64 root;
784 
785 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
786 			count = btrfs_extent_data_ref_count(leaf, dref);
787 			key.objectid = btrfs_extent_data_ref_objectid(leaf,
788 								      dref);
789 			key.type = BTRFS_EXTENT_DATA_KEY;
790 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
791 
792 			if (inum && key.objectid != inum) {
793 				ret = BACKREF_FOUND_SHARED;
794 				break;
795 			}
796 
797 			root = btrfs_extent_data_ref_root(leaf, dref);
798 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
799 					       bytenr, count, GFP_NOFS);
800 			break;
801 		}
802 		default:
803 			WARN_ON(1);
804 		}
805 		if (ret)
806 			return ret;
807 		ptr += btrfs_extent_inline_ref_size(type);
808 	}
809 
810 	return 0;
811 }
812 
813 /*
814  * add all non-inline backrefs for bytenr to the list
815  */
816 static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
817 			    struct btrfs_path *path, u64 bytenr,
818 			    int info_level, struct list_head *prefs, u64 inum)
819 {
820 	struct btrfs_root *extent_root = fs_info->extent_root;
821 	int ret;
822 	int slot;
823 	struct extent_buffer *leaf;
824 	struct btrfs_key key;
825 
826 	while (1) {
827 		ret = btrfs_next_item(extent_root, path);
828 		if (ret < 0)
829 			break;
830 		if (ret) {
831 			ret = 0;
832 			break;
833 		}
834 
835 		slot = path->slots[0];
836 		leaf = path->nodes[0];
837 		btrfs_item_key_to_cpu(leaf, &key, slot);
838 
839 		if (key.objectid != bytenr)
840 			break;
841 		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
842 			continue;
843 		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
844 			break;
845 
846 		switch (key.type) {
847 		case BTRFS_SHARED_BLOCK_REF_KEY:
848 			ret = __add_prelim_ref(prefs, 0, NULL,
849 						info_level + 1, key.offset,
850 						bytenr, 1, GFP_NOFS);
851 			break;
852 		case BTRFS_SHARED_DATA_REF_KEY: {
853 			struct btrfs_shared_data_ref *sdref;
854 			int count;
855 
856 			sdref = btrfs_item_ptr(leaf, slot,
857 					      struct btrfs_shared_data_ref);
858 			count = btrfs_shared_data_ref_count(leaf, sdref);
859 			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
860 						bytenr, count, GFP_NOFS);
861 			break;
862 		}
863 		case BTRFS_TREE_BLOCK_REF_KEY:
864 			ret = __add_prelim_ref(prefs, key.offset, NULL,
865 					       info_level + 1, 0,
866 					       bytenr, 1, GFP_NOFS);
867 			break;
868 		case BTRFS_EXTENT_DATA_REF_KEY: {
869 			struct btrfs_extent_data_ref *dref;
870 			int count;
871 			u64 root;
872 
873 			dref = btrfs_item_ptr(leaf, slot,
874 					      struct btrfs_extent_data_ref);
875 			count = btrfs_extent_data_ref_count(leaf, dref);
876 			key.objectid = btrfs_extent_data_ref_objectid(leaf,
877 								      dref);
878 			key.type = BTRFS_EXTENT_DATA_KEY;
879 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
880 
881 			if (inum && key.objectid != inum) {
882 				ret = BACKREF_FOUND_SHARED;
883 				break;
884 			}
885 
886 			root = btrfs_extent_data_ref_root(leaf, dref);
887 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
888 					       bytenr, count, GFP_NOFS);
889 			break;
890 		}
891 		default:
892 			WARN_ON(1);
893 		}
894 		if (ret)
895 			return ret;
896 
897 	}
898 
899 	return ret;
900 }
901 
902 /*
903  * this adds all existing backrefs (inline backrefs, backrefs and delayed
904  * refs) for the given bytenr to the refs list, merges duplicates and resolves
905  * indirect refs to their parent bytenr.
906  * When roots are found, they're added to the roots list
907  *
908  * NOTE: This can return values > 0
909  *
910  * If time_seq is set to (u64)-1, it will not search delayed_refs, and behaves
911  * much like the trans == NULL case; the only difference is that it will not
912  * use the commit root.
913  * The special case is for qgroup to search roots in commit_transaction().
914  *
915  * FIXME some caching might speed things up
916  */
917 static int find_parent_nodes(struct btrfs_trans_handle *trans,
918 			     struct btrfs_fs_info *fs_info, u64 bytenr,
919 			     u64 time_seq, struct ulist *refs,
920 			     struct ulist *roots, const u64 *extent_item_pos,
921 			     u64 root_objectid, u64 inum)
922 {
923 	struct btrfs_key key;
924 	struct btrfs_path *path;
925 	struct btrfs_delayed_ref_root *delayed_refs = NULL;
926 	struct btrfs_delayed_ref_head *head;
927 	int info_level = 0;
928 	int ret;
929 	struct list_head prefs_delayed;
930 	struct list_head prefs;
931 	struct __prelim_ref *ref;
932 	struct extent_inode_elem *eie = NULL;
933 	u64 total_refs = 0;
934 
935 	INIT_LIST_HEAD(&prefs);
936 	INIT_LIST_HEAD(&prefs_delayed);
937 
938 	key.objectid = bytenr;
939 	key.offset = (u64)-1;
940 	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
941 		key.type = BTRFS_METADATA_ITEM_KEY;
942 	else
943 		key.type = BTRFS_EXTENT_ITEM_KEY;
944 
945 	path = btrfs_alloc_path();
946 	if (!path)
947 		return -ENOMEM;
948 	if (!trans) {
949 		path->search_commit_root = 1;
950 		path->skip_locking = 1;
951 	}
952 
953 	if (time_seq == (u64)-1)
954 		path->skip_locking = 1;
955 
956 	/*
957 	 * grab both a lock on the path and a lock on the delayed ref head.
958 	 * We need both to get a consistent picture of how the refs look
959 	 * at a specified point in time
960 	 */
961 again:
962 	head = NULL;
963 
964 	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
965 	if (ret < 0)
966 		goto out;
967 	BUG_ON(ret == 0);
968 
969 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
970 	if (trans && likely(trans->type != __TRANS_DUMMY) &&
971 	    time_seq != (u64)-1) {
972 #else
973 	if (trans && time_seq != (u64)-1) {
974 #endif
975 		/*
976 		 * look if there are updates for this ref queued and lock the
977 		 * head
978 		 */
979 		delayed_refs = &trans->transaction->delayed_refs;
980 		spin_lock(&delayed_refs->lock);
981 		head = btrfs_find_delayed_ref_head(trans, bytenr);
982 		if (head) {
983 			if (!mutex_trylock(&head->mutex)) {
984 				atomic_inc(&head->node.refs);
985 				spin_unlock(&delayed_refs->lock);
986 
987 				btrfs_release_path(path);
988 
989 				/*
990 				 * Mutex was contended, block until it's
991 				 * released and try again
992 				 */
993 				mutex_lock(&head->mutex);
994 				mutex_unlock(&head->mutex);
995 				btrfs_put_delayed_ref(&head->node);
996 				goto again;
997 			}
998 			spin_unlock(&delayed_refs->lock);
999 			ret = __add_delayed_refs(head, time_seq,
1000 						 &prefs_delayed, &total_refs,
1001 						 inum);
1002 			mutex_unlock(&head->mutex);
1003 			if (ret)
1004 				goto out;
1005 		} else {
1006 			spin_unlock(&delayed_refs->lock);
1007 		}
1008 	}
1009 
1010 	if (path->slots[0]) {
1011 		struct extent_buffer *leaf;
1012 		int slot;
1013 
1014 		path->slots[0]--;
1015 		leaf = path->nodes[0];
1016 		slot = path->slots[0];
1017 		btrfs_item_key_to_cpu(leaf, &key, slot);
1018 		if (key.objectid == bytenr &&
1019 		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
1020 		     key.type == BTRFS_METADATA_ITEM_KEY)) {
1021 			ret = __add_inline_refs(fs_info, path, bytenr,
1022 						&info_level, &prefs,
1023 						&total_refs, inum);
1024 			if (ret)
1025 				goto out;
1026 			ret = __add_keyed_refs(fs_info, path, bytenr,
1027 					       info_level, &prefs, inum);
1028 			if (ret)
1029 				goto out;
1030 		}
1031 	}
1032 	btrfs_release_path(path);
1033 
1034 	list_splice_init(&prefs_delayed, &prefs);
1035 
1036 	ret = __add_missing_keys(fs_info, &prefs);
1037 	if (ret)
1038 		goto out;
1039 
1040 	__merge_refs(&prefs, 1);
1041 
1042 	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
1043 				      extent_item_pos, total_refs,
1044 				      root_objectid);
1045 	if (ret)
1046 		goto out;
1047 
1048 	__merge_refs(&prefs, 2);
1049 
1050 	while (!list_empty(&prefs)) {
1051 		ref = list_first_entry(&prefs, struct __prelim_ref, list);
1052 		WARN_ON(ref->count < 0);
1053 		if (roots && ref->count && ref->root_id && ref->parent == 0) {
1054 			if (root_objectid && ref->root_id != root_objectid) {
1055 				ret = BACKREF_FOUND_SHARED;
1056 				goto out;
1057 			}
1058 
1059 			/* no parent == root of tree */
1060 			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1061 			if (ret < 0)
1062 				goto out;
1063 		}
1064 		if (ref->count && ref->parent) {
1065 			if (extent_item_pos && !ref->inode_list &&
1066 			    ref->level == 0) {
1067 				struct extent_buffer *eb;
1068 
1069 				eb = read_tree_block(fs_info->extent_root,
1070 							   ref->parent, 0);
1071 				if (IS_ERR(eb)) {
1072 					ret = PTR_ERR(eb);
1073 					goto out;
1074 				} else if (!extent_buffer_uptodate(eb)) {
1075 					free_extent_buffer(eb);
1076 					ret = -EIO;
1077 					goto out;
1078 				}
1079 				btrfs_tree_read_lock(eb);
1080 				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1081 				ret = find_extent_in_eb(eb, bytenr,
1082 							*extent_item_pos, &eie);
1083 				btrfs_tree_read_unlock_blocking(eb);
1084 				free_extent_buffer(eb);
1085 				if (ret < 0)
1086 					goto out;
1087 				ref->inode_list = eie;
1088 			}
1089 			ret = ulist_add_merge_ptr(refs, ref->parent,
1090 						  ref->inode_list,
1091 						  (void **)&eie, GFP_NOFS);
1092 			if (ret < 0)
1093 				goto out;
1094 			if (!ret && extent_item_pos) {
1095 				/*
1096 				 * we've recorded that parent, so we must extend
1097 				 * its inode list here
1098 				 */
1099 				BUG_ON(!eie);
1100 				while (eie->next)
1101 					eie = eie->next;
1102 				eie->next = ref->inode_list;
1103 			}
1104 			eie = NULL;
1105 		}
1106 		list_del(&ref->list);
1107 		kmem_cache_free(btrfs_prelim_ref_cache, ref);
1108 	}
1109 
1110 out:
1111 	btrfs_free_path(path);
1112 	while (!list_empty(&prefs)) {
1113 		ref = list_first_entry(&prefs, struct __prelim_ref, list);
1114 		list_del(&ref->list);
1115 		kmem_cache_free(btrfs_prelim_ref_cache, ref);
1116 	}
1117 	while (!list_empty(&prefs_delayed)) {
1118 		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
1119 				       list);
1120 		list_del(&ref->list);
1121 		kmem_cache_free(btrfs_prelim_ref_cache, ref);
1122 	}
1123 	if (ret < 0)
1124 		free_inode_elem_list(eie);
1125 	return ret;
1126 }
1127 
1128 static void free_leaf_list(struct ulist *blocks)
1129 {
1130 	struct ulist_node *node = NULL;
1131 	struct extent_inode_elem *eie;
1132 	struct ulist_iterator uiter;
1133 
1134 	ULIST_ITER_INIT(&uiter);
1135 	while ((node = ulist_next(blocks, &uiter))) {
1136 		if (!node->aux)
1137 			continue;
1138 		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
1139 		free_inode_elem_list(eie);
1140 		node->aux = 0;
1141 	}
1142 
1143 	ulist_free(blocks);
1144 }
1145 
1146 /*
1147  * Finds all leafs with a reference to the specified combination of bytenr and
1148  * offset. When extent_item_pos is given, the matching inode/offset pairs are
1149  * attached to each leaf's ulist aux field. The leafs will be stored in the
1150  * leafs ulist, which must be freed with free_leaf_list.
1151  *
1152  * returns 0 on success, <0 on error
1153  */
1154 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1155 				struct btrfs_fs_info *fs_info, u64 bytenr,
1156 				u64 time_seq, struct ulist **leafs,
1157 				const u64 *extent_item_pos)
1158 {
1159 	int ret;
1160 
1161 	*leafs = ulist_alloc(GFP_NOFS);
1162 	if (!*leafs)
1163 		return -ENOMEM;
1164 
1165 	ret = find_parent_nodes(trans, fs_info, bytenr,
1166 				time_seq, *leafs, NULL, extent_item_pos, 0, 0);
1167 	if (ret < 0 && ret != -ENOENT) {
1168 		free_leaf_list(*leafs);
1169 		return ret;
1170 	}
1171 
1172 	return 0;
1173 }
1174 
1175 /*
1176  * walk all backrefs for a given extent to find all roots that reference this
1177  * extent. Walking a backref means finding all extents that reference this
1178  * extent and in turn walk the backrefs of those, too. Naturally this is a
1179  * recursive process, but here it is implemented in an iterative fashion: We
1180  * find all referencing extents for the extent in question and put them on a
1181  * list. In turn, we find all referencing extents for those, further appending
1182  * to the list. The way we iterate the list allows adding more elements after
1183  * the current while iterating. The process stops when we reach the end of the
1184  * list. Found roots are added to the roots list.
1185  *
1186  * returns 0 on success, < 0 on error.
1187  */
1188 static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1189 				  struct btrfs_fs_info *fs_info, u64 bytenr,
1190 				  u64 time_seq, struct ulist **roots)
1191 {
1192 	struct ulist *tmp;
1193 	struct ulist_node *node = NULL;
1194 	struct ulist_iterator uiter;
1195 	int ret;
1196 
1197 	tmp = ulist_alloc(GFP_NOFS);
1198 	if (!tmp)
1199 		return -ENOMEM;
1200 	*roots = ulist_alloc(GFP_NOFS);
1201 	if (!*roots) {
1202 		ulist_free(tmp);
1203 		return -ENOMEM;
1204 	}
1205 
1206 	ULIST_ITER_INIT(&uiter);
1207 	while (1) {
1208 		ret = find_parent_nodes(trans, fs_info, bytenr,
1209 					time_seq, tmp, *roots, NULL, 0, 0);
1210 		if (ret < 0 && ret != -ENOENT) {
1211 			ulist_free(tmp);
1212 			ulist_free(*roots);
1213 			return ret;
1214 		}
1215 		node = ulist_next(tmp, &uiter);
1216 		if (!node)
1217 			break;
1218 		bytenr = node->val;
1219 		cond_resched();
1220 	}
1221 
1222 	ulist_free(tmp);
1223 	return 0;
1224 }
1225 
1226 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1227 			 struct btrfs_fs_info *fs_info, u64 bytenr,
1228 			 u64 time_seq, struct ulist **roots)
1229 {
1230 	int ret;
1231 
1232 	if (!trans)
1233 		down_read(&fs_info->commit_root_sem);
1234 	ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
1235 	if (!trans)
1236 		up_read(&fs_info->commit_root_sem);
1237 	return ret;
1238 }
1239 
1240 /**
1241  * btrfs_check_shared - tell us whether an extent is shared
1242  *
1243  * @trans: optional trans handle
1244  *
1245  * btrfs_check_shared uses the backref walking code but will short
1246  * circuit as soon as it finds a root or inode that doesn't match the
1247  * one passed in. This provides a significant performance benefit for
1248  * callers (such as fiemap) which want to know whether the extent is
1249  * shared but do not need a ref count.
1250  *
1251  * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1252  */
1253 int btrfs_check_shared(struct btrfs_trans_handle *trans,
1254 		       struct btrfs_fs_info *fs_info, u64 root_objectid,
1255 		       u64 inum, u64 bytenr)
1256 {
1257 	struct ulist *tmp = NULL;
1258 	struct ulist *roots = NULL;
1259 	struct ulist_iterator uiter;
1260 	struct ulist_node *node;
1261 	struct seq_list elem = SEQ_LIST_INIT(elem);
1262 	int ret = 0;
1263 
1264 	tmp = ulist_alloc(GFP_NOFS);
1265 	roots = ulist_alloc(GFP_NOFS);
1266 	if (!tmp || !roots) {
1267 		ulist_free(tmp);
1268 		ulist_free(roots);
1269 		return -ENOMEM;
1270 	}
1271 
1272 	if (trans)
1273 		btrfs_get_tree_mod_seq(fs_info, &elem);
1274 	else
1275 		down_read(&fs_info->commit_root_sem);
1276 	ULIST_ITER_INIT(&uiter);
1277 	while (1) {
1278 		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1279 					roots, NULL, root_objectid, inum);
1280 		if (ret == BACKREF_FOUND_SHARED) {
1281 			/* this is the only condition under which we return 1 */
1282 			ret = 1;
1283 			break;
1284 		}
1285 		if (ret < 0 && ret != -ENOENT)
1286 			break;
1287 		ret = 0;
1288 		node = ulist_next(tmp, &uiter);
1289 		if (!node)
1290 			break;
1291 		bytenr = node->val;
1292 		cond_resched();
1293 	}
1294 	if (trans)
1295 		btrfs_put_tree_mod_seq(fs_info, &elem);
1296 	else
1297 		up_read(&fs_info->commit_root_sem);
1298 	ulist_free(tmp);
1299 	ulist_free(roots);
1300 	return ret;
1301 }
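
/*
 * Example usage (illustrative sketch; the fiemap-style caller and the
 * variables 'root', 'ino', 'disk_bytenr' and 'flags' are hypothetical,
 * not part of this file):
 *
 *	ret = btrfs_check_shared(NULL, root->fs_info, root->objectid,
 *				 ino, disk_bytenr);
 *	if (ret < 0)
 *		return ret;
 *	if (ret)
 *		flags |= FIEMAP_EXTENT_SHARED;	// extent has other users
 *	ret = 0;
 */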
1302 
1303 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1304 			  u64 start_off, struct btrfs_path *path,
1305 			  struct btrfs_inode_extref **ret_extref,
1306 			  u64 *found_off)
1307 {
1308 	int ret, slot;
1309 	struct btrfs_key key;
1310 	struct btrfs_key found_key;
1311 	struct btrfs_inode_extref *extref;
1312 	struct extent_buffer *leaf;
1313 	unsigned long ptr;
1314 
1315 	key.objectid = inode_objectid;
1316 	key.type = BTRFS_INODE_EXTREF_KEY;
1317 	key.offset = start_off;
1318 
1319 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1320 	if (ret < 0)
1321 		return ret;
1322 
1323 	while (1) {
1324 		leaf = path->nodes[0];
1325 		slot = path->slots[0];
1326 		if (slot >= btrfs_header_nritems(leaf)) {
1327 			/*
1328 			 * If the item at offset is not found,
1329 			 * btrfs_search_slot will point us to the slot
1330 			 * where it should be inserted. In our case
1331 			 * that will be the slot directly before the
1332 			 * next INODE_EXTREF item. In the case
1333 			 * that we're pointing to the last slot in a
1334 			 * leaf, we must move one leaf over.
1335 			 */
1336 			ret = btrfs_next_leaf(root, path);
1337 			if (ret) {
1338 				if (ret >= 1)
1339 					ret = -ENOENT;
1340 				break;
1341 			}
1342 			continue;
1343 		}
1344 
1345 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1346 
1347 		/*
1348 		 * Check that we're still looking at an extended ref key for
1349 		 * this particular objectid. If we have a different
1350 		 * objectid or type then there are no more to be found
1351 		 * in the tree and we can exit.
1352 		 */
1353 		ret = -ENOENT;
1354 		if (found_key.objectid != inode_objectid)
1355 			break;
1356 		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1357 			break;
1358 
1359 		ret = 0;
1360 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1361 		extref = (struct btrfs_inode_extref *)ptr;
1362 		*ret_extref = extref;
1363 		if (found_off)
1364 			*found_off = found_key.offset;
1365 		break;
1366 	}
1367 
1368 	return ret;
1369 }
1370 
1371 /*
1372  * this iterates to turn a name (from iref/extref) into a full filesystem path.
1373  * Elements of the path are separated by '/' and the path is guaranteed to be
1374  * 0-terminated. the path is only given within the current file system.
1375  * Therefore, it never starts with a '/'. the caller is responsible for providing
1376  * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
1377  * the start point of the resulting string is returned. this pointer is within
1378  * dest, normally.
1379  * in case the path buffer would overflow, the pointer is decremented further
1380  * as if output was written to the buffer, though no more output is actually
1381  * generated. that way, the caller can determine how much space would be
1382  * required for the path to fit into the buffer. in that case, the returned
1383  * value will be smaller than dest. callers must check this!
1384  */
1385 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1386 			u32 name_len, unsigned long name_off,
1387 			struct extent_buffer *eb_in, u64 parent,
1388 			char *dest, u32 size)
1389 {
1390 	int slot;
1391 	u64 next_inum;
1392 	int ret;
1393 	s64 bytes_left = ((s64)size) - 1;
1394 	struct extent_buffer *eb = eb_in;
1395 	struct btrfs_key found_key;
1396 	int leave_spinning = path->leave_spinning;
1397 	struct btrfs_inode_ref *iref;
1398 
1399 	if (bytes_left >= 0)
1400 		dest[bytes_left] = '\0';
1401 
1402 	path->leave_spinning = 1;
1403 	while (1) {
1404 		bytes_left -= name_len;
1405 		if (bytes_left >= 0)
1406 			read_extent_buffer(eb, dest + bytes_left,
1407 					   name_off, name_len);
1408 		if (eb != eb_in) {
1409 			btrfs_tree_read_unlock_blocking(eb);
1410 			free_extent_buffer(eb);
1411 		}
1412 		ret = btrfs_find_item(fs_root, path, parent, 0,
1413 				BTRFS_INODE_REF_KEY, &found_key);
1414 		if (ret > 0)
1415 			ret = -ENOENT;
1416 		if (ret)
1417 			break;
1418 
1419 		next_inum = found_key.offset;
1420 
1421 		/* regular exit ahead */
1422 		if (parent == next_inum)
1423 			break;
1424 
1425 		slot = path->slots[0];
1426 		eb = path->nodes[0];
1427 		/* make sure we can use eb after releasing the path */
1428 		if (eb != eb_in) {
1429 			atomic_inc(&eb->refs);
1430 			btrfs_tree_read_lock(eb);
1431 			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1432 		}
1433 		btrfs_release_path(path);
1434 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1435 
1436 		name_len = btrfs_inode_ref_name_len(eb, iref);
1437 		name_off = (unsigned long)(iref + 1);
1438 
1439 		parent = next_inum;
1440 		--bytes_left;
1441 		if (bytes_left >= 0)
1442 			dest[bytes_left] = '/';
1443 	}
1444 
1445 	btrfs_release_path(path);
1446 	path->leave_spinning = leave_spinning;
1447 
1448 	if (ret)
1449 		return ERR_PTR(ret);
1450 
1451 	return dest + bytes_left;
1452 }
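
/*
 * Example usage (illustrative sketch; 'buf' and 'buf_size' are hypothetical
 * caller-provided storage):
 *
 *	char *name;
 *
 *	name = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				 eb, parent, buf, buf_size);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	if (name < buf) {
 *		// buffer too small: (buf - name) additional bytes would
 *		// have been needed, nothing useful was stored
 *	} else {
 *		// name points at the 0-terminated path inside buf
 *	}
 */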
1453 
1454 /*
1455  * this makes the path point to (logical EXTENT_ITEM *)
1456  * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1457  * tree blocks and <0 on error.
1458  */
1459 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1460 			struct btrfs_path *path, struct btrfs_key *found_key,
1461 			u64 *flags_ret)
1462 {
1463 	int ret;
1464 	u64 flags;
1465 	u64 size = 0;
1466 	u32 item_size;
1467 	struct extent_buffer *eb;
1468 	struct btrfs_extent_item *ei;
1469 	struct btrfs_key key;
1470 
1471 	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1472 		key.type = BTRFS_METADATA_ITEM_KEY;
1473 	else
1474 		key.type = BTRFS_EXTENT_ITEM_KEY;
1475 	key.objectid = logical;
1476 	key.offset = (u64)-1;
1477 
1478 	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1479 	if (ret < 0)
1480 		return ret;
1481 
1482 	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1483 	if (ret) {
1484 		if (ret > 0)
1485 			ret = -ENOENT;
1486 		return ret;
1487 	}
1488 	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1489 	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1490 		size = fs_info->extent_root->nodesize;
1491 	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1492 		size = found_key->offset;
1493 
1494 	if (found_key->objectid > logical ||
1495 	    found_key->objectid + size <= logical) {
1496 		pr_debug("logical %llu is not within any extent\n", logical);
1497 		return -ENOENT;
1498 	}
1499 
1500 	eb = path->nodes[0];
1501 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
1502 	BUG_ON(item_size < sizeof(*ei));
1503 
1504 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1505 	flags = btrfs_extent_flags(eb, ei);
1506 
1507 	pr_debug("logical %llu is at position %llu within the extent (%llu "
1508 		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
1509 		 logical, logical - found_key->objectid, found_key->objectid,
1510 		 found_key->offset, flags, item_size);
1511 
1512 	WARN_ON(!flags_ret);
1513 	if (flags_ret) {
1514 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1515 			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1516 		else if (flags & BTRFS_EXTENT_FLAG_DATA)
1517 			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
1518 		else
1519 			BUG_ON(1);
1520 		return 0;
1521 	}
1522 
1523 	return -EIO;
1524 }
1525 
1526 /*
1527  * helper function to iterate extent inline refs. ptr must point to a 0 value
1528  * for the first call and may be modified. it is used to track state.
1529  * if more refs exist, 0 is returned and the next call to
1530  * __get_extent_inline_ref must pass the modified ptr parameter to get the
1531  * next ref. after the last ref was processed, 1 is returned.
1532  * returns <0 on error
1533  */
1534 static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
1535 				   struct btrfs_key *key,
1536 				   struct btrfs_extent_item *ei, u32 item_size,
1537 				   struct btrfs_extent_inline_ref **out_eiref,
1538 				   int *out_type)
1539 {
1540 	unsigned long end;
1541 	u64 flags;
1542 	struct btrfs_tree_block_info *info;
1543 
1544 	if (!*ptr) {
1545 		/* first call */
1546 		flags = btrfs_extent_flags(eb, ei);
1547 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1548 			if (key->type == BTRFS_METADATA_ITEM_KEY) {
1549 				/* a skinny metadata extent */
1550 				*out_eiref =
1551 				     (struct btrfs_extent_inline_ref *)(ei + 1);
1552 			} else {
1553 				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1554 				info = (struct btrfs_tree_block_info *)(ei + 1);
1555 				*out_eiref =
1556 				   (struct btrfs_extent_inline_ref *)(info + 1);
1557 			}
1558 		} else {
1559 			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1560 		}
1561 		*ptr = (unsigned long)*out_eiref;
1562 		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1563 			return -ENOENT;
1564 	}
1565 
1566 	end = (unsigned long)ei + item_size;
1567 	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1568 	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
1569 
1570 	*ptr += btrfs_extent_inline_ref_size(*out_type);
1571 	WARN_ON(*ptr > end);
1572 	if (*ptr == end)
1573 		return 1; /* last */
1574 
1575 	return 0;
1576 }
1577 
1578 /*
1579  * reads the tree block backref for an extent. tree level and root are returned
1580  * through out_level and out_root. ptr must point to a 0 value for the first
1581  * call and may be modified (see __get_extent_inline_ref comment).
1582  * returns 0 if data was provided, 1 if there was no more data to provide or
1583  * <0 on error.
1584  */
1585 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1586 			    struct btrfs_key *key, struct btrfs_extent_item *ei,
1587 			    u32 item_size, u64 *out_root, u8 *out_level)
1588 {
1589 	int ret;
1590 	int type;
1591 	struct btrfs_extent_inline_ref *eiref;
1592 
1593 	if (*ptr == (unsigned long)-1)
1594 		return 1;
1595 
1596 	while (1) {
1597 		ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
1598 					      &eiref, &type);
1599 		if (ret < 0)
1600 			return ret;
1601 
1602 		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1603 		    type == BTRFS_SHARED_BLOCK_REF_KEY)
1604 			break;
1605 
1606 		if (ret == 1)
1607 			return 1;
1608 	}
1609 
1610 	/* we can treat both ref types equally here */
1611 	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1612 
1613 	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1614 		struct btrfs_tree_block_info *info;
1615 
1616 		info = (struct btrfs_tree_block_info *)(ei + 1);
1617 		*out_level = btrfs_tree_block_level(eb, info);
1618 	} else {
1619 		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1620 		*out_level = (u8)key->offset;
1621 	}
1622 
1623 	if (ret == 1)
1624 		*ptr = (unsigned long)-1;
1625 
1626 	return 0;
1627 }
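
/*
 * Example usage (illustrative sketch, modelled on a scrub-style caller that
 * walks every tree block backref of a metadata extent; 'eb', 'key', 'ei'
 * and 'item_size' are assumed to come from a prior lookup of the extent
 * item, e.g. via extent_from_logical()):
 *
 *	unsigned long ptr = 0;
 *	u64 ref_root;
 *	u8 ref_level;
 *	int ret;
 *
 *	while (1) {
 *		ret = tree_backref_for_extent(&ptr, eb, &key, ei, item_size,
 *					      &ref_root, &ref_level);
 *		if (ret)
 *			break;		// 1: no more backrefs, <0: error
 *		// one (ref_root, ref_level) pair resolved; use it here
 *	}
 */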
1628 
1629 static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
1630 				u64 root, u64 extent_item_objectid,
1631 				iterate_extent_inodes_t *iterate, void *ctx)
1632 {
1633 	struct extent_inode_elem *eie;
1634 	int ret = 0;
1635 
1636 	for (eie = inode_list; eie; eie = eie->next) {
1637 		pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
1638 			 "root %llu\n", extent_item_objectid,
1639 			 eie->inum, eie->offset, root);
1640 		ret = iterate(eie->inum, eie->offset, root, ctx);
1641 		if (ret) {
1642 			pr_debug("stopping iteration for %llu due to ret=%d\n",
1643 				 extent_item_objectid, ret);
1644 			break;
1645 		}
1646 	}
1647 
1648 	return ret;
1649 }
1650 
1651 /*
1652  * calls iterate() for every inode that references the extent identified by
1653  * the given parameters.
1654  * when the iterator function returns a non-zero value, iteration stops.
1655  */
1656 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1657 				u64 extent_item_objectid, u64 extent_item_pos,
1658 				int search_commit_root,
1659 				iterate_extent_inodes_t *iterate, void *ctx)
1660 {
1661 	int ret;
1662 	struct btrfs_trans_handle *trans = NULL;
1663 	struct ulist *refs = NULL;
1664 	struct ulist *roots = NULL;
1665 	struct ulist_node *ref_node = NULL;
1666 	struct ulist_node *root_node = NULL;
1667 	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
1668 	struct ulist_iterator ref_uiter;
1669 	struct ulist_iterator root_uiter;
1670 
1671 	pr_debug("resolving all inodes for extent %llu\n",
1672 			extent_item_objectid);
1673 
1674 	if (!search_commit_root) {
1675 		trans = btrfs_join_transaction(fs_info->extent_root);
1676 		if (IS_ERR(trans))
1677 			return PTR_ERR(trans);
1678 		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1679 	} else {
1680 		down_read(&fs_info->commit_root_sem);
1681 	}
1682 
1683 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1684 				   tree_mod_seq_elem.seq, &refs,
1685 				   &extent_item_pos);
1686 	if (ret)
1687 		goto out;
1688 
1689 	ULIST_ITER_INIT(&ref_uiter);
1690 	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1691 		ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
1692 					     tree_mod_seq_elem.seq, &roots);
1693 		if (ret)
1694 			break;
1695 		ULIST_ITER_INIT(&root_uiter);
1696 		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1697 			pr_debug("root %llu references leaf %llu, data list "
1698 				 "%#llx\n", root_node->val, ref_node->val,
1699 				 ref_node->aux);
1700 			ret = iterate_leaf_refs((struct extent_inode_elem *)
1701 						(uintptr_t)ref_node->aux,
1702 						root_node->val,
1703 						extent_item_objectid,
1704 						iterate, ctx);
1705 		}
1706 		ulist_free(roots);
1707 	}
1708 
1709 	free_leaf_list(refs);
1710 out:
1711 	if (!search_commit_root) {
1712 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1713 		btrfs_end_transaction(trans, fs_info->extent_root);
1714 	} else {
1715 		up_read(&fs_info->commit_root_sem);
1716 	}
1717 
1718 	return ret;
1719 }
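
/*
 * Example usage (illustrative sketch; 'count_refs' and 'struct ref_count'
 * are hypothetical and only illustrate the iterate_extent_inodes_t shape):
 *
 *	struct ref_count { u64 refs; };
 *
 *	static int count_refs(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		struct ref_count *rc = ctx;
 *
 *		rc->refs++;		// one (root, inum, offset) triple seen
 *		return 0;		// non-zero would stop the iteration
 *	}
 *
 *	...
 *	struct ref_count rc = { 0 };
 *
 *	ret = iterate_extent_inodes(fs_info, extent_bytenr, extent_offset,
 *				    0, count_refs, &rc);
 */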
1720 
1721 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1722 				struct btrfs_path *path,
1723 				iterate_extent_inodes_t *iterate, void *ctx)
1724 {
1725 	int ret;
1726 	u64 extent_item_pos;
1727 	u64 flags = 0;
1728 	struct btrfs_key found_key;
1729 	int search_commit_root = path->search_commit_root;
1730 
1731 	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
1732 	btrfs_release_path(path);
1733 	if (ret < 0)
1734 		return ret;
1735 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1736 		return -EINVAL;
1737 
1738 	extent_item_pos = logical - found_key.objectid;
1739 	ret = iterate_extent_inodes(fs_info, found_key.objectid,
1740 					extent_item_pos, search_commit_root,
1741 					iterate, ctx);
1742 
1743 	return ret;
1744 }
1745 
1746 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
1747 			      struct extent_buffer *eb, void *ctx);
1748 
1749 static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
1750 			      struct btrfs_path *path,
1751 			      iterate_irefs_t *iterate, void *ctx)
1752 {
1753 	int ret = 0;
1754 	int slot;
1755 	u32 cur;
1756 	u32 len;
1757 	u32 name_len;
1758 	u64 parent = 0;
1759 	int found = 0;
1760 	struct extent_buffer *eb;
1761 	struct btrfs_item *item;
1762 	struct btrfs_inode_ref *iref;
1763 	struct btrfs_key found_key;
1764 
1765 	while (!ret) {
1766 		ret = btrfs_find_item(fs_root, path, inum,
1767 				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
1768 				&found_key);
1769 
1770 		if (ret < 0)
1771 			break;
1772 		if (ret) {
1773 			ret = found ? 0 : -ENOENT;
1774 			break;
1775 		}
1776 		++found;
1777 
1778 		parent = found_key.offset;
1779 		slot = path->slots[0];
1780 		eb = btrfs_clone_extent_buffer(path->nodes[0]);
1781 		if (!eb) {
1782 			ret = -ENOMEM;
1783 			break;
1784 		}
1785 		extent_buffer_get(eb);
1786 		btrfs_tree_read_lock(eb);
1787 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1788 		btrfs_release_path(path);
1789 
1790 		item = btrfs_item_nr(slot);
1791 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1792 
1793 		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
1794 			name_len = btrfs_inode_ref_name_len(eb, iref);
1795 			/* path must be released before calling iterate()! */
1796 			pr_debug("following ref at offset %u for inode %llu in "
1797 				 "tree %llu\n", cur, found_key.objectid,
1798 				 fs_root->objectid);
1799 			ret = iterate(parent, name_len,
1800 				      (unsigned long)(iref + 1), eb, ctx);
1801 			if (ret)
1802 				break;
1803 			len = sizeof(*iref) + name_len;
1804 			iref = (struct btrfs_inode_ref *)((char *)iref + len);
1805 		}
1806 		btrfs_tree_read_unlock_blocking(eb);
1807 		free_extent_buffer(eb);
1808 	}
1809 
1810 	btrfs_release_path(path);
1811 
1812 	return ret;
1813 }
1814 
1815 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
1816 				 struct btrfs_path *path,
1817 				 iterate_irefs_t *iterate, void *ctx)
1818 {
1819 	int ret;
1820 	int slot;
1821 	u64 offset = 0;
1822 	u64 parent;
1823 	int found = 0;
1824 	struct extent_buffer *eb;
1825 	struct btrfs_inode_extref *extref;
1826 	u32 item_size;
1827 	u32 cur_offset;
1828 	unsigned long ptr;
1829 
1830 	while (1) {
1831 		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
1832 					    &offset);
1833 		if (ret < 0)
1834 			break;
1835 		if (ret) {
1836 			ret = found ? 0 : -ENOENT;
1837 			break;
1838 		}
1839 		++found;
1840 
1841 		slot = path->slots[0];
1842 		eb = btrfs_clone_extent_buffer(path->nodes[0]);
1843 		if (!eb) {
1844 			ret = -ENOMEM;
1845 			break;
1846 		}
1847 		extent_buffer_get(eb);
1848 
1849 		btrfs_tree_read_lock(eb);
1850 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1851 		btrfs_release_path(path);
1852 
1853 		item_size = btrfs_item_size_nr(eb, slot);
1854 		ptr = btrfs_item_ptr_offset(eb, slot);
1855 		cur_offset = 0;
1856 
1857 		while (cur_offset < item_size) {
1858 			u32 name_len;
1859 
1860 			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
1861 			parent = btrfs_inode_extref_parent(eb, extref);
1862 			name_len = btrfs_inode_extref_name_len(eb, extref);
1863 			ret = iterate(parent, name_len,
1864 				      (unsigned long)&extref->name, eb, ctx);
1865 			if (ret)
1866 				break;
1867 
1868 			cur_offset += btrfs_inode_extref_name_len(eb, extref);
1869 			cur_offset += sizeof(*extref);
1870 		}
1871 		btrfs_tree_read_unlock_blocking(eb);
1872 		free_extent_buffer(eb);
1873 
1874 		offset++;
1875 	}
1876 
1877 	btrfs_release_path(path);
1878 
1879 	return ret;
1880 }
1881 
1882 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
1883 			 struct btrfs_path *path, iterate_irefs_t *iterate,
1884 			 void *ctx)
1885 {
1886 	int ret;
1887 	int found_refs = 0;
1888 
1889 	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
1890 	if (!ret)
1891 		++found_refs;
1892 	else if (ret != -ENOENT)
1893 		return ret;
1894 
1895 	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
1896 	if (ret == -ENOENT && found_refs)
1897 		return 0;
1898 
1899 	return ret;
1900 }
1901 
1902 /*
1903  * returns 0 if the path could be dumped (possibly truncated)
1904  * returns <0 in case of an error
1905  */
1906 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
1907 			 struct extent_buffer *eb, void *ctx)
1908 {
1909 	struct inode_fs_paths *ipath = ctx;
1910 	char *fspath;
1911 	char *fspath_min;
1912 	int i = ipath->fspath->elem_cnt;
1913 	const int s_ptr = sizeof(char *);
1914 	u32 bytes_left;
1915 
1916 	bytes_left = ipath->fspath->bytes_left > s_ptr ?
1917 					ipath->fspath->bytes_left - s_ptr : 0;
1918 
1919 	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
1920 	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
1921 				   name_off, eb, inum, fspath_min, bytes_left);
1922 	if (IS_ERR(fspath))
1923 		return PTR_ERR(fspath);
1924 
1925 	if (fspath > fspath_min) {
1926 		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
1927 		++ipath->fspath->elem_cnt;
1928 		ipath->fspath->bytes_left = fspath - fspath_min;
1929 	} else {
1930 		++ipath->fspath->elem_missed;
1931 		ipath->fspath->bytes_missing += fspath_min - fspath;
1932 		ipath->fspath->bytes_left = 0;
1933 	}
1934 
1935 	return 0;
1936 }
1937 
1938 /*
1939  * this dumps all file system paths to the inode into the ipath struct, provided
1940  * it has been created large enough. each path is zero-terminated and accessed
1941  * from ipath->fspath->val[i].
1942  * when it returns, there are ipath->fspath->elem_cnt number of paths available
1943  * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
1944  * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
1945  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
1946  * have been needed to return all paths.
1947  */
1948 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
1949 {
1950 	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
1951 			     inode_to_path, ipath);
1952 }
1953 
1954 struct btrfs_data_container *init_data_container(u32 total_bytes)
1955 {
1956 	struct btrfs_data_container *data;
1957 	size_t alloc_bytes;
1958 
1959 	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
1960 	data = vmalloc(alloc_bytes);
1961 	if (!data)
1962 		return ERR_PTR(-ENOMEM);
1963 
1964 	if (total_bytes >= sizeof(*data)) {
1965 		data->bytes_left = total_bytes - sizeof(*data);
1966 		data->bytes_missing = 0;
1967 	} else {
1968 		data->bytes_missing = sizeof(*data) - total_bytes;
1969 		data->bytes_left = 0;
1970 	}
1971 
1972 	data->elem_cnt = 0;
1973 	data->elem_missed = 0;
1974 
1975 	return data;
1976 }
1977 
1978 /*
1979  * allocates space to return multiple file system paths for an inode.
1980  * total_bytes to allocate are passed, note that space usable for actual path
1981  * information will be total_bytes - sizeof(struct btrfs_data_container).
1982  * the returned pointer must be freed with free_ipath() in the end.
1983  */
1984 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
1985 					struct btrfs_path *path)
1986 {
1987 	struct inode_fs_paths *ifp;
1988 	struct btrfs_data_container *fspath;
1989 
1990 	fspath = init_data_container(total_bytes);
1991 	if (IS_ERR(fspath))
1992 		return (void *)fspath;
1993 
1994 	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
1995 	if (!ifp) {
1996 		kfree(fspath);
1997 		return ERR_PTR(-ENOMEM);
1998 	}
1999 
2000 	ifp->btrfs_path = path;
2001 	ifp->fspath = fspath;
2002 	ifp->fs_root = fs_root;
2003 
2004 	return ifp;
2005 }
2006 
2007 void free_ipath(struct inode_fs_paths *ipath)
2008 {
2009 	if (!ipath)
2010 		return;
2011 	vfree(ipath->fspath);
2012 	kfree(ipath);
2013 }
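
/*
 * Example usage (illustrative sketch of the ipath life cycle; the 4096-byte
 * container size and the 'use_path' helper are hypothetical):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath;
 *	int i, ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath)) {
 *		btrfs_free_path(path);
 *		return PTR_ERR(ipath);
 *	}
 *	ret = paths_from_inode(inum, ipath);
 *	if (!ret) {
 *		for (i = 0; i < ipath->fspath->elem_cnt; i++)
 *			use_path((char *)(unsigned long)ipath->fspath->val[i]);
 *	}
 *	free_ipath(ipath);
 *	btrfs_free_path(path);
 */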
2014