xref: /openbmc/linux/fs/btrfs/backref.c (revision 33ac9dba)
1 /*
2  * Copyright (C) 2011 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 
19 #include <linux/vmalloc.h>
20 #include "ctree.h"
21 #include "disk-io.h"
22 #include "backref.h"
23 #include "ulist.h"
24 #include "transaction.h"
25 #include "delayed-ref.h"
26 #include "locking.h"
27 
28 struct extent_inode_elem {
29 	u64 inum;
30 	u64 offset;
31 	struct extent_inode_elem *next;
32 };
33 
34 static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
35 				struct btrfs_file_extent_item *fi,
36 				u64 extent_item_pos,
37 				struct extent_inode_elem **eie)
38 {
39 	u64 offset = 0;
40 	struct extent_inode_elem *e;
41 
42 	if (!btrfs_file_extent_compression(eb, fi) &&
43 	    !btrfs_file_extent_encryption(eb, fi) &&
44 	    !btrfs_file_extent_other_encoding(eb, fi)) {
45 		u64 data_offset;
46 		u64 data_len;
47 
48 		data_offset = btrfs_file_extent_offset(eb, fi);
49 		data_len = btrfs_file_extent_num_bytes(eb, fi);
50 
51 		if (extent_item_pos < data_offset ||
52 		    extent_item_pos >= data_offset + data_len)
53 			return 1;
54 		offset = extent_item_pos - data_offset;
55 	}
56 
57 	e = kmalloc(sizeof(*e), GFP_NOFS);
58 	if (!e)
59 		return -ENOMEM;
60 
61 	e->next = *eie;
62 	e->inum = key->objectid;
63 	e->offset = key->offset + offset;
64 	*eie = e;
65 
66 	return 0;
67 }
68 
69 static void free_inode_elem_list(struct extent_inode_elem *eie)
70 {
71 	struct extent_inode_elem *eie_next;
72 
73 	for (; eie; eie = eie_next) {
74 		eie_next = eie->next;
75 		kfree(eie);
76 	}
77 }
78 
79 static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
80 				u64 extent_item_pos,
81 				struct extent_inode_elem **eie)
82 {
83 	u64 disk_byte;
84 	struct btrfs_key key;
85 	struct btrfs_file_extent_item *fi;
86 	int slot;
87 	int nritems;
88 	int extent_type;
89 	int ret;
90 
91 	/*
92 	 * from the shared data ref, we only have the leaf but we need
93 	 * the key. thus, we must look into all items and check whether we
94 	 * find one (or several) with a reference to our extent item.
95 	 */
96 	nritems = btrfs_header_nritems(eb);
97 	for (slot = 0; slot < nritems; ++slot) {
98 		btrfs_item_key_to_cpu(eb, &key, slot);
99 		if (key.type != BTRFS_EXTENT_DATA_KEY)
100 			continue;
101 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
102 		extent_type = btrfs_file_extent_type(eb, fi);
103 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
104 			continue;
105 		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
106 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
107 		if (disk_byte != wanted_disk_byte)
108 			continue;
109 
110 		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
111 		if (ret < 0)
112 			return ret;
113 	}
114 
115 	return 0;
116 }
117 
118 /*
119  * this structure records all encountered refs on the way up to the root
120  */
121 struct __prelim_ref {
122 	struct list_head list;
123 	u64 root_id;
124 	struct btrfs_key key_for_search;
125 	int level;
126 	int count;
127 	struct extent_inode_elem *inode_list;
128 	u64 parent;
129 	u64 wanted_disk_byte;
130 };
131 
132 static struct kmem_cache *btrfs_prelim_ref_cache;
133 
134 int __init btrfs_prelim_ref_init(void)
135 {
136 	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
137 					sizeof(struct __prelim_ref),
138 					0,
139 					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
140 					NULL);
141 	if (!btrfs_prelim_ref_cache)
142 		return -ENOMEM;
143 	return 0;
144 }
145 
146 void btrfs_prelim_ref_exit(void)
147 {
148 	if (btrfs_prelim_ref_cache)
149 		kmem_cache_destroy(btrfs_prelim_ref_cache);
150 }
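/*
 * Note: the preliminary ref cache above is expected to be set up once and
 * torn down once from the filesystem module's init/exit code (the call sites
 * live outside this file).  A minimal sketch of the expected pairing:
 *
 *	ret = btrfs_prelim_ref_init();
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_prelim_ref_exit();
 */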
151 
152 /*
153  * the rules for all callers of this function are:
154  * - obtaining the parent is the goal
155  * - if you add a key, you must know that it is a correct key
156  * - if you cannot add the parent or a correct key, then we will look into the
157  *   block later to set a correct key
158  *
159  * delayed refs
160  * ============
161  *        backref type | shared | indirect | shared | indirect
162  * information         |   tree |     tree |   data |     data
163  * --------------------+--------+----------+--------+----------
164  *      parent logical |    y   |     -    |    -   |     -
165  *      key to resolve |    -   |     y    |    y   |     y
166  *  tree block logical |    -   |     -    |    -   |     -
167  *  root for resolving |    y   |     y    |    y   |     y
168  *
169  * - column 1:       we have the parent -> done
170  * - column 2, 3, 4: we use the key to find the parent
171  *
172  * on disk refs (inline or keyed)
173  * ==============================
174  *        backref type | shared | indirect | shared | indirect
175  * information         |   tree |     tree |   data |     data
176  * --------------------+--------+----------+--------+----------
177  *      parent logical |    y   |     -    |    y   |     -
178  *      key to resolve |    -   |     -    |    -   |     y
179  *  tree block logical |    y   |     y    |    y   |     y
180  *  root for resolving |    -   |     y    |    y   |     y
181  *
182  * - column 1, 3: we have the parent -> done
183  * - column 2:    we take the first key from the block to find the parent
184  *                (see __add_missing_keys)
185  * - column 4:    we use the key to find the parent
186  *
187  * additional information that's available but not required to find the parent
188  * block might help in merging entries to gain some speed.
189  */
190 
191 static int __add_prelim_ref(struct list_head *head, u64 root_id,
192 			    struct btrfs_key *key, int level,
193 			    u64 parent, u64 wanted_disk_byte, int count,
194 			    gfp_t gfp_mask)
195 {
196 	struct __prelim_ref *ref;
197 
198 	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
199 		return 0;
200 
201 	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
202 	if (!ref)
203 		return -ENOMEM;
204 
205 	ref->root_id = root_id;
206 	if (key)
207 		ref->key_for_search = *key;
208 	else
209 		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
210 
211 	ref->inode_list = NULL;
212 	ref->level = level;
213 	ref->count = count;
214 	ref->parent = parent;
215 	ref->wanted_disk_byte = wanted_disk_byte;
216 	list_add_tail(&ref->list, head);
217 
218 	return 0;
219 }
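/*
 * For illustration, the two common call shapes (taken from the inline ref
 * parsing further below) that correspond to the tables above:
 *
 *	shared tree block ref - the parent logical is known, no key, no root:
 *		__add_prelim_ref(prefs, 0, NULL, info_level + 1, parent,
 *				 bytenr, 1, GFP_NOFS);
 *
 *	indirect data ref - root and key are known, the parent is not:
 *		__add_prelim_ref(prefs, root, &key, 0, 0, bytenr, count,
 *				 GFP_NOFS);
 */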
220 
221 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
222 			   struct ulist *parents, struct __prelim_ref *ref,
223 			   int level, u64 time_seq, const u64 *extent_item_pos,
224 			   u64 total_refs)
225 {
226 	int ret = 0;
227 	int slot;
228 	struct extent_buffer *eb;
229 	struct btrfs_key key;
230 	struct btrfs_key *key_for_search = &ref->key_for_search;
231 	struct btrfs_file_extent_item *fi;
232 	struct extent_inode_elem *eie = NULL, *old = NULL;
233 	u64 disk_byte;
234 	u64 wanted_disk_byte = ref->wanted_disk_byte;
235 	u64 count = 0;
236 
237 	if (level != 0) {
238 		eb = path->nodes[level];
239 		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
240 		if (ret < 0)
241 			return ret;
242 		return 0;
243 	}
244 
245 	/*
246 	 * We normally enter this function with the path already pointing to
247 	 * the first item to check. But sometimes, we may enter it with
248 	 * slot==nritems. In that case, go to the next leaf before we continue.
249 	 */
250 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
251 		ret = btrfs_next_old_leaf(root, path, time_seq);
252 
253 	while (!ret && count < total_refs) {
254 		eb = path->nodes[0];
255 		slot = path->slots[0];
256 
257 		btrfs_item_key_to_cpu(eb, &key, slot);
258 
259 		if (key.objectid != key_for_search->objectid ||
260 		    key.type != BTRFS_EXTENT_DATA_KEY)
261 			break;
262 
263 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
264 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
265 
266 		if (disk_byte == wanted_disk_byte) {
267 			eie = NULL;
268 			old = NULL;
269 			count++;
270 			if (extent_item_pos) {
271 				ret = check_extent_in_eb(&key, eb, fi,
272 						*extent_item_pos,
273 						&eie);
274 				if (ret < 0)
275 					break;
276 			}
277 			if (ret > 0)
278 				goto next;
279 			ret = ulist_add_merge_ptr(parents, eb->start,
280 						  eie, (void **)&old, GFP_NOFS);
281 			if (ret < 0)
282 				break;
283 			if (!ret && extent_item_pos) {
284 				while (old->next)
285 					old = old->next;
286 				old->next = eie;
287 			}
288 			eie = NULL;
289 		}
290 next:
291 		ret = btrfs_next_old_item(root, path, time_seq);
292 	}
293 
294 	if (ret > 0)
295 		ret = 0;
296 	else if (ret < 0)
297 		free_inode_elem_list(eie);
298 	return ret;
299 }
300 
301 /*
302  * resolve an indirect backref in the form (root_id, key, level)
303  * to a logical address
304  */
305 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
306 				  struct btrfs_path *path, u64 time_seq,
307 				  struct __prelim_ref *ref,
308 				  struct ulist *parents,
309 				  const u64 *extent_item_pos, u64 total_refs)
310 {
311 	struct btrfs_root *root;
312 	struct btrfs_key root_key;
313 	struct extent_buffer *eb;
314 	int ret = 0;
315 	int root_level;
316 	int level = ref->level;
317 	int index;
318 
319 	root_key.objectid = ref->root_id;
320 	root_key.type = BTRFS_ROOT_ITEM_KEY;
321 	root_key.offset = (u64)-1;
322 
323 	index = srcu_read_lock(&fs_info->subvol_srcu);
324 
325 	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
326 	if (IS_ERR(root)) {
327 		srcu_read_unlock(&fs_info->subvol_srcu, index);
328 		ret = PTR_ERR(root);
329 		goto out;
330 	}
331 
332 	if (path->search_commit_root)
333 		root_level = btrfs_header_level(root->commit_root);
334 	else
335 		root_level = btrfs_old_root_level(root, time_seq);
336 
337 	if (root_level + 1 == level) {
338 		srcu_read_unlock(&fs_info->subvol_srcu, index);
339 		goto out;
340 	}
341 
342 	path->lowest_level = level;
343 	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
344 
345 	/* root node has been locked, we can release @subvol_srcu safely here */
346 	srcu_read_unlock(&fs_info->subvol_srcu, index);
347 
348 	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
349 		 "%d for key (%llu %u %llu)\n",
350 		 ref->root_id, level, ref->count, ret,
351 		 ref->key_for_search.objectid, ref->key_for_search.type,
352 		 ref->key_for_search.offset);
353 	if (ret < 0)
354 		goto out;
355 
356 	eb = path->nodes[level];
357 	while (!eb) {
358 		if (WARN_ON(!level)) {
359 			ret = 1;
360 			goto out;
361 		}
362 		level--;
363 		eb = path->nodes[level];
364 	}
365 
366 	ret = add_all_parents(root, path, parents, ref, level, time_seq,
367 			      extent_item_pos, total_refs);
368 out:
369 	path->lowest_level = 0;
370 	btrfs_release_path(path);
371 	return ret;
372 }
373 
374 /*
375  * resolve all indirect backrefs from the list
376  */
377 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
378 				   struct btrfs_path *path, u64 time_seq,
379 				   struct list_head *head,
380 				   const u64 *extent_item_pos, u64 total_refs)
381 {
382 	int err;
383 	int ret = 0;
384 	struct __prelim_ref *ref;
385 	struct __prelim_ref *ref_safe;
386 	struct __prelim_ref *new_ref;
387 	struct ulist *parents;
388 	struct ulist_node *node;
389 	struct ulist_iterator uiter;
390 
391 	parents = ulist_alloc(GFP_NOFS);
392 	if (!parents)
393 		return -ENOMEM;
394 
395 	/*
396 	 * _safe allows us to insert directly after the current item without
397 	 * iterating over the newly inserted items.
398 	 * we're also allowed to re-assign ref during iteration.
399 	 */
400 	list_for_each_entry_safe(ref, ref_safe, head, list) {
401 		if (ref->parent)	/* already direct */
402 			continue;
403 		if (ref->count == 0)
404 			continue;
405 		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
406 					     parents, extent_item_pos,
407 					     total_refs);
408 		/*
409 		 * we can only tolerate ENOENT; otherwise, we should catch the
410 		 * error and return directly.
411 		 */
412 		if (err == -ENOENT) {
413 			continue;
414 		} else if (err) {
415 			ret = err;
416 			goto out;
417 		}
418 
419 		/* we put the first parent into the ref at hand */
420 		ULIST_ITER_INIT(&uiter);
421 		node = ulist_next(parents, &uiter);
422 		ref->parent = node ? node->val : 0;
423 		ref->inode_list = node ?
424 			(struct extent_inode_elem *)(uintptr_t)node->aux : NULL;
425 
426 		/* additional parents require new refs being added here */
427 		while ((node = ulist_next(parents, &uiter))) {
428 			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
429 						   GFP_NOFS);
430 			if (!new_ref) {
431 				ret = -ENOMEM;
432 				goto out;
433 			}
434 			memcpy(new_ref, ref, sizeof(*ref));
435 			new_ref->parent = node->val;
436 			new_ref->inode_list = (struct extent_inode_elem *)
437 							(uintptr_t)node->aux;
438 			list_add(&new_ref->list, &ref->list);
439 		}
440 		ulist_reinit(parents);
441 	}
442 out:
443 	ulist_free(parents);
444 	return ret;
445 }
446 
447 static inline int ref_for_same_block(struct __prelim_ref *ref1,
448 				     struct __prelim_ref *ref2)
449 {
450 	if (ref1->level != ref2->level)
451 		return 0;
452 	if (ref1->root_id != ref2->root_id)
453 		return 0;
454 	if (ref1->key_for_search.type != ref2->key_for_search.type)
455 		return 0;
456 	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
457 		return 0;
458 	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
459 		return 0;
460 	if (ref1->parent != ref2->parent)
461 		return 0;
462 
463 	return 1;
464 }
465 
466 /*
467  * read tree blocks and add keys where required.
468  */
469 static int __add_missing_keys(struct btrfs_fs_info *fs_info,
470 			      struct list_head *head)
471 {
472 	struct list_head *pos;
473 	struct extent_buffer *eb;
474 
475 	list_for_each(pos, head) {
476 		struct __prelim_ref *ref;
477 		ref = list_entry(pos, struct __prelim_ref, list);
478 
479 		if (ref->parent)
480 			continue;
481 		if (ref->key_for_search.type)
482 			continue;
483 		BUG_ON(!ref->wanted_disk_byte);
484 		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
485 				     fs_info->tree_root->leafsize, 0);
486 		if (!eb || !extent_buffer_uptodate(eb)) {
487 			free_extent_buffer(eb);
488 			return -EIO;
489 		}
490 		btrfs_tree_read_lock(eb);
491 		if (btrfs_header_level(eb) == 0)
492 			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
493 		else
494 			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
495 		btrfs_tree_read_unlock(eb);
496 		free_extent_buffer(eb);
497 	}
498 	return 0;
499 }
500 
501 /*
502  * merge two lists of backrefs and adjust counts accordingly
503  *
504  * mode = 1: merge identical keys, if key is set
505  *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
506  *           additionally, we could even add a key range for the blocks we
507  *           looked into to merge even more (-> replace unresolved refs by those
508  *           having a parent).
509  * mode = 2: merge identical parents
510  */
511 static void __merge_refs(struct list_head *head, int mode)
512 {
513 	struct list_head *pos1;
514 
515 	list_for_each(pos1, head) {
516 		struct list_head *n2;
517 		struct list_head *pos2;
518 		struct __prelim_ref *ref1;
519 
520 		ref1 = list_entry(pos1, struct __prelim_ref, list);
521 
522 		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
523 		     pos2 = n2, n2 = pos2->next) {
524 			struct __prelim_ref *ref2;
525 			struct __prelim_ref *xchg;
526 			struct extent_inode_elem *eie;
527 
528 			ref2 = list_entry(pos2, struct __prelim_ref, list);
529 
530 			if (mode == 1) {
531 				if (!ref_for_same_block(ref1, ref2))
532 					continue;
533 				if (!ref1->parent && ref2->parent) {
534 					xchg = ref1;
535 					ref1 = ref2;
536 					ref2 = xchg;
537 				}
538 			} else {
539 				if (ref1->parent != ref2->parent)
540 					continue;
541 			}
542 
543 			eie = ref1->inode_list;
544 			while (eie && eie->next)
545 				eie = eie->next;
546 			if (eie)
547 				eie->next = ref2->inode_list;
548 			else
549 				ref1->inode_list = ref2->inode_list;
550 			ref1->count += ref2->count;
551 
552 			list_del(&ref2->list);
553 			kmem_cache_free(btrfs_prelim_ref_cache, ref2);
554 		}
555 
556 	}
557 }
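/*
 * Worked example (illustration only): with mode == 2, two entries
 *
 *	{ parent = P, count = 1, inode_list = A }
 *	{ parent = P, count = 2, inode_list = B }
 *
 * are collapsed into a single entry
 *
 *	{ parent = P, count = 3, inode_list = A -> B }
 *
 * i.e. the counts are added and the inode lists are chained, exactly as done
 * at the end of the inner loop above.
 */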
558 
559 /*
560  * add all currently queued delayed refs from this head whose seq nr is
561  * smaller than or equal to seq to the list
562  */
563 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
564 			      struct list_head *prefs, u64 *total_refs)
565 {
566 	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
567 	struct rb_node *n = &head->node.rb_node;
568 	struct btrfs_key key;
569 	struct btrfs_key op_key = {0};
570 	int sgn;
571 	int ret = 0;
572 
573 	if (extent_op && extent_op->update_key)
574 		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
575 
576 	spin_lock(&head->lock);
577 	n = rb_first(&head->ref_root);
578 	while (n) {
579 		struct btrfs_delayed_ref_node *node;
580 		node = rb_entry(n, struct btrfs_delayed_ref_node,
581 				rb_node);
582 		n = rb_next(n);
583 		if (node->seq > seq)
584 			continue;
585 
586 		switch (node->action) {
587 		case BTRFS_ADD_DELAYED_EXTENT:
588 		case BTRFS_UPDATE_DELAYED_HEAD:
589 			WARN_ON(1);
590 			continue;
591 		case BTRFS_ADD_DELAYED_REF:
592 			sgn = 1;
593 			break;
594 		case BTRFS_DROP_DELAYED_REF:
595 			sgn = -1;
596 			break;
597 		default:
598 			BUG_ON(1);
599 		}
600 		*total_refs += (node->ref_mod * sgn);
601 		switch (node->type) {
602 		case BTRFS_TREE_BLOCK_REF_KEY: {
603 			struct btrfs_delayed_tree_ref *ref;
604 
605 			ref = btrfs_delayed_node_to_tree_ref(node);
606 			ret = __add_prelim_ref(prefs, ref->root, &op_key,
607 					       ref->level + 1, 0, node->bytenr,
608 					       node->ref_mod * sgn, GFP_ATOMIC);
609 			break;
610 		}
611 		case BTRFS_SHARED_BLOCK_REF_KEY: {
612 			struct btrfs_delayed_tree_ref *ref;
613 
614 			ref = btrfs_delayed_node_to_tree_ref(node);
615 			ret = __add_prelim_ref(prefs, ref->root, NULL,
616 					       ref->level + 1, ref->parent,
617 					       node->bytenr,
618 					       node->ref_mod * sgn, GFP_ATOMIC);
619 			break;
620 		}
621 		case BTRFS_EXTENT_DATA_REF_KEY: {
622 			struct btrfs_delayed_data_ref *ref;
623 			ref = btrfs_delayed_node_to_data_ref(node);
624 
625 			key.objectid = ref->objectid;
626 			key.type = BTRFS_EXTENT_DATA_KEY;
627 			key.offset = ref->offset;
628 			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
629 					       node->bytenr,
630 					       node->ref_mod * sgn, GFP_ATOMIC);
631 			break;
632 		}
633 		case BTRFS_SHARED_DATA_REF_KEY: {
634 			struct btrfs_delayed_data_ref *ref;
635 
636 			ref = btrfs_delayed_node_to_data_ref(node);
637 
638 			key.objectid = ref->objectid;
639 			key.type = BTRFS_EXTENT_DATA_KEY;
640 			key.offset = ref->offset;
641 			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
642 					       ref->parent, node->bytenr,
643 					       node->ref_mod * sgn, GFP_ATOMIC);
644 			break;
645 		}
646 		default:
647 			WARN_ON(1);
648 		}
649 		if (ret)
650 			break;
651 	}
652 	spin_unlock(&head->lock);
653 	return ret;
654 }
655 
656 /*
657  * add all inline backrefs for bytenr to the list
658  */
659 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
660 			     struct btrfs_path *path, u64 bytenr,
661 			     int *info_level, struct list_head *prefs,
662 			     u64 *total_refs)
663 {
664 	int ret = 0;
665 	int slot;
666 	struct extent_buffer *leaf;
667 	struct btrfs_key key;
668 	struct btrfs_key found_key;
669 	unsigned long ptr;
670 	unsigned long end;
671 	struct btrfs_extent_item *ei;
672 	u64 flags;
673 	u64 item_size;
674 
675 	/*
676 	 * enumerate all inline refs
677 	 */
678 	leaf = path->nodes[0];
679 	slot = path->slots[0];
680 
681 	item_size = btrfs_item_size_nr(leaf, slot);
682 	BUG_ON(item_size < sizeof(*ei));
683 
684 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
685 	flags = btrfs_extent_flags(leaf, ei);
686 	*total_refs += btrfs_extent_refs(leaf, ei);
687 	btrfs_item_key_to_cpu(leaf, &found_key, slot);
688 
689 	ptr = (unsigned long)(ei + 1);
690 	end = (unsigned long)ei + item_size;
691 
692 	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
693 	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
694 		struct btrfs_tree_block_info *info;
695 
696 		info = (struct btrfs_tree_block_info *)ptr;
697 		*info_level = btrfs_tree_block_level(leaf, info);
698 		ptr += sizeof(struct btrfs_tree_block_info);
699 		BUG_ON(ptr > end);
700 	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
701 		*info_level = found_key.offset;
702 	} else {
703 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
704 	}
705 
706 	while (ptr < end) {
707 		struct btrfs_extent_inline_ref *iref;
708 		u64 offset;
709 		int type;
710 
711 		iref = (struct btrfs_extent_inline_ref *)ptr;
712 		type = btrfs_extent_inline_ref_type(leaf, iref);
713 		offset = btrfs_extent_inline_ref_offset(leaf, iref);
714 
715 		switch (type) {
716 		case BTRFS_SHARED_BLOCK_REF_KEY:
717 			ret = __add_prelim_ref(prefs, 0, NULL,
718 						*info_level + 1, offset,
719 						bytenr, 1, GFP_NOFS);
720 			break;
721 		case BTRFS_SHARED_DATA_REF_KEY: {
722 			struct btrfs_shared_data_ref *sdref;
723 			int count;
724 
725 			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
726 			count = btrfs_shared_data_ref_count(leaf, sdref);
727 			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
728 					       bytenr, count, GFP_NOFS);
729 			break;
730 		}
731 		case BTRFS_TREE_BLOCK_REF_KEY:
732 			ret = __add_prelim_ref(prefs, offset, NULL,
733 					       *info_level + 1, 0,
734 					       bytenr, 1, GFP_NOFS);
735 			break;
736 		case BTRFS_EXTENT_DATA_REF_KEY: {
737 			struct btrfs_extent_data_ref *dref;
738 			int count;
739 			u64 root;
740 
741 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
742 			count = btrfs_extent_data_ref_count(leaf, dref);
743 			key.objectid = btrfs_extent_data_ref_objectid(leaf,
744 								      dref);
745 			key.type = BTRFS_EXTENT_DATA_KEY;
746 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
747 			root = btrfs_extent_data_ref_root(leaf, dref);
748 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
749 					       bytenr, count, GFP_NOFS);
750 			break;
751 		}
752 		default:
753 			WARN_ON(1);
754 		}
755 		if (ret)
756 			return ret;
757 		ptr += btrfs_extent_inline_ref_size(type);
758 	}
759 
760 	return 0;
761 }
762 
763 /*
764  * add all non-inline backrefs for bytenr to the list
765  */
766 static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
767 			    struct btrfs_path *path, u64 bytenr,
768 			    int info_level, struct list_head *prefs)
769 {
770 	struct btrfs_root *extent_root = fs_info->extent_root;
771 	int ret;
772 	int slot;
773 	struct extent_buffer *leaf;
774 	struct btrfs_key key;
775 
776 	while (1) {
777 		ret = btrfs_next_item(extent_root, path);
778 		if (ret < 0)
779 			break;
780 		if (ret) {
781 			ret = 0;
782 			break;
783 		}
784 
785 		slot = path->slots[0];
786 		leaf = path->nodes[0];
787 		btrfs_item_key_to_cpu(leaf, &key, slot);
788 
789 		if (key.objectid != bytenr)
790 			break;
791 		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
792 			continue;
793 		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
794 			break;
795 
796 		switch (key.type) {
797 		case BTRFS_SHARED_BLOCK_REF_KEY:
798 			ret = __add_prelim_ref(prefs, 0, NULL,
799 						info_level + 1, key.offset,
800 						bytenr, 1, GFP_NOFS);
801 			break;
802 		case BTRFS_SHARED_DATA_REF_KEY: {
803 			struct btrfs_shared_data_ref *sdref;
804 			int count;
805 
806 			sdref = btrfs_item_ptr(leaf, slot,
807 					      struct btrfs_shared_data_ref);
808 			count = btrfs_shared_data_ref_count(leaf, sdref);
809 			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
810 						bytenr, count, GFP_NOFS);
811 			break;
812 		}
813 		case BTRFS_TREE_BLOCK_REF_KEY:
814 			ret = __add_prelim_ref(prefs, key.offset, NULL,
815 					       info_level + 1, 0,
816 					       bytenr, 1, GFP_NOFS);
817 			break;
818 		case BTRFS_EXTENT_DATA_REF_KEY: {
819 			struct btrfs_extent_data_ref *dref;
820 			int count;
821 			u64 root;
822 
823 			dref = btrfs_item_ptr(leaf, slot,
824 					      struct btrfs_extent_data_ref);
825 			count = btrfs_extent_data_ref_count(leaf, dref);
826 			key.objectid = btrfs_extent_data_ref_objectid(leaf,
827 								      dref);
828 			key.type = BTRFS_EXTENT_DATA_KEY;
829 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
830 			root = btrfs_extent_data_ref_root(leaf, dref);
831 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
832 					       bytenr, count, GFP_NOFS);
833 			break;
834 		}
835 		default:
836 			WARN_ON(1);
837 		}
838 		if (ret)
839 			return ret;
840 
841 	}
842 
843 	return ret;
844 }
845 
846 /*
847  * this adds all existing backrefs (inline backrefs, backrefs and delayed
848  * refs) for the given bytenr to the refs list, merges duplicates and resolves
849  * indirect refs to their parent bytenr.
850  * When roots are found, they're added to the roots list
851  *
852  * FIXME some caching might speed things up
853  */
854 static int find_parent_nodes(struct btrfs_trans_handle *trans,
855 			     struct btrfs_fs_info *fs_info, u64 bytenr,
856 			     u64 time_seq, struct ulist *refs,
857 			     struct ulist *roots, const u64 *extent_item_pos)
858 {
859 	struct btrfs_key key;
860 	struct btrfs_path *path;
861 	struct btrfs_delayed_ref_root *delayed_refs = NULL;
862 	struct btrfs_delayed_ref_head *head;
863 	int info_level = 0;
864 	int ret;
865 	struct list_head prefs_delayed;
866 	struct list_head prefs;
867 	struct __prelim_ref *ref;
868 	struct extent_inode_elem *eie = NULL;
869 	u64 total_refs = 0;
870 
871 	INIT_LIST_HEAD(&prefs);
872 	INIT_LIST_HEAD(&prefs_delayed);
873 
874 	key.objectid = bytenr;
875 	key.offset = (u64)-1;
876 	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
877 		key.type = BTRFS_METADATA_ITEM_KEY;
878 	else
879 		key.type = BTRFS_EXTENT_ITEM_KEY;
880 
881 	path = btrfs_alloc_path();
882 	if (!path)
883 		return -ENOMEM;
884 	if (!trans) {
885 		path->search_commit_root = 1;
886 		path->skip_locking = 1;
887 	}
888 
889 	/*
890 	 * grab both a lock on the path and a lock on the delayed ref head.
891 	 * We need both to get a consistent picture of how the refs look
892 	 * at a specified point in time
893 	 */
894 again:
895 	head = NULL;
896 
897 	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
898 	if (ret < 0)
899 		goto out;
900 	BUG_ON(ret == 0);
901 
902 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
903 	if (trans && likely(trans->type != __TRANS_DUMMY)) {
904 #else
905 	if (trans) {
906 #endif
907 		/*
908 		 * look if there are updates for this ref queued and lock the
909 		 * head
910 		 */
911 		delayed_refs = &trans->transaction->delayed_refs;
912 		spin_lock(&delayed_refs->lock);
913 		head = btrfs_find_delayed_ref_head(trans, bytenr);
914 		if (head) {
915 			if (!mutex_trylock(&head->mutex)) {
916 				atomic_inc(&head->node.refs);
917 				spin_unlock(&delayed_refs->lock);
918 
919 				btrfs_release_path(path);
920 
921 				/*
922 				 * Mutex was contended, block until it's
923 				 * released and try again
924 				 */
925 				mutex_lock(&head->mutex);
926 				mutex_unlock(&head->mutex);
927 				btrfs_put_delayed_ref(&head->node);
928 				goto again;
929 			}
930 			spin_unlock(&delayed_refs->lock);
931 			ret = __add_delayed_refs(head, time_seq,
932 						 &prefs_delayed, &total_refs);
933 			mutex_unlock(&head->mutex);
934 			if (ret)
935 				goto out;
936 		} else {
937 			spin_unlock(&delayed_refs->lock);
938 		}
939 	}
940 
941 	if (path->slots[0]) {
942 		struct extent_buffer *leaf;
943 		int slot;
944 
945 		path->slots[0]--;
946 		leaf = path->nodes[0];
947 		slot = path->slots[0];
948 		btrfs_item_key_to_cpu(leaf, &key, slot);
949 		if (key.objectid == bytenr &&
950 		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
951 		     key.type == BTRFS_METADATA_ITEM_KEY)) {
952 			ret = __add_inline_refs(fs_info, path, bytenr,
953 						&info_level, &prefs,
954 						&total_refs);
955 			if (ret)
956 				goto out;
957 			ret = __add_keyed_refs(fs_info, path, bytenr,
958 					       info_level, &prefs);
959 			if (ret)
960 				goto out;
961 		}
962 	}
963 	btrfs_release_path(path);
964 
965 	list_splice_init(&prefs_delayed, &prefs);
966 
967 	ret = __add_missing_keys(fs_info, &prefs);
968 	if (ret)
969 		goto out;
970 
971 	__merge_refs(&prefs, 1);
972 
973 	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
974 				      extent_item_pos, total_refs);
975 	if (ret)
976 		goto out;
977 
978 	__merge_refs(&prefs, 2);
979 
980 	while (!list_empty(&prefs)) {
981 		ref = list_first_entry(&prefs, struct __prelim_ref, list);
982 		WARN_ON(ref->count < 0);
983 		if (roots && ref->count && ref->root_id && ref->parent == 0) {
984 			/* no parent == root of tree */
985 			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
986 			if (ret < 0)
987 				goto out;
988 		}
989 		if (ref->count && ref->parent) {
990 			if (extent_item_pos && !ref->inode_list &&
991 			    ref->level == 0) {
992 				u32 bsz;
993 				struct extent_buffer *eb;
994 				bsz = btrfs_level_size(fs_info->extent_root,
995 							ref->level);
996 				eb = read_tree_block(fs_info->extent_root,
997 							   ref->parent, bsz, 0);
998 				if (!eb || !extent_buffer_uptodate(eb)) {
999 					free_extent_buffer(eb);
1000 					ret = -EIO;
1001 					goto out;
1002 				}
1003 				btrfs_tree_read_lock(eb);
1004 				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1005 				ret = find_extent_in_eb(eb, bytenr,
1006 							*extent_item_pos, &eie);
1007 				btrfs_tree_read_unlock_blocking(eb);
1008 				free_extent_buffer(eb);
1009 				if (ret < 0)
1010 					goto out;
1011 				ref->inode_list = eie;
1012 			}
1013 			ret = ulist_add_merge_ptr(refs, ref->parent,
1014 						  ref->inode_list,
1015 						  (void **)&eie, GFP_NOFS);
1016 			if (ret < 0)
1017 				goto out;
1018 			if (!ret && extent_item_pos) {
1019 				/*
1020 				 * we've recorded that parent, so we must extend
1021 				 * its inode list here
1022 				 */
1023 				BUG_ON(!eie);
1024 				while (eie->next)
1025 					eie = eie->next;
1026 				eie->next = ref->inode_list;
1027 			}
1028 			eie = NULL;
1029 		}
1030 		list_del(&ref->list);
1031 		kmem_cache_free(btrfs_prelim_ref_cache, ref);
1032 	}
1033 
1034 out:
1035 	btrfs_free_path(path);
1036 	while (!list_empty(&prefs)) {
1037 		ref = list_first_entry(&prefs, struct __prelim_ref, list);
1038 		list_del(&ref->list);
1039 		kmem_cache_free(btrfs_prelim_ref_cache, ref);
1040 	}
1041 	while (!list_empty(&prefs_delayed)) {
1042 		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
1043 				       list);
1044 		list_del(&ref->list);
1045 		kmem_cache_free(btrfs_prelim_ref_cache, ref);
1046 	}
1047 	if (ret < 0)
1048 		free_inode_elem_list(eie);
1049 	return ret;
1050 }
1051 
1052 static void free_leaf_list(struct ulist *blocks)
1053 {
1054 	struct ulist_node *node = NULL;
1055 	struct extent_inode_elem *eie;
1056 	struct ulist_iterator uiter;
1057 
1058 	ULIST_ITER_INIT(&uiter);
1059 	while ((node = ulist_next(blocks, &uiter))) {
1060 		if (!node->aux)
1061 			continue;
1062 		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
1063 		free_inode_elem_list(eie);
1064 		node->aux = 0;
1065 	}
1066 
1067 	ulist_free(blocks);
1068 }
1069 
1070 /*
1071  * Finds all leafs with a reference to the specified combination of bytenr and
1072  * offset. When extent_item_pos is given, each ulist node's aux carries a list
1073  * of extent_inode_elem for that leaf. The leafs will be stored in the leafs
1074  * ulist, which must be freed with ulist_free (see also free_leaf_list).
1075  *
1076  * returns 0 on success, <0 on error
1077  */
1078 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1079 				struct btrfs_fs_info *fs_info, u64 bytenr,
1080 				u64 time_seq, struct ulist **leafs,
1081 				const u64 *extent_item_pos)
1082 {
1083 	int ret;
1084 
1085 	*leafs = ulist_alloc(GFP_NOFS);
1086 	if (!*leafs)
1087 		return -ENOMEM;
1088 
1089 	ret = find_parent_nodes(trans, fs_info, bytenr,
1090 				time_seq, *leafs, NULL, extent_item_pos);
1091 	if (ret < 0 && ret != -ENOENT) {
1092 		free_leaf_list(*leafs);
1093 		return ret;
1094 	}
1095 
1096 	return 0;
1097 }
1098 
1099 /*
1100  * walk all backrefs for a given extent to find all roots that reference this
1101  * extent. Walking a backref means finding all extents that reference this
1102  * extent and in turn walk the backrefs of those, too. Naturally this is a
1103  * recursive process, but here it is implemented in an iterative fashion: We
1104  * find all referencing extents for the extent in question and put them on a
1105  * list. In turn, we find all referencing extents for those, further appending
1106  * to the list. The way we iterate the list allows adding more elements after
1107  * the current while iterating. The process stops when we reach the end of the
1108  * list. Found roots are added to the roots list.
1109  *
1110  * returns 0 on success, < 0 on error.
1111  */
1112 static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1113 				  struct btrfs_fs_info *fs_info, u64 bytenr,
1114 				  u64 time_seq, struct ulist **roots)
1115 {
1116 	struct ulist *tmp;
1117 	struct ulist_node *node = NULL;
1118 	struct ulist_iterator uiter;
1119 	int ret;
1120 
1121 	tmp = ulist_alloc(GFP_NOFS);
1122 	if (!tmp)
1123 		return -ENOMEM;
1124 	*roots = ulist_alloc(GFP_NOFS);
1125 	if (!*roots) {
1126 		ulist_free(tmp);
1127 		return -ENOMEM;
1128 	}
1129 
1130 	ULIST_ITER_INIT(&uiter);
1131 	while (1) {
1132 		ret = find_parent_nodes(trans, fs_info, bytenr,
1133 					time_seq, tmp, *roots, NULL);
1134 		if (ret < 0 && ret != -ENOENT) {
1135 			ulist_free(tmp);
1136 			ulist_free(*roots);
1137 			return ret;
1138 		}
1139 		node = ulist_next(tmp, &uiter);
1140 		if (!node)
1141 			break;
1142 		bytenr = node->val;
1143 		cond_resched();
1144 	}
1145 
1146 	ulist_free(tmp);
1147 	return 0;
1148 }
1149 
1150 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1151 			 struct btrfs_fs_info *fs_info, u64 bytenr,
1152 			 u64 time_seq, struct ulist **roots)
1153 {
1154 	int ret;
1155 
1156 	if (!trans)
1157 		down_read(&fs_info->commit_root_sem);
1158 	ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
1159 	if (!trans)
1160 		up_read(&fs_info->commit_root_sem);
1161 	return ret;
1162 }
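/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * caller resolving every root that references a given extent from the commit
 * root (trans == NULL, time_seq == 0, the same values iterate_extent_inodes()
 * below uses for the search_commit_root case) and walking the result:
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *	int ret;
 *
 *	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &roots);
 *	if (ret < 0)
 *		return ret;
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(roots, &uiter)))
 *		pr_debug("extent %llu is referenced by root %llu\n",
 *			 bytenr, node->val);
 *	ulist_free(roots);
 */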
1163 
1164 /*
1165  * this makes the path point to (inum INODE_ITEM ioff)
1166  */
1167 int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
1168 			struct btrfs_path *path)
1169 {
1170 	struct btrfs_key key;
1171 	return btrfs_find_item(fs_root, path, inum, ioff,
1172 			BTRFS_INODE_ITEM_KEY, &key);
1173 }
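/*
 * Usage sketch ("ii" and "isize" are made-up names for this example): a
 * hypothetical caller reading the inode item once the path has been
 * positioned.  Any non-zero return is treated as "not found or error" here
 * for brevity.
 *
 *	struct btrfs_inode_item *ii;
 *	u64 isize;
 *
 *	ret = inode_item_info(inum, 0, fs_root, path);
 *	if (ret)
 *		goto out;
 *	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *			    struct btrfs_inode_item);
 *	isize = btrfs_inode_size(path->nodes[0], ii);
 */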
1174 
1175 static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
1176 				struct btrfs_path *path,
1177 				struct btrfs_key *found_key)
1178 {
1179 	return btrfs_find_item(fs_root, path, inum, ioff,
1180 			BTRFS_INODE_REF_KEY, found_key);
1181 }
1182 
1183 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1184 			  u64 start_off, struct btrfs_path *path,
1185 			  struct btrfs_inode_extref **ret_extref,
1186 			  u64 *found_off)
1187 {
1188 	int ret, slot;
1189 	struct btrfs_key key;
1190 	struct btrfs_key found_key;
1191 	struct btrfs_inode_extref *extref;
1192 	struct extent_buffer *leaf;
1193 	unsigned long ptr;
1194 
1195 	key.objectid = inode_objectid;
1196 	btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
1197 	key.offset = start_off;
1198 
1199 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1200 	if (ret < 0)
1201 		return ret;
1202 
1203 	while (1) {
1204 		leaf = path->nodes[0];
1205 		slot = path->slots[0];
1206 		if (slot >= btrfs_header_nritems(leaf)) {
1207 			/*
1208 			 * If the item at offset is not found,
1209 			 * btrfs_search_slot will point us to the slot
1210 			 * where it should be inserted. In our case
1211 			 * that will be the slot directly before the
1212 			 * next INODE_EXTREF_KEY item. In the case
1213 			 * that we're pointing to the last slot in a
1214 			 * leaf, we must move one leaf over.
1215 			 */
1216 			ret = btrfs_next_leaf(root, path);
1217 			if (ret) {
1218 				if (ret >= 1)
1219 					ret = -ENOENT;
1220 				break;
1221 			}
1222 			continue;
1223 		}
1224 
1225 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1226 
1227 		/*
1228 		 * Check that we're still looking at an extended ref key for
1229 		 * this particular objectid. If we have different
1230 		 * objectid or type then there are no more to be found
1231 		 * in the tree and we can exit.
1232 		 */
1233 		ret = -ENOENT;
1234 		if (found_key.objectid != inode_objectid)
1235 			break;
1236 		if (btrfs_key_type(&found_key) != BTRFS_INODE_EXTREF_KEY)
1237 			break;
1238 
1239 		ret = 0;
1240 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1241 		extref = (struct btrfs_inode_extref *)ptr;
1242 		*ret_extref = extref;
1243 		if (found_off)
1244 			*found_off = found_key.offset;
1245 		break;
1246 	}
1247 
1248 	return ret;
1249 }
1250 
1251 /*
1252  * this iterates to turn a name (from an iref/extref) into a full filesystem
1253  * path. Elements of the path are separated by '/' and the path is guaranteed
1254  * to be 0-terminated. The path is only given within the current file system,
1255  * so it never starts with a '/'. The caller is responsible for providing
1256  * "size" bytes in "dest". The dest buffer will be filled backwards. Finally,
1257  * the start point of the resulting string is returned. This pointer is
1258  * normally within dest.
1259  * In case the path buffer would overflow, the pointer is decremented further
1260  * as if output was written to the buffer, though no more output is actually
1261  * generated. That way, the caller can determine how much space would be
1262  * required for the path to fit into the buffer. In that case, the returned
1263  * value will be smaller than dest. Callers must check this!
1264  */
1265 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1266 			u32 name_len, unsigned long name_off,
1267 			struct extent_buffer *eb_in, u64 parent,
1268 			char *dest, u32 size)
1269 {
1270 	int slot;
1271 	u64 next_inum;
1272 	int ret;
1273 	s64 bytes_left = ((s64)size) - 1;
1274 	struct extent_buffer *eb = eb_in;
1275 	struct btrfs_key found_key;
1276 	int leave_spinning = path->leave_spinning;
1277 	struct btrfs_inode_ref *iref;
1278 
1279 	if (bytes_left >= 0)
1280 		dest[bytes_left] = '\0';
1281 
1282 	path->leave_spinning = 1;
1283 	while (1) {
1284 		bytes_left -= name_len;
1285 		if (bytes_left >= 0)
1286 			read_extent_buffer(eb, dest + bytes_left,
1287 					   name_off, name_len);
1288 		if (eb != eb_in) {
1289 			btrfs_tree_read_unlock_blocking(eb);
1290 			free_extent_buffer(eb);
1291 		}
1292 		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
1293 		if (ret > 0)
1294 			ret = -ENOENT;
1295 		if (ret)
1296 			break;
1297 
1298 		next_inum = found_key.offset;
1299 
1300 		/* regular exit ahead */
1301 		if (parent == next_inum)
1302 			break;
1303 
1304 		slot = path->slots[0];
1305 		eb = path->nodes[0];
1306 		/* make sure we can use eb after releasing the path */
1307 		if (eb != eb_in) {
1308 			atomic_inc(&eb->refs);
1309 			btrfs_tree_read_lock(eb);
1310 			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1311 		}
1312 		btrfs_release_path(path);
1313 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1314 
1315 		name_len = btrfs_inode_ref_name_len(eb, iref);
1316 		name_off = (unsigned long)(iref + 1);
1317 
1318 		parent = next_inum;
1319 		--bytes_left;
1320 		if (bytes_left >= 0)
1321 			dest[bytes_left] = '/';
1322 	}
1323 
1324 	btrfs_release_path(path);
1325 	path->leave_spinning = leave_spinning;
1326 
1327 	if (ret)
1328 		return ERR_PTR(ret);
1329 
1330 	return dest + bytes_left;
1331 }
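/*
 * Example (illustrative sketch): how a hypothetical caller would handle the
 * overflow convention described above; inode_to_path() further down follows
 * the same pattern.
 *
 *	char *p;
 *
 *	p = btrfs_ref_to_path(fs_root, path, name_len, name_off, eb, parent,
 *			      dest, size);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	if (p > dest)
 *		pr_debug("path: %s\n", p);
 *	else
 *		pr_debug("buffer too small, %llu bytes missing\n",
 *			 (u64)(dest - p));
 */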
1332 
1333 /*
1334  * this makes the path point to (logical EXTENT_ITEM *)
1335  * returns 0 and stores BTRFS_EXTENT_FLAG_DATA (for data) or
1336  * BTRFS_EXTENT_FLAG_TREE_BLOCK (for tree blocks) in *flags_ret, or <0 on error.
1337  */
1338 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1339 			struct btrfs_path *path, struct btrfs_key *found_key,
1340 			u64 *flags_ret)
1341 {
1342 	int ret;
1343 	u64 flags;
1344 	u64 size = 0;
1345 	u32 item_size;
1346 	struct extent_buffer *eb;
1347 	struct btrfs_extent_item *ei;
1348 	struct btrfs_key key;
1349 
1350 	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1351 		key.type = BTRFS_METADATA_ITEM_KEY;
1352 	else
1353 		key.type = BTRFS_EXTENT_ITEM_KEY;
1354 	key.objectid = logical;
1355 	key.offset = (u64)-1;
1356 
1357 	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1358 	if (ret < 0)
1359 		return ret;
1360 
1361 	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1362 	if (ret) {
1363 		if (ret > 0)
1364 			ret = -ENOENT;
1365 		return ret;
1366 	}
1367 	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1368 	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1369 		size = fs_info->extent_root->leafsize;
1370 	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1371 		size = found_key->offset;
1372 
1373 	if (found_key->objectid > logical ||
1374 	    found_key->objectid + size <= logical) {
1375 		pr_debug("logical %llu is not within any extent\n", logical);
1376 		return -ENOENT;
1377 	}
1378 
1379 	eb = path->nodes[0];
1380 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
1381 	BUG_ON(item_size < sizeof(*ei));
1382 
1383 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1384 	flags = btrfs_extent_flags(eb, ei);
1385 
1386 	pr_debug("logical %llu is at position %llu within the extent (%llu "
1387 		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
1388 		 logical, logical - found_key->objectid, found_key->objectid,
1389 		 found_key->offset, flags, item_size);
1390 
1391 	WARN_ON(!flags_ret);
1392 	if (flags_ret) {
1393 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1394 			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1395 		else if (flags & BTRFS_EXTENT_FLAG_DATA)
1396 			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
1397 		else
1398 			BUG_ON(1);
1399 		return 0;
1400 	}
1401 
1402 	return -EIO;
1403 }
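/*
 * Example (illustrative sketch): locating the extent item so that its inline
 * refs can be walked, e.g. with tree_backref_for_extent() below.  The
 * variable names are assumptions of this sketch.
 *
 *	struct btrfs_key found_key;
 *	struct btrfs_extent_item *ei;
 *	struct extent_buffer *eb;
 *	u32 item_size;
 *	u64 flags;
 *
 *	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
 *	if (ret < 0)
 *		return ret;
 *	eb = path->nodes[0];
 *	item_size = btrfs_item_size_nr(eb, path->slots[0]);
 *	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 */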
1404 
1405 /*
1406  * helper function to iterate extent inline refs. ptr must point to a 0 value
1407  * for the first call and may be modified. it is used to track state.
1408  * if more refs exist, 0 is returned and the next call to
1409  * __get_extent_inline_ref must pass the modified ptr parameter to get the
1410  * next ref. after the last ref was processed, 1 is returned.
1411  * returns <0 on error
1412  */
1413 static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
1414 				   struct btrfs_key *key,
1415 				   struct btrfs_extent_item *ei, u32 item_size,
1416 				   struct btrfs_extent_inline_ref **out_eiref,
1417 				   int *out_type)
1418 {
1419 	unsigned long end;
1420 	u64 flags;
1421 	struct btrfs_tree_block_info *info;
1422 
1423 	if (!*ptr) {
1424 		/* first call */
1425 		flags = btrfs_extent_flags(eb, ei);
1426 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1427 			if (key->type == BTRFS_METADATA_ITEM_KEY) {
1428 				/* a skinny metadata extent */
1429 				*out_eiref =
1430 				     (struct btrfs_extent_inline_ref *)(ei + 1);
1431 			} else {
1432 				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1433 				info = (struct btrfs_tree_block_info *)(ei + 1);
1434 				*out_eiref =
1435 				   (struct btrfs_extent_inline_ref *)(info + 1);
1436 			}
1437 		} else {
1438 			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1439 		}
1440 		*ptr = (unsigned long)*out_eiref;
1441 		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1442 			return -ENOENT;
1443 	}
1444 
1445 	end = (unsigned long)ei + item_size;
1446 	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1447 	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
1448 
1449 	*ptr += btrfs_extent_inline_ref_size(*out_type);
1450 	WARN_ON(*ptr > end);
1451 	if (*ptr == end)
1452 		return 1; /* last */
1453 
1454 	return 0;
1455 }
1456 
1457 /*
1458  * reads the tree block backref for an extent. tree level and root are returned
1459  * through out_level and out_root. ptr must point to a 0 value for the first
1460  * call and may be modified (see __get_extent_inline_ref comment).
1461  * returns 0 if data was provided, 1 if there was no more data to provide or
1462  * <0 on error.
1463  */
1464 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1465 			    struct btrfs_key *key, struct btrfs_extent_item *ei,
1466 			    u32 item_size, u64 *out_root, u8 *out_level)
1467 {
1468 	int ret;
1469 	int type;
1470 	struct btrfs_tree_block_info *info;
1471 	struct btrfs_extent_inline_ref *eiref;
1472 
1473 	if (*ptr == (unsigned long)-1)
1474 		return 1;
1475 
1476 	while (1) {
1477 		ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
1478 					      &eiref, &type);
1479 		if (ret < 0)
1480 			return ret;
1481 
1482 		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1483 		    type == BTRFS_SHARED_BLOCK_REF_KEY)
1484 			break;
1485 
1486 		if (ret == 1)
1487 			return 1;
1488 	}
1489 
1490 	/* we can treat both ref types equally here */
1491 	info = (struct btrfs_tree_block_info *)(ei + 1);
1492 	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1493 	*out_level = btrfs_tree_block_level(eb, info);
1494 
1495 	if (ret == 1)
1496 		*ptr = (unsigned long)-1;
1497 
1498 	return 0;
1499 }
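/*
 * Example (illustrative sketch): walking all tree block backrefs of a
 * metadata extent.  eb, found_key, ei and item_size are assumed to describe
 * an extent item already located by the caller (see the sketch after
 * extent_from_logical() above); ptr must start at 0 as documented, and a
 * positive return value means there is nothing left to report.
 *
 *	unsigned long ptr = 0;
 *	u64 root;
 *	u8 level;
 *
 *	while (1) {
 *		ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 *					      item_size, &root, &level);
 *		if (ret < 0)
 *			return ret;
 *		if (ret > 0)
 *			break;
 *		pr_debug("extent referenced from root %llu at level %u\n",
 *			 root, level);
 *	}
 */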
1500 
1501 static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
1502 				u64 root, u64 extent_item_objectid,
1503 				iterate_extent_inodes_t *iterate, void *ctx)
1504 {
1505 	struct extent_inode_elem *eie;
1506 	int ret = 0;
1507 
1508 	for (eie = inode_list; eie; eie = eie->next) {
1509 		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
1510 			 "root %llu\n", extent_item_objectid,
1511 			 eie->inum, eie->offset, root);
1512 		ret = iterate(eie->inum, eie->offset, root, ctx);
1513 		if (ret) {
1514 			pr_debug("stopping iteration for %llu due to ret=%d\n",
1515 				 extent_item_objectid, ret);
1516 			break;
1517 		}
1518 	}
1519 
1520 	return ret;
1521 }
1522 
1523 /*
1524  * calls iterate() for every inode that references the extent identified by
1525  * the given parameters.
1526  * when the iterator function returns a non-zero value, iteration stops.
1527  */
1528 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1529 				u64 extent_item_objectid, u64 extent_item_pos,
1530 				int search_commit_root,
1531 				iterate_extent_inodes_t *iterate, void *ctx)
1532 {
1533 	int ret;
1534 	struct btrfs_trans_handle *trans = NULL;
1535 	struct ulist *refs = NULL;
1536 	struct ulist *roots = NULL;
1537 	struct ulist_node *ref_node = NULL;
1538 	struct ulist_node *root_node = NULL;
1539 	struct seq_list tree_mod_seq_elem = {};
1540 	struct ulist_iterator ref_uiter;
1541 	struct ulist_iterator root_uiter;
1542 
1543 	pr_debug("resolving all inodes for extent %llu\n",
1544 			extent_item_objectid);
1545 
1546 	if (!search_commit_root) {
1547 		trans = btrfs_join_transaction(fs_info->extent_root);
1548 		if (IS_ERR(trans))
1549 			return PTR_ERR(trans);
1550 		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1551 	} else {
1552 		down_read(&fs_info->commit_root_sem);
1553 	}
1554 
1555 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1556 				   tree_mod_seq_elem.seq, &refs,
1557 				   &extent_item_pos);
1558 	if (ret)
1559 		goto out;
1560 
1561 	ULIST_ITER_INIT(&ref_uiter);
1562 	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1563 		ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
1564 					     tree_mod_seq_elem.seq, &roots);
1565 		if (ret)
1566 			break;
1567 		ULIST_ITER_INIT(&root_uiter);
1568 		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1569 			pr_debug("root %llu references leaf %llu, data list "
1570 				 "%#llx\n", root_node->val, ref_node->val,
1571 				 ref_node->aux);
1572 			ret = iterate_leaf_refs((struct extent_inode_elem *)
1573 						(uintptr_t)ref_node->aux,
1574 						root_node->val,
1575 						extent_item_objectid,
1576 						iterate, ctx);
1577 		}
1578 		ulist_free(roots);
1579 	}
1580 
1581 	free_leaf_list(refs);
1582 out:
1583 	if (!search_commit_root) {
1584 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1585 		btrfs_end_transaction(trans, fs_info->extent_root);
1586 	} else {
1587 		up_read(&fs_info->commit_root_sem);
1588 	}
1589 
1590 	return ret;
1591 }
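/*
 * Example (illustrative sketch; "count_inodes" and "nr" are made-up names):
 * a minimal iterate_extent_inodes_t callback plus a hypothetical invocation
 * against the commit root.  The callback arguments mirror the call in
 * iterate_leaf_refs() above; returning a non-zero value stops the iteration.
 *
 *	static int count_inodes(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		u64 *nr = ctx;
 *
 *		(*nr)++;
 *		pr_debug("inum %llu offset %llu root %llu\n", inum, offset, root);
 *		return 0;
 *	}
 *
 *	u64 nr = 0;
 *
 *	ret = iterate_extent_inodes(fs_info, extent_item_objectid,
 *				    extent_item_pos, 1, count_inodes, &nr);
 */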
1592 
1593 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1594 				struct btrfs_path *path,
1595 				iterate_extent_inodes_t *iterate, void *ctx)
1596 {
1597 	int ret;
1598 	u64 extent_item_pos;
1599 	u64 flags = 0;
1600 	struct btrfs_key found_key;
1601 	int search_commit_root = path->search_commit_root;
1602 
1603 	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
1604 	btrfs_release_path(path);
1605 	if (ret < 0)
1606 		return ret;
1607 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1608 		return -EINVAL;
1609 
1610 	extent_item_pos = logical - found_key.objectid;
1611 	ret = iterate_extent_inodes(fs_info, found_key.objectid,
1612 					extent_item_pos, search_commit_root,
1613 					iterate, ctx);
1614 
1615 	return ret;
1616 }
1617 
1618 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
1619 			      struct extent_buffer *eb, void *ctx);
1620 
1621 static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
1622 			      struct btrfs_path *path,
1623 			      iterate_irefs_t *iterate, void *ctx)
1624 {
1625 	int ret = 0;
1626 	int slot;
1627 	u32 cur;
1628 	u32 len;
1629 	u32 name_len;
1630 	u64 parent = 0;
1631 	int found = 0;
1632 	struct extent_buffer *eb;
1633 	struct btrfs_item *item;
1634 	struct btrfs_inode_ref *iref;
1635 	struct btrfs_key found_key;
1636 
1637 	while (!ret) {
1638 		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
1639 				     &found_key);
1640 		if (ret < 0)
1641 			break;
1642 		if (ret) {
1643 			ret = found ? 0 : -ENOENT;
1644 			break;
1645 		}
1646 		++found;
1647 
1648 		parent = found_key.offset;
1649 		slot = path->slots[0];
1650 		eb = btrfs_clone_extent_buffer(path->nodes[0]);
1651 		if (!eb) {
1652 			ret = -ENOMEM;
1653 			break;
1654 		}
1655 		extent_buffer_get(eb);
1656 		btrfs_tree_read_lock(eb);
1657 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1658 		btrfs_release_path(path);
1659 
1660 		item = btrfs_item_nr(slot);
1661 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1662 
1663 		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
1664 			name_len = btrfs_inode_ref_name_len(eb, iref);
1665 			/* path must be released before calling iterate()! */
1666 			pr_debug("following ref at offset %u for inode %llu in "
1667 				 "tree %llu\n", cur, found_key.objectid,
1668 				 fs_root->objectid);
1669 			ret = iterate(parent, name_len,
1670 				      (unsigned long)(iref + 1), eb, ctx);
1671 			if (ret)
1672 				break;
1673 			len = sizeof(*iref) + name_len;
1674 			iref = (struct btrfs_inode_ref *)((char *)iref + len);
1675 		}
1676 		btrfs_tree_read_unlock_blocking(eb);
1677 		free_extent_buffer(eb);
1678 	}
1679 
1680 	btrfs_release_path(path);
1681 
1682 	return ret;
1683 }
1684 
1685 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
1686 				 struct btrfs_path *path,
1687 				 iterate_irefs_t *iterate, void *ctx)
1688 {
1689 	int ret;
1690 	int slot;
1691 	u64 offset = 0;
1692 	u64 parent;
1693 	int found = 0;
1694 	struct extent_buffer *eb;
1695 	struct btrfs_inode_extref *extref;
1697 	u32 item_size;
1698 	u32 cur_offset;
1699 	unsigned long ptr;
1700 
1701 	while (1) {
1702 		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
1703 					    &offset);
1704 		if (ret < 0)
1705 			break;
1706 		if (ret) {
1707 			ret = found ? 0 : -ENOENT;
1708 			break;
1709 		}
1710 		++found;
1711 
1712 		slot = path->slots[0];
1713 		eb = btrfs_clone_extent_buffer(path->nodes[0]);
1714 		if (!eb) {
1715 			ret = -ENOMEM;
1716 			break;
1717 		}
1718 		extent_buffer_get(eb);
1719 
1720 		btrfs_tree_read_lock(eb);
1721 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1722 		btrfs_release_path(path);
1723 
1724 		/* use the cloned buffer; the path was released above */
1725 		item_size = btrfs_item_size_nr(eb, slot);
1726 		ptr = btrfs_item_ptr_offset(eb, slot);
1727 		cur_offset = 0;
1728 
1729 		while (cur_offset < item_size) {
1730 			u32 name_len;
1731 
1732 			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
1733 			parent = btrfs_inode_extref_parent(eb, extref);
1734 			name_len = btrfs_inode_extref_name_len(eb, extref);
1735 			ret = iterate(parent, name_len,
1736 				      (unsigned long)&extref->name, eb, ctx);
1737 			if (ret)
1738 				break;
1739 
1740 			cur_offset += btrfs_inode_extref_name_len(eb, extref);
1741 			cur_offset += sizeof(*extref);
1742 		}
1743 		btrfs_tree_read_unlock_blocking(eb);
1744 		free_extent_buffer(eb);
1745 
1746 		offset++;
1747 	}
1748 
1749 	btrfs_release_path(path);
1750 
1751 	return ret;
1752 }
1753 
1754 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
1755 			 struct btrfs_path *path, iterate_irefs_t *iterate,
1756 			 void *ctx)
1757 {
1758 	int ret;
1759 	int found_refs = 0;
1760 
1761 	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
1762 	if (!ret)
1763 		++found_refs;
1764 	else if (ret != -ENOENT)
1765 		return ret;
1766 
1767 	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
1768 	if (ret == -ENOENT && found_refs)
1769 		return 0;
1770 
1771 	return ret;
1772 }
1773 
1774 /*
1775  * returns 0 if the path could be dumped (possibly truncated)
1776  * returns <0 in case of an error
1777  */
1778 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
1779 			 struct extent_buffer *eb, void *ctx)
1780 {
1781 	struct inode_fs_paths *ipath = ctx;
1782 	char *fspath;
1783 	char *fspath_min;
1784 	int i = ipath->fspath->elem_cnt;
1785 	const int s_ptr = sizeof(char *);
1786 	u32 bytes_left;
1787 
1788 	bytes_left = ipath->fspath->bytes_left > s_ptr ?
1789 					ipath->fspath->bytes_left - s_ptr : 0;
1790 
1791 	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
1792 	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
1793 				   name_off, eb, inum, fspath_min, bytes_left);
1794 	if (IS_ERR(fspath))
1795 		return PTR_ERR(fspath);
1796 
1797 	if (fspath > fspath_min) {
1798 		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
1799 		++ipath->fspath->elem_cnt;
1800 		ipath->fspath->bytes_left = fspath - fspath_min;
1801 	} else {
1802 		++ipath->fspath->elem_missed;
1803 		ipath->fspath->bytes_missing += fspath_min - fspath;
1804 		ipath->fspath->bytes_left = 0;
1805 	}
1806 
1807 	return 0;
1808 }
1809 
1810 /*
1811  * this dumps all file system paths to the inode into the ipath struct, provided
1812  * it has been created large enough. each path is zero-terminated and accessed
1813  * from ipath->fspath->val[i].
1814  * when it returns, there are ipath->fspath->elem_cnt paths available in
1815  * ipath->fspath->val[]. when the allocated space wasn't sufficient, the
1816  * number of missed paths is recorded in ipath->fspath->elem_missed; otherwise,
1817  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
1818  * have been needed to return all paths.
1819  */
1820 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
1821 {
1822 	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
1823 			     inode_to_path, ipath);
1824 }
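/*
 * Example (illustrative sketch; the buffer size and the fs_root/inum
 * variables are assumptions of this sketch): resolving all paths of an inode
 * and printing them.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath;
 *	int i, ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath)) {
 *		btrfs_free_path(path);
 *		return PTR_ERR(ipath);
 *	}
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; !ret && i < ipath->fspath->elem_cnt; i++)
 *		pr_debug("path %d: %s\n", i,
 *			 (char *)(unsigned long)ipath->fspath->val[i]);
 *	free_ipath(ipath);
 *	btrfs_free_path(path);
 */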
1825 
1826 struct btrfs_data_container *init_data_container(u32 total_bytes)
1827 {
1828 	struct btrfs_data_container *data;
1829 	size_t alloc_bytes;
1830 
1831 	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
1832 	data = vmalloc(alloc_bytes);
1833 	if (!data)
1834 		return ERR_PTR(-ENOMEM);
1835 
1836 	if (total_bytes >= sizeof(*data)) {
1837 		data->bytes_left = total_bytes - sizeof(*data);
1838 		data->bytes_missing = 0;
1839 	} else {
1840 		data->bytes_missing = sizeof(*data) - total_bytes;
1841 		data->bytes_left = 0;
1842 	}
1843 
1844 	data->elem_cnt = 0;
1845 	data->elem_missed = 0;
1846 
1847 	return data;
1848 }
1849 
1850 /*
1851  * allocates space to return multiple file system paths for an inode.
1852  * total_bytes to allocate are passed, note that space usable for actual path
1853  * information will be total_bytes - sizeof(struct btrfs_data_container).
1854  * the returned pointer must be freed with free_ipath() in the end.
1855  */
1856 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
1857 					struct btrfs_path *path)
1858 {
1859 	struct inode_fs_paths *ifp;
1860 	struct btrfs_data_container *fspath;
1861 
1862 	fspath = init_data_container(total_bytes);
1863 	if (IS_ERR(fspath))
1864 		return (void *)fspath;
1865 
1866 	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
1867 	if (!ifp) {
1868 		vfree(fspath);
1869 		return ERR_PTR(-ENOMEM);
1870 	}
1871 
1872 	ifp->btrfs_path = path;
1873 	ifp->fspath = fspath;
1874 	ifp->fs_root = fs_root;
1875 
1876 	return ifp;
1877 }
1878 
1879 void free_ipath(struct inode_fs_paths *ipath)
1880 {
1881 	if (!ipath)
1882 		return;
1883 	vfree(ipath->fspath);
1884 	kfree(ipath);
1885 }
1886