xref: /openbmc/linux/fs/btrfs/backref.c (revision f7777dcc)
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};
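
/*
 * for plain (uncompressed, unencoded) extents, return 1 if the file extent
 * item does not cover extent_item_pos. in all other cases, prepend an
 * (inum, file offset) element to *eie; returns 0 on success, -ENOMEM on
 * allocation failure.
 */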
static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
				struct btrfs_file_extent_item *fi,
				u64 extent_item_pos,
				struct extent_inode_elem **eie)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
				u64 extent_item_pos,
				struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see whether we
	 * find one (or several) with a reference to our extent item.
	 */
87 	for (slot = 0; slot < nritems; ++slot) {
88 		btrfs_item_key_to_cpu(eb, &key, slot);
89 		if (key.type != BTRFS_EXTENT_DATA_KEY)
90 			continue;
91 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
92 		extent_type = btrfs_file_extent_type(eb, fi);
93 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
94 			continue;
95 		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
96 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
97 		if (disk_byte != wanted_disk_byte)
98 			continue;
99 
100 		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
101 		if (ret < 0)
102 			return ret;
103 	}
104 
105 	return 0;
106 }
107 
/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct __prelim_ref),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_prelim_ref_exit(void)
{
	if (btrfs_prelim_ref_cache)
		kmem_cache_destroy(btrfs_prelim_ref_cache);
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see __add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */

static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level,
			    u64 parent, u64 wanted_disk_byte, int count,
			    gfp_t gfp_mask)
{
	struct __prelim_ref *ref;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}
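
/*
 * for a leaf (level 0), walk all file extent items matching the search key
 * and the wanted disk bytenr and record their leaf in the parents ulist;
 * for interior nodes, just record the node at the given level. when
 * extent_item_pos is given, matching (inum, offset) elements are attached
 * to each ulist entry via its aux field.
 */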
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
				struct ulist *parents, int level,
				struct btrfs_key *key_for_search, u64 time_seq,
				u64 wanted_disk_byte,
				const u64 *extent_item_pos)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		ret = btrfs_next_old_leaf(root, path, time_seq);

	while (!ret) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge(parents, eb->start,
					      (uintptr_t)eie,
					      (u64 *)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
		}
next:
		ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path, u64 time_seq,
				  struct __prelim_ref *ref,
				  struct ulist *parents,
				  const u64 *extent_item_pos)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	path->lowest_level = level;
	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (!level) {
			WARN_ON(1);
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
				time_seq, ref->wanted_disk_byte,
				extent_item_pos);
out:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   struct btrfs_path *path, u64 time_seq,
				   struct list_head *head,
				   const u64 *extent_item_pos)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
					     parents, extent_item_pos);
		if (err == -ENOMEM)
			goto out;
		if (err)
			continue;

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = node ?
			(struct extent_inode_elem *)(uintptr_t)node->aux : NULL;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = (struct extent_inode_elem *)
							(uintptr_t)node->aux;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}
out:
	ulist_free(parents);
	return ret;
}
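
/*
 * two prelim refs describe the same block iff all identifying fields
 * (level, root, key to search for and parent) match
 */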
static inline int ref_for_same_block(struct __prelim_ref *ref1,
				     struct __prelim_ref *ref2)
{
	if (ref1->level != ref2->level)
		return 0;
	if (ref1->root_id != ref2->root_id)
		return 0;
	if (ref1->key_for_search.type != ref2->key_for_search.type)
		return 0;
	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
		return 0;
	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
		return 0;
	if (ref1->parent != ref2->parent)
		return 0;

	return 1;
}

/*
 * read tree blocks and add keys where required.
 */
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
{
	struct list_head *pos;
	struct extent_buffer *eb;

	list_for_each(pos, head) {
		struct __prelim_ref *ref;
		ref = list_entry(pos, struct __prelim_ref, list);

		if (ref->parent)
			continue;
		if (ref->key_for_search.type)
			continue;
		BUG_ON(!ref->wanted_disk_byte);
		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
				     fs_info->tree_root->leafsize, 0);
		if (!eb || !extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return 0;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
 *           additionally, we could even add a key range for the blocks we
 *           looked into to merge even more (-> replace unresolved refs by those
 *           having a parent).
 * mode = 2: merge identical parents
 */
static void __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;
			struct __prelim_ref *xchg;
			struct extent_inode_elem *eie;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (!ref_for_same_block(ref1, ref2))
					continue;
				if (!ref1->parent && ref2->parent) {
					xchg = ref1;
					ref1 = ref2;
					ref2 = xchg;
				}
			} else {
				if (ref1->parent != ref2->parent)
					continue;
			}

			eie = ref1->inode_list;
			while (eie && eie->next)
				eie = eie->next;
			if (eie)
				eie->next = ref2->inode_list;
			else
				ref1->inode_list = ref2->inode_list;
			ref1->count += ref2->count;

			list_del(&ref2->list);
			kmem_cache_free(btrfs_prelim_ref_cache, ref2);
		}

	}
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	struct btrfs_key key;
	struct btrfs_key op_key = {0};
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);

	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, &op_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, NULL,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						*info_level + 1, offset,
						bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						info_level + 1, key.offset,
						bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
						bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, keyed backrefs and
 * delayed refs) for the given bytenr to the refs list, merges duplicates
 * and resolves indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans)
		path->search_commit_root = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed);
			mutex_unlock(&head->mutex);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		WARN_ON(ref->count < 0);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			struct extent_inode_elem *eie = NULL;
			if (extent_item_pos && !ref->inode_list) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
							info_level);
				eb = read_tree_block(fs_info->extent_root,
							   ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge(refs, ref->parent,
					      (uintptr_t)ref->inode_list,
					      (u64 *)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
		}
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}

	return ret;
}
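
/*
 * free the extent_inode_elem chains hanging off the aux field of each
 * ulist node, then the ulist itself
 */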
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct extent_inode_elem *eie_next;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
		for (; eie; eie = eie_next) {
			eie_next = eie->next;
			kfree(eie);
		}
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs will be stored in the leafs ulist; if extent_item_pos is
 * given, each ulist node also carries a list of matching (inum, offset)
 * elements in its aux field. The result must be freed with free_leaf_list.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr,
				time_seq, *leafs, tmp, extent_item_pos);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr,
					time_seq, tmp, *roots, NULL);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
	}

	ulist_free(tmp);
	return 0;
}
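
/*
 * looks up the first item of the given type for inum with an offset of at
 * least ioff. returns 0 and fills found_key when such an item exists, 1
 * when it does not, <0 on error.
 */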
static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
				&key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
				found_key);
}

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next BTRFS_INODE_EXTREF_KEY item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. normally,
 * this pointer is within dest.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
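
/*
 * Usage sketch (illustrative only, not part of this file): because the
 * buffer is filled backwards, callers compare the returned pointer with
 * the buffer start to detect truncation. name_len, name_off, eb and
 * parent are assumed to describe an inode ref that was looked up
 * beforehand; allocation error handling is trimmed.
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_NOFS);
 *	char *p;
 *
 *	p = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *			      eb, parent, buf, PATH_MAX);
 *	if (!IS_ERR(p) && p >= buf)
 *		pr_info("full path: %s\n", p);
 *	else if (!IS_ERR(p))
 *		pr_info("buffer too small by %zd bytes\n", buf - p);
 *	kfree(buf);
 */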

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
					0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->extent_root->leafsize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if ((found_key->type != BTRFS_EXTENT_ITEM_KEY &&
	     found_key->type != BTRFS_METADATA_ITEM_KEY) ||
	    found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		pr_debug("logical %llu is not within any extent\n", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG_ON(1);
		return 0;
	}

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				struct btrfs_extent_inline_ref **out_eiref,
				int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
						&eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
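
/*
 * Usage sketch (illustrative only, not part of this file): resolving the
 * root and level for every tree block backref of a metadata extent, the
 * way scrub-style callers drive this iterator. eb, ei and item_size are
 * assumed to come from a prior extent_from_logical() lookup plus
 * btrfs_item_ptr().
 *
 *	unsigned long ptr = 0;
 *	u64 ref_root;
 *	u8 ref_level;
 *
 *	while (tree_backref_for_extent(&ptr, eb, ei, item_size,
 *				       &ref_root, &ref_level) == 0) {
 *		pr_info("extent is referenced from root %llu level %u\n",
 *			ref_root, ref_level);
 *	}
 */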

static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
				u64 root, u64 extent_item_objectid,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", extent_item_objectid,
			 eie->inum, eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			pr_debug("stopping iteration for %llu due to ret=%d\n",
				 extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = {};
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	pr_debug("resolving all inodes for extent %llu\n",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
					   tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#llx\n", root_node->val, ref_node->val,
				 ref_node->aux);
			ret = iterate_leaf_refs((struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);
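
/*
 * iterate all INODE_REF items of the given inode in ascending parent
 * order and call iterate() once per contained name
 */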
static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		path->leave_spinning = 1;
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
				     &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur, found_key.objectid,
				 fs_root->objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}
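
/*
 * same as iterate_inode_refs, but for the extended (INODE_EXTREF) ref
 * items of the inode
 */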
static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		/*
		 * use the eb and slot captured above; after the release,
		 * path->nodes[0] no longer points to this leaf
		 */
		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += name_len;
			cur_offset += sizeof(*extref);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (possibly truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed; otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}
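
/*
 * Usage sketch (illustrative only, not part of this file): dumping every
 * filesystem path of an inode. The 64k buffer size is an arbitrary
 * choice; when it is too small, elem_missed and bytes_missing describe
 * what was lost. Error handling is trimmed.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath = init_ipath(65536, fs_root, path);
 *	int i;
 *
 *	if (!IS_ERR(ipath)) {
 *		if (paths_from_inode(inum, ipath) == 0) {
 *			for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 *				pr_info("%s\n", (char *)(unsigned long)
 *					ipath->fspath->val[i]);
 *		}
 *		free_ipath(ipath);
 *	}
 *	btrfs_free_path(path);
 */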

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = vmalloc(alloc_bytes);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		vfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	vfree(ipath->fspath);
	kfree(ipath);
}
1845