xref: /openbmc/linux/fs/btrfs/backref.c (revision 8da6d5815c592b713ecaf4f4f8b631f8359c96c4)
1a542ad1bSJan Schmidt /*
2a542ad1bSJan Schmidt  * Copyright (C) 2011 STRATO.  All rights reserved.
3a542ad1bSJan Schmidt  *
4a542ad1bSJan Schmidt  * This program is free software; you can redistribute it and/or
5a542ad1bSJan Schmidt  * modify it under the terms of the GNU General Public
6a542ad1bSJan Schmidt  * License v2 as published by the Free Software Foundation.
7a542ad1bSJan Schmidt  *
8a542ad1bSJan Schmidt  * This program is distributed in the hope that it will be useful,
9a542ad1bSJan Schmidt  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10a542ad1bSJan Schmidt  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11a542ad1bSJan Schmidt  * General Public License for more details.
12a542ad1bSJan Schmidt  *
13a542ad1bSJan Schmidt  * You should have received a copy of the GNU General Public
14a542ad1bSJan Schmidt  * License along with this program; if not, write to the
15a542ad1bSJan Schmidt  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16a542ad1bSJan Schmidt  * Boston, MA 021110-1307, USA.
17a542ad1bSJan Schmidt  */
18a542ad1bSJan Schmidt 
19a542ad1bSJan Schmidt #include "ctree.h"
20a542ad1bSJan Schmidt #include "disk-io.h"
21a542ad1bSJan Schmidt #include "backref.h"
22*8da6d581SJan Schmidt #include "ulist.h"
23*8da6d581SJan Schmidt #include "transaction.h"
24*8da6d581SJan Schmidt #include "delayed-ref.h"
25a542ad1bSJan Schmidt 
/*
 * one inode-based reference to a data extent. not used by the code visible
 * in this chunk; field meanings below are inferred from the names —
 * NOTE(review): confirm against the consumers further down the file.
 */
struct __data_ref {
	struct list_head list;		/* link in a result list */
	u64 inum;			/* presumably the inode number */
	u64 root;			/* presumably the id of the containing root */
	u64 extent_data_item_offset;	/* presumably the EXTENT_DATA key offset */
};
32a542ad1bSJan Schmidt 
/*
 * one shared (parent-based) reference, identified only by disk byte number.
 * not used by the code visible in this chunk.
 */
struct __shared_ref {
	struct list_head list;		/* link in a result list */
	u64 disk_byte;			/* disk bytenr of the referenced extent */
};
37a542ad1bSJan Schmidt 
/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;		/* link in the prefs list */
	u64 root_id;			/* root the ref belongs to; 0 for shared
					 * (parent-based) refs */
	struct btrfs_key key;		/* key of the referencing item; zeroed
					 * when not (yet) known */
	int level;			/* tree level of the referencing block */
	int count;			/* ref count; __merge_refs sums these up,
					 * delayed drops may make it 0 */
	u64 parent;			/* logical bytenr of the parent block; 0
					 * until an indirect ref is resolved */
	u64 wanted_disk_byte;		/* disk bytenr of the referenced extent */
};
50*8da6d581SJan Schmidt 
51*8da6d581SJan Schmidt static int __add_prelim_ref(struct list_head *head, u64 root_id,
52*8da6d581SJan Schmidt 			    struct btrfs_key *key, int level, u64 parent,
53*8da6d581SJan Schmidt 			    u64 wanted_disk_byte, int count)
54*8da6d581SJan Schmidt {
55*8da6d581SJan Schmidt 	struct __prelim_ref *ref;
56*8da6d581SJan Schmidt 
57*8da6d581SJan Schmidt 	/* in case we're adding delayed refs, we're holding the refs spinlock */
58*8da6d581SJan Schmidt 	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
59*8da6d581SJan Schmidt 	if (!ref)
60*8da6d581SJan Schmidt 		return -ENOMEM;
61*8da6d581SJan Schmidt 
62*8da6d581SJan Schmidt 	ref->root_id = root_id;
63*8da6d581SJan Schmidt 	if (key)
64*8da6d581SJan Schmidt 		ref->key = *key;
65*8da6d581SJan Schmidt 	else
66*8da6d581SJan Schmidt 		memset(&ref->key, 0, sizeof(ref->key));
67*8da6d581SJan Schmidt 
68*8da6d581SJan Schmidt 	ref->level = level;
69*8da6d581SJan Schmidt 	ref->count = count;
70*8da6d581SJan Schmidt 	ref->parent = parent;
71*8da6d581SJan Schmidt 	ref->wanted_disk_byte = wanted_disk_byte;
72*8da6d581SJan Schmidt 	list_add_tail(&ref->list, head);
73*8da6d581SJan Schmidt 
74*8da6d581SJan Schmidt 	return 0;
75*8da6d581SJan Schmidt }
76*8da6d581SJan Schmidt 
/*
 * add eb->start to the ulist of @parents. for a leaf (level 0), also walk
 * the following leaves: as long as they hold EXTENT_DATA items for
 * @wanted_objectid that point at @wanted_disk_byte, those leaves reference
 * the extent as well and are added, too.
 *
 * returns 0 on success or a negative errno from ulist_add/btrfs_next_leaf.
 */
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
				struct ulist *parents,
				struct extent_buffer *eb, int level,
				u64 wanted_objectid, u64 wanted_disk_byte)
{
	int ret;
	int slot;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_byte;

add_parent:
	/* the current eb references the extent -> record it as a parent */
	ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
	if (ret < 0)
		return ret;

	/* for interior nodes there is nothing more to scan, we're done */
	if (level != 0)
		return 0;

	/*
	 * if the current leaf is full with EXTENT_DATA items, we must
	 * check the next one if that holds a reference as well.
	 * ref->count cannot be used to skip this check.
	 * repeat this until we don't find any additional EXTENT_DATA items.
	 */
	while (1) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;	/* no more leaves */

		eb = path->nodes[0];
		for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
			btrfs_item_key_to_cpu(eb, &key, slot);
			/* stop at the first item of a different inode/type */
			if (key.objectid != wanted_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY)
				return 0;
			fi = btrfs_item_ptr(eb, slot,
						struct btrfs_file_extent_item);
			disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
			if (disk_byte == wanted_disk_byte)
				goto add_parent;
		}
	}

	/* not reached; the loop above only exits via return */
	return 0;
}
125*8da6d581SJan Schmidt 
/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 *
 * searches the fs tree named by ref->root_id down to ref->level and feeds
 * the block(s) found there into add_all_parents, which fills @parents with
 * the logical addresses of all referencing blocks.
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
					struct __prelim_ref *ref,
					struct ulist *parents)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct btrfs_key key = {0};
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* look up the fs root this ref belongs to */
	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	rcu_read_lock();
	root_level = btrfs_header_level(root->node);
	rcu_read_unlock();

	/*
	 * the ref lives one level above the root: the wanted block can only
	 * be the root itself, there is nothing below to search for
	 */
	if (root_level + 1 == level)
		goto out;

	/* only descend to the level the ref lives on */
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 (unsigned long long)ref->root_id, level, ref->count, ret,
		 (unsigned long long)ref->key.objectid, ref->key.type,
		 (unsigned long long)ref->key.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	if (!eb) {
		WARN_ON(1);
		ret = 1;
		goto out;
	}

	if (level == 0) {
		/*
		 * key not found and the slot points past the last item:
		 * the item we're after starts in the next leaf
		 */
		if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
	}

	/* the last two parameters will only be used for level == 0 */
	ret = add_all_parents(root, path, parents, eb, level, key.objectid,
				ref->wanted_disk_byte);
out:
	btrfs_free_path(path);
	return ret;
}
198*8da6d581SJan Schmidt 
/*
 * resolve all indirect backrefs from the list
 *
 * for each unresolved ref, all parent blocks are looked up via
 * __resolve_indirect_ref. the first parent found is stored in the ref at
 * hand; each additional parent gets a copy of the ref inserted directly
 * after it. an individual resolution failure is remembered in ret but
 * does not stop the loop.
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   struct list_head *head)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)	/* adds and drops cancelled out */
			continue;
		err = __resolve_indirect_ref(fs_info, ref, parents);
		if (err) {
			/* keep going, but report the first error seen */
			if (ret == 0)
				ret = err;
			continue;
		}

		/* we put the first parent into the ref at hand */
		node = ulist_next(parents, NULL);
		ref->parent = node ? node->val : 0;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, node))) {
			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				break;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			list_add(&new_ref->list, &ref->list);
		}
		/* start with an empty ulist for the next ref */
		ulist_reinit(parents);
	}

	ulist_free(parents);
	return ret;
}
255*8da6d581SJan Schmidt 
256*8da6d581SJan Schmidt /*
257*8da6d581SJan Schmidt  * merge two lists of backrefs and adjust counts accordingly
258*8da6d581SJan Schmidt  *
259*8da6d581SJan Schmidt  * mode = 1: merge identical keys, if key is set
260*8da6d581SJan Schmidt  * mode = 2: merge identical parents
261*8da6d581SJan Schmidt  */
262*8da6d581SJan Schmidt static int __merge_refs(struct list_head *head, int mode)
263*8da6d581SJan Schmidt {
264*8da6d581SJan Schmidt 	struct list_head *pos1;
265*8da6d581SJan Schmidt 
266*8da6d581SJan Schmidt 	list_for_each(pos1, head) {
267*8da6d581SJan Schmidt 		struct list_head *n2;
268*8da6d581SJan Schmidt 		struct list_head *pos2;
269*8da6d581SJan Schmidt 		struct __prelim_ref *ref1;
270*8da6d581SJan Schmidt 
271*8da6d581SJan Schmidt 		ref1 = list_entry(pos1, struct __prelim_ref, list);
272*8da6d581SJan Schmidt 
273*8da6d581SJan Schmidt 		if (mode == 1 && ref1->key.type == 0)
274*8da6d581SJan Schmidt 			continue;
275*8da6d581SJan Schmidt 		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
276*8da6d581SJan Schmidt 		     pos2 = n2, n2 = pos2->next) {
277*8da6d581SJan Schmidt 			struct __prelim_ref *ref2;
278*8da6d581SJan Schmidt 
279*8da6d581SJan Schmidt 			ref2 = list_entry(pos2, struct __prelim_ref, list);
280*8da6d581SJan Schmidt 
281*8da6d581SJan Schmidt 			if (mode == 1) {
282*8da6d581SJan Schmidt 				if (memcmp(&ref1->key, &ref2->key,
283*8da6d581SJan Schmidt 					   sizeof(ref1->key)) ||
284*8da6d581SJan Schmidt 				    ref1->level != ref2->level ||
285*8da6d581SJan Schmidt 				    ref1->root_id != ref2->root_id)
286*8da6d581SJan Schmidt 					continue;
287*8da6d581SJan Schmidt 				ref1->count += ref2->count;
288*8da6d581SJan Schmidt 			} else {
289*8da6d581SJan Schmidt 				if (ref1->parent != ref2->parent)
290*8da6d581SJan Schmidt 					continue;
291*8da6d581SJan Schmidt 				ref1->count += ref2->count;
292*8da6d581SJan Schmidt 			}
293*8da6d581SJan Schmidt 			list_del(&ref2->list);
294*8da6d581SJan Schmidt 			kfree(ref2);
295*8da6d581SJan Schmidt 		}
296*8da6d581SJan Schmidt 
297*8da6d581SJan Schmidt 	}
298*8da6d581SJan Schmidt 	return 0;
299*8da6d581SJan Schmidt }
300*8da6d581SJan Schmidt 
/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller or equal that seq to the list
 *
 * if a pending extent op updates the key, @info_key is filled from it and
 * used as the key for tree block refs. dropped references are added with a
 * negated count, so that they cancel out against matching adds when the
 * lists get merged. must be called with the head's ref updates stable
 * (see the GFP_ATOMIC note in __add_prelim_ref).
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct btrfs_key *info_key,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	int sgn;
	int ret;

	/* a pending extent op may carry the key for this extent */
	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(info_key, &extent_op->key);

	/*
	 * walk backwards from the head; all preceding nodes with the same
	 * bytenr are delayed refs for this extent
	 */
	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		/* skip refs newer than the point in time we care about */
		if (node->seq > seq)
			continue;

		/* determine whether this ref adds or drops references */
		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* indirect tree block ref: resolved via root + key */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* shared tree block ref: the parent bytenr is known */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* indirect data ref: resolved via root + file key */
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* shared data ref: the parent bytenr is known */
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return 0;
}
398*8da6d581SJan Schmidt 
/*
 * add all inline backrefs for bytenr to the list
 *
 * the caller must have positioned @path so that path->slots[0] - 1 points
 * at the extent item for @bytenr. for tree blocks, the block's key and
 * level are additionally reported back via @info_key and @info_level.
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     struct btrfs_key *info_key, int *info_level,
			     struct list_head *prefs)
{
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0] - 1;	/* the extent item itself */

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	/* the inline refs follow directly after the extent item ... */
	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		/* ... and, for tree blocks, after the tree block info */
		struct btrfs_tree_block_info *info;
		struct btrfs_disk_key disk_key;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		btrfs_tree_block_key(leaf, info, &disk_key);
		btrfs_disk_key_to_cpu(info_key, &disk_key);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* offset is the parent block's bytenr */
			ret = __add_prelim_ref(prefs, 0, info_key,
						*info_level + 1, offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* offset is the root id; resolved indirectly later */
			ret = __add_prelim_ref(prefs, offset, info_key,
					       *info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* indirect data ref: root + (inode, offset) key */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
						count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}
500*8da6d581SJan Schmidt 
/*
 * add all non-inline backrefs for bytenr to the list
 *
 * the caller must have positioned @path at the extent item for @bytenr in
 * the extent tree; this walks all following items with the same objectid
 * and a backref key type and adds them via __add_prelim_ref.
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    struct btrfs_key *info_key, int info_level,
			    struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			/* no more items in the tree, done */
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* backref items share the extent's objectid (= bytenr) */
		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* key.offset is the parent block's bytenr */
			ret = __add_prelim_ref(prefs, 0, info_key,
						info_level + 1, key.offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
						bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* key.offset is the root id; resolved indirectly later */
			ret = __add_prelim_ref(prefs, key.offset, info_key,
						info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* indirect data ref: root + (inode, offset) key */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
						bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return ret;
}
581*8da6d581SJan Schmidt 
582*8da6d581SJan Schmidt /*
583*8da6d581SJan Schmidt  * this adds all existing backrefs (inline backrefs, backrefs and delayed
584*8da6d581SJan Schmidt  * refs) for the given bytenr to the refs list, merges duplicates and resolves
585*8da6d581SJan Schmidt  * indirect refs to their parent bytenr.
586*8da6d581SJan Schmidt  * When roots are found, they're added to the roots list
587*8da6d581SJan Schmidt  *
588*8da6d581SJan Schmidt  * FIXME some caching might speed things up
589*8da6d581SJan Schmidt  */
590*8da6d581SJan Schmidt static int find_parent_nodes(struct btrfs_trans_handle *trans,
591*8da6d581SJan Schmidt 			     struct btrfs_fs_info *fs_info, u64 bytenr,
592*8da6d581SJan Schmidt 			     u64 seq, struct ulist *refs, struct ulist *roots)
593*8da6d581SJan Schmidt {
594*8da6d581SJan Schmidt 	struct btrfs_key key;
595*8da6d581SJan Schmidt 	struct btrfs_path *path;
596*8da6d581SJan Schmidt 	struct btrfs_key info_key = { 0 };
597*8da6d581SJan Schmidt 	struct btrfs_delayed_ref_root *delayed_refs = NULL;
598*8da6d581SJan Schmidt 	struct btrfs_delayed_ref_head *head = NULL;
599*8da6d581SJan Schmidt 	int info_level = 0;
600*8da6d581SJan Schmidt 	int ret;
601*8da6d581SJan Schmidt 	struct list_head prefs_delayed;
602*8da6d581SJan Schmidt 	struct list_head prefs;
603*8da6d581SJan Schmidt 	struct __prelim_ref *ref;
604*8da6d581SJan Schmidt 
605*8da6d581SJan Schmidt 	INIT_LIST_HEAD(&prefs);
606*8da6d581SJan Schmidt 	INIT_LIST_HEAD(&prefs_delayed);
607*8da6d581SJan Schmidt 
608*8da6d581SJan Schmidt 	key.objectid = bytenr;
609*8da6d581SJan Schmidt 	key.type = BTRFS_EXTENT_ITEM_KEY;
610*8da6d581SJan Schmidt 	key.offset = (u64)-1;
611*8da6d581SJan Schmidt 
612*8da6d581SJan Schmidt 	path = btrfs_alloc_path();
613*8da6d581SJan Schmidt 	if (!path)
614*8da6d581SJan Schmidt 		return -ENOMEM;
615*8da6d581SJan Schmidt 
616*8da6d581SJan Schmidt 	/*
617*8da6d581SJan Schmidt 	 * grab both a lock on the path and a lock on the delayed ref head.
618*8da6d581SJan Schmidt 	 * We need both to get a consistent picture of how the refs look
619*8da6d581SJan Schmidt 	 * at a specified point in time
620*8da6d581SJan Schmidt 	 */
621*8da6d581SJan Schmidt again:
622*8da6d581SJan Schmidt 	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
623*8da6d581SJan Schmidt 	if (ret < 0)
624*8da6d581SJan Schmidt 		goto out;
625*8da6d581SJan Schmidt 	BUG_ON(ret == 0);
626*8da6d581SJan Schmidt 
627*8da6d581SJan Schmidt 	/*
628*8da6d581SJan Schmidt 	 * look if there are updates for this ref queued and lock the head
629*8da6d581SJan Schmidt 	 */
630*8da6d581SJan Schmidt 	delayed_refs = &trans->transaction->delayed_refs;
631*8da6d581SJan Schmidt 	spin_lock(&delayed_refs->lock);
632*8da6d581SJan Schmidt 	head = btrfs_find_delayed_ref_head(trans, bytenr);
633*8da6d581SJan Schmidt 	if (head) {
634*8da6d581SJan Schmidt 		if (!mutex_trylock(&head->mutex)) {
635*8da6d581SJan Schmidt 			atomic_inc(&head->node.refs);
636*8da6d581SJan Schmidt 			spin_unlock(&delayed_refs->lock);
637*8da6d581SJan Schmidt 
638*8da6d581SJan Schmidt 			btrfs_release_path(path);
639*8da6d581SJan Schmidt 
640*8da6d581SJan Schmidt 			/*
641*8da6d581SJan Schmidt 			 * Mutex was contended, block until it's
642*8da6d581SJan Schmidt 			 * released and try again
643*8da6d581SJan Schmidt 			 */
644*8da6d581SJan Schmidt 			mutex_lock(&head->mutex);
645*8da6d581SJan Schmidt 			mutex_unlock(&head->mutex);
646*8da6d581SJan Schmidt 			btrfs_put_delayed_ref(&head->node);
647*8da6d581SJan Schmidt 			goto again;
648*8da6d581SJan Schmidt 		}
649*8da6d581SJan Schmidt 		ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
650*8da6d581SJan Schmidt 		if (ret)
651*8da6d581SJan Schmidt 			goto out;
652*8da6d581SJan Schmidt 	}
653*8da6d581SJan Schmidt 	spin_unlock(&delayed_refs->lock);
654*8da6d581SJan Schmidt 
655*8da6d581SJan Schmidt 	if (path->slots[0]) {
656*8da6d581SJan Schmidt 		struct extent_buffer *leaf;
657*8da6d581SJan Schmidt 		int slot;
658*8da6d581SJan Schmidt 
659*8da6d581SJan Schmidt 		leaf = path->nodes[0];
660*8da6d581SJan Schmidt 		slot = path->slots[0] - 1;
661*8da6d581SJan Schmidt 		btrfs_item_key_to_cpu(leaf, &key, slot);
662*8da6d581SJan Schmidt 		if (key.objectid == bytenr &&
663*8da6d581SJan Schmidt 		    key.type == BTRFS_EXTENT_ITEM_KEY) {
664*8da6d581SJan Schmidt 			ret = __add_inline_refs(fs_info, path, bytenr,
665*8da6d581SJan Schmidt 						&info_key, &info_level, &prefs);
666*8da6d581SJan Schmidt 			if (ret)
667*8da6d581SJan Schmidt 				goto out;
668*8da6d581SJan Schmidt 			ret = __add_keyed_refs(fs_info, path, bytenr, &info_key,
669*8da6d581SJan Schmidt 					       info_level, &prefs);
670*8da6d581SJan Schmidt 			if (ret)
671*8da6d581SJan Schmidt 				goto out;
672*8da6d581SJan Schmidt 		}
673*8da6d581SJan Schmidt 	}
674*8da6d581SJan Schmidt 	btrfs_release_path(path);
675*8da6d581SJan Schmidt 
676*8da6d581SJan Schmidt 	/*
677*8da6d581SJan Schmidt 	 * when adding the delayed refs above, the info_key might not have
678*8da6d581SJan Schmidt 	 * been known yet. Go over the list and replace the missing keys
679*8da6d581SJan Schmidt 	 */
680*8da6d581SJan Schmidt 	list_for_each_entry(ref, &prefs_delayed, list) {
681*8da6d581SJan Schmidt 		if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
682*8da6d581SJan Schmidt 			memcpy(&ref->key, &info_key, sizeof(ref->key));
683*8da6d581SJan Schmidt 	}
684*8da6d581SJan Schmidt 	list_splice_init(&prefs_delayed, &prefs);
685*8da6d581SJan Schmidt 
686*8da6d581SJan Schmidt 	ret = __merge_refs(&prefs, 1);
687*8da6d581SJan Schmidt 	if (ret)
688*8da6d581SJan Schmidt 		goto out;
689*8da6d581SJan Schmidt 
690*8da6d581SJan Schmidt 	ret = __resolve_indirect_refs(fs_info, &prefs);
691*8da6d581SJan Schmidt 	if (ret)
692*8da6d581SJan Schmidt 		goto out;
693*8da6d581SJan Schmidt 
694*8da6d581SJan Schmidt 	ret = __merge_refs(&prefs, 2);
695*8da6d581SJan Schmidt 	if (ret)
696*8da6d581SJan Schmidt 		goto out;
697*8da6d581SJan Schmidt 
698*8da6d581SJan Schmidt 	while (!list_empty(&prefs)) {
699*8da6d581SJan Schmidt 		ref = list_first_entry(&prefs, struct __prelim_ref, list);
700*8da6d581SJan Schmidt 		list_del(&ref->list);
701*8da6d581SJan Schmidt 		if (ref->count < 0)
702*8da6d581SJan Schmidt 			WARN_ON(1);
703*8da6d581SJan Schmidt 		if (ref->count && ref->root_id && ref->parent == 0) {
704*8da6d581SJan Schmidt 			/* no parent == root of tree */
705*8da6d581SJan Schmidt 			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
706*8da6d581SJan Schmidt 			BUG_ON(ret < 0);
707*8da6d581SJan Schmidt 		}
708*8da6d581SJan Schmidt 		if (ref->count && ref->parent) {
709*8da6d581SJan Schmidt 			ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
710*8da6d581SJan Schmidt 			BUG_ON(ret < 0);
711*8da6d581SJan Schmidt 		}
712*8da6d581SJan Schmidt 		kfree(ref);
713*8da6d581SJan Schmidt 	}
714*8da6d581SJan Schmidt 
715*8da6d581SJan Schmidt out:
716*8da6d581SJan Schmidt 	if (head)
717*8da6d581SJan Schmidt 		mutex_unlock(&head->mutex);
718*8da6d581SJan Schmidt 	btrfs_free_path(path);
719*8da6d581SJan Schmidt 	while (!list_empty(&prefs)) {
720*8da6d581SJan Schmidt 		ref = list_first_entry(&prefs, struct __prelim_ref, list);
721*8da6d581SJan Schmidt 		list_del(&ref->list);
722*8da6d581SJan Schmidt 		kfree(ref);
723*8da6d581SJan Schmidt 	}
724*8da6d581SJan Schmidt 	while (!list_empty(&prefs_delayed)) {
725*8da6d581SJan Schmidt 		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
726*8da6d581SJan Schmidt 				       list);
727*8da6d581SJan Schmidt 		list_del(&ref->list);
728*8da6d581SJan Schmidt 		kfree(ref);
729*8da6d581SJan Schmidt 	}
730*8da6d581SJan Schmidt 
731*8da6d581SJan Schmidt 	return ret;
732*8da6d581SJan Schmidt }
733*8da6d581SJan Schmidt 
734*8da6d581SJan Schmidt /*
735*8da6d581SJan Schmidt  * Finds all leafs with a reference to the specified combination of bytenr and
736*8da6d581SJan Schmidt  * offset. key_list_head will point to a list of corresponding keys (caller must
737*8da6d581SJan Schmidt  * free each list element). The leafs will be stored in the leafs ulist, which
738*8da6d581SJan Schmidt  * must be freed with ulist_free.
739*8da6d581SJan Schmidt  *
740*8da6d581SJan Schmidt  * returns 0 on success, <0 on error
741*8da6d581SJan Schmidt  */
742*8da6d581SJan Schmidt static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
743*8da6d581SJan Schmidt 				struct btrfs_fs_info *fs_info, u64 bytenr,
744*8da6d581SJan Schmidt 				u64 num_bytes, u64 seq, struct ulist **leafs)
745*8da6d581SJan Schmidt {
746*8da6d581SJan Schmidt 	struct ulist *tmp;
747*8da6d581SJan Schmidt 	int ret;
748*8da6d581SJan Schmidt 
749*8da6d581SJan Schmidt 	tmp = ulist_alloc(GFP_NOFS);
750*8da6d581SJan Schmidt 	if (!tmp)
751*8da6d581SJan Schmidt 		return -ENOMEM;
752*8da6d581SJan Schmidt 	*leafs = ulist_alloc(GFP_NOFS);
753*8da6d581SJan Schmidt 	if (!*leafs) {
754*8da6d581SJan Schmidt 		ulist_free(tmp);
755*8da6d581SJan Schmidt 		return -ENOMEM;
756*8da6d581SJan Schmidt 	}
757*8da6d581SJan Schmidt 
758*8da6d581SJan Schmidt 	ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
759*8da6d581SJan Schmidt 	ulist_free(tmp);
760*8da6d581SJan Schmidt 
761*8da6d581SJan Schmidt 	if (ret < 0 && ret != -ENOENT) {
762*8da6d581SJan Schmidt 		ulist_free(*leafs);
763*8da6d581SJan Schmidt 		return ret;
764*8da6d581SJan Schmidt 	}
765*8da6d581SJan Schmidt 
766*8da6d581SJan Schmidt 	return 0;
767*8da6d581SJan Schmidt }
768*8da6d581SJan Schmidt 
769*8da6d581SJan Schmidt /*
770*8da6d581SJan Schmidt  * walk all backrefs for a given extent to find all roots that reference this
771*8da6d581SJan Schmidt  * extent. Walking a backref means finding all extents that reference this
772*8da6d581SJan Schmidt  * extent and in turn walk the backrefs of those, too. Naturally this is a
773*8da6d581SJan Schmidt  * recursive process, but here it is implemented in an iterative fashion: We
774*8da6d581SJan Schmidt  * find all referencing extents for the extent in question and put them on a
775*8da6d581SJan Schmidt  * list. In turn, we find all referencing extents for those, further appending
776*8da6d581SJan Schmidt  * to the list. The way we iterate the list allows adding more elements after
777*8da6d581SJan Schmidt  * the current while iterating. The process stops when we reach the end of the
778*8da6d581SJan Schmidt  * list. Found roots are added to the roots list.
779*8da6d581SJan Schmidt  *
780*8da6d581SJan Schmidt  * returns 0 on success, < 0 on error.
781*8da6d581SJan Schmidt  */
782*8da6d581SJan Schmidt int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
783*8da6d581SJan Schmidt 				struct btrfs_fs_info *fs_info, u64 bytenr,
784*8da6d581SJan Schmidt 				u64 num_bytes, u64 seq, struct ulist **roots)
785*8da6d581SJan Schmidt {
786*8da6d581SJan Schmidt 	struct ulist *tmp;
787*8da6d581SJan Schmidt 	struct ulist_node *node = NULL;
788*8da6d581SJan Schmidt 	int ret;
789*8da6d581SJan Schmidt 
790*8da6d581SJan Schmidt 	tmp = ulist_alloc(GFP_NOFS);
791*8da6d581SJan Schmidt 	if (!tmp)
792*8da6d581SJan Schmidt 		return -ENOMEM;
793*8da6d581SJan Schmidt 	*roots = ulist_alloc(GFP_NOFS);
794*8da6d581SJan Schmidt 	if (!*roots) {
795*8da6d581SJan Schmidt 		ulist_free(tmp);
796*8da6d581SJan Schmidt 		return -ENOMEM;
797*8da6d581SJan Schmidt 	}
798*8da6d581SJan Schmidt 
799*8da6d581SJan Schmidt 	while (1) {
800*8da6d581SJan Schmidt 		ret = find_parent_nodes(trans, fs_info, bytenr, seq,
801*8da6d581SJan Schmidt 					tmp, *roots);
802*8da6d581SJan Schmidt 		if (ret < 0 && ret != -ENOENT) {
803*8da6d581SJan Schmidt 			ulist_free(tmp);
804*8da6d581SJan Schmidt 			ulist_free(*roots);
805*8da6d581SJan Schmidt 			return ret;
806*8da6d581SJan Schmidt 		}
807*8da6d581SJan Schmidt 		node = ulist_next(tmp, node);
808*8da6d581SJan Schmidt 		if (!node)
809*8da6d581SJan Schmidt 			break;
810*8da6d581SJan Schmidt 		bytenr = node->val;
811*8da6d581SJan Schmidt 	}
812*8da6d581SJan Schmidt 
813*8da6d581SJan Schmidt 	ulist_free(tmp);
814*8da6d581SJan Schmidt 	return 0;
815*8da6d581SJan Schmidt }
816*8da6d581SJan Schmidt 
817*8da6d581SJan Schmidt 
818a542ad1bSJan Schmidt static int __inode_info(u64 inum, u64 ioff, u8 key_type,
819a542ad1bSJan Schmidt 			struct btrfs_root *fs_root, struct btrfs_path *path,
820a542ad1bSJan Schmidt 			struct btrfs_key *found_key)
821a542ad1bSJan Schmidt {
822a542ad1bSJan Schmidt 	int ret;
823a542ad1bSJan Schmidt 	struct btrfs_key key;
824a542ad1bSJan Schmidt 	struct extent_buffer *eb;
825a542ad1bSJan Schmidt 
826a542ad1bSJan Schmidt 	key.type = key_type;
827a542ad1bSJan Schmidt 	key.objectid = inum;
828a542ad1bSJan Schmidt 	key.offset = ioff;
829a542ad1bSJan Schmidt 
830a542ad1bSJan Schmidt 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
831a542ad1bSJan Schmidt 	if (ret < 0)
832a542ad1bSJan Schmidt 		return ret;
833a542ad1bSJan Schmidt 
834a542ad1bSJan Schmidt 	eb = path->nodes[0];
835a542ad1bSJan Schmidt 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
836a542ad1bSJan Schmidt 		ret = btrfs_next_leaf(fs_root, path);
837a542ad1bSJan Schmidt 		if (ret)
838a542ad1bSJan Schmidt 			return ret;
839a542ad1bSJan Schmidt 		eb = path->nodes[0];
840a542ad1bSJan Schmidt 	}
841a542ad1bSJan Schmidt 
842a542ad1bSJan Schmidt 	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
843a542ad1bSJan Schmidt 	if (found_key->type != key.type || found_key->objectid != key.objectid)
844a542ad1bSJan Schmidt 		return 1;
845a542ad1bSJan Schmidt 
846a542ad1bSJan Schmidt 	return 0;
847a542ad1bSJan Schmidt }
848a542ad1bSJan Schmidt 
849a542ad1bSJan Schmidt /*
850a542ad1bSJan Schmidt  * this makes the path point to (inum INODE_ITEM ioff)
851a542ad1bSJan Schmidt  */
852a542ad1bSJan Schmidt int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
853a542ad1bSJan Schmidt 			struct btrfs_path *path)
854a542ad1bSJan Schmidt {
855a542ad1bSJan Schmidt 	struct btrfs_key key;
856a542ad1bSJan Schmidt 	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
857a542ad1bSJan Schmidt 				&key);
858a542ad1bSJan Schmidt }
859a542ad1bSJan Schmidt 
860a542ad1bSJan Schmidt static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
861a542ad1bSJan Schmidt 				struct btrfs_path *path,
862a542ad1bSJan Schmidt 				struct btrfs_key *found_key)
863a542ad1bSJan Schmidt {
864a542ad1bSJan Schmidt 	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
865a542ad1bSJan Schmidt 				found_key);
866a542ad1bSJan Schmidt }
867a542ad1bSJan Schmidt 
868a542ad1bSJan Schmidt /*
869a542ad1bSJan Schmidt  * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements
870a542ad1bSJan Schmidt  * of the path are separated by '/' and the path is guaranteed to be
871a542ad1bSJan Schmidt  * 0-terminated. the path is only given within the current file system.
872a542ad1bSJan Schmidt  * Therefore, it never starts with a '/'. the caller is responsible to provide
873a542ad1bSJan Schmidt  * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
874a542ad1bSJan Schmidt  * the start point of the resulting string is returned. this pointer is within
875a542ad1bSJan Schmidt  * dest, normally.
876a542ad1bSJan Schmidt  * in case the path buffer would overflow, the pointer is decremented further
877a542ad1bSJan Schmidt  * as if output was written to the buffer, though no more output is actually
878a542ad1bSJan Schmidt  * generated. that way, the caller can determine how much space would be
879a542ad1bSJan Schmidt  * required for the path to fit into the buffer. in that case, the returned
880a542ad1bSJan Schmidt  * value will be smaller than dest. callers must check this!
881a542ad1bSJan Schmidt  */
882a542ad1bSJan Schmidt static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
883a542ad1bSJan Schmidt 				struct btrfs_inode_ref *iref,
884a542ad1bSJan Schmidt 				struct extent_buffer *eb_in, u64 parent,
885a542ad1bSJan Schmidt 				char *dest, u32 size)
886a542ad1bSJan Schmidt {
887a542ad1bSJan Schmidt 	u32 len;
888a542ad1bSJan Schmidt 	int slot;
889a542ad1bSJan Schmidt 	u64 next_inum;
890a542ad1bSJan Schmidt 	int ret;
891a542ad1bSJan Schmidt 	s64 bytes_left = size - 1;
892a542ad1bSJan Schmidt 	struct extent_buffer *eb = eb_in;
893a542ad1bSJan Schmidt 	struct btrfs_key found_key;
894a542ad1bSJan Schmidt 
895a542ad1bSJan Schmidt 	if (bytes_left >= 0)
896a542ad1bSJan Schmidt 		dest[bytes_left] = '\0';
897a542ad1bSJan Schmidt 
898a542ad1bSJan Schmidt 	while (1) {
899a542ad1bSJan Schmidt 		len = btrfs_inode_ref_name_len(eb, iref);
900a542ad1bSJan Schmidt 		bytes_left -= len;
901a542ad1bSJan Schmidt 		if (bytes_left >= 0)
902a542ad1bSJan Schmidt 			read_extent_buffer(eb, dest + bytes_left,
903a542ad1bSJan Schmidt 						(unsigned long)(iref + 1), len);
904a542ad1bSJan Schmidt 		if (eb != eb_in)
905a542ad1bSJan Schmidt 			free_extent_buffer(eb);
906a542ad1bSJan Schmidt 		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
907a542ad1bSJan Schmidt 		if (ret)
908a542ad1bSJan Schmidt 			break;
909a542ad1bSJan Schmidt 		next_inum = found_key.offset;
910a542ad1bSJan Schmidt 
911a542ad1bSJan Schmidt 		/* regular exit ahead */
912a542ad1bSJan Schmidt 		if (parent == next_inum)
913a542ad1bSJan Schmidt 			break;
914a542ad1bSJan Schmidt 
915a542ad1bSJan Schmidt 		slot = path->slots[0];
916a542ad1bSJan Schmidt 		eb = path->nodes[0];
917a542ad1bSJan Schmidt 		/* make sure we can use eb after releasing the path */
918a542ad1bSJan Schmidt 		if (eb != eb_in)
919a542ad1bSJan Schmidt 			atomic_inc(&eb->refs);
920a542ad1bSJan Schmidt 		btrfs_release_path(path);
921a542ad1bSJan Schmidt 
922a542ad1bSJan Schmidt 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
923a542ad1bSJan Schmidt 		parent = next_inum;
924a542ad1bSJan Schmidt 		--bytes_left;
925a542ad1bSJan Schmidt 		if (bytes_left >= 0)
926a542ad1bSJan Schmidt 			dest[bytes_left] = '/';
927a542ad1bSJan Schmidt 	}
928a542ad1bSJan Schmidt 
929a542ad1bSJan Schmidt 	btrfs_release_path(path);
930a542ad1bSJan Schmidt 
931a542ad1bSJan Schmidt 	if (ret)
932a542ad1bSJan Schmidt 		return ERR_PTR(ret);
933a542ad1bSJan Schmidt 
934a542ad1bSJan Schmidt 	return dest + bytes_left;
935a542ad1bSJan Schmidt }
936a542ad1bSJan Schmidt 
937a542ad1bSJan Schmidt /*
938a542ad1bSJan Schmidt  * this makes the path point to (logical EXTENT_ITEM *)
939a542ad1bSJan Schmidt  * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
940a542ad1bSJan Schmidt  * tree blocks and <0 on error.
941a542ad1bSJan Schmidt  */
942a542ad1bSJan Schmidt int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
943a542ad1bSJan Schmidt 			struct btrfs_path *path, struct btrfs_key *found_key)
944a542ad1bSJan Schmidt {
945a542ad1bSJan Schmidt 	int ret;
946a542ad1bSJan Schmidt 	u64 flags;
947a542ad1bSJan Schmidt 	u32 item_size;
948a542ad1bSJan Schmidt 	struct extent_buffer *eb;
949a542ad1bSJan Schmidt 	struct btrfs_extent_item *ei;
950a542ad1bSJan Schmidt 	struct btrfs_key key;
951a542ad1bSJan Schmidt 
952a542ad1bSJan Schmidt 	key.type = BTRFS_EXTENT_ITEM_KEY;
953a542ad1bSJan Schmidt 	key.objectid = logical;
954a542ad1bSJan Schmidt 	key.offset = (u64)-1;
955a542ad1bSJan Schmidt 
956a542ad1bSJan Schmidt 	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
957a542ad1bSJan Schmidt 	if (ret < 0)
958a542ad1bSJan Schmidt 		return ret;
959a542ad1bSJan Schmidt 	ret = btrfs_previous_item(fs_info->extent_root, path,
960a542ad1bSJan Schmidt 					0, BTRFS_EXTENT_ITEM_KEY);
961a542ad1bSJan Schmidt 	if (ret < 0)
962a542ad1bSJan Schmidt 		return ret;
963a542ad1bSJan Schmidt 
964a542ad1bSJan Schmidt 	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
965a542ad1bSJan Schmidt 	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
966a542ad1bSJan Schmidt 	    found_key->objectid > logical ||
967a542ad1bSJan Schmidt 	    found_key->objectid + found_key->offset <= logical)
968a542ad1bSJan Schmidt 		return -ENOENT;
969a542ad1bSJan Schmidt 
970a542ad1bSJan Schmidt 	eb = path->nodes[0];
971a542ad1bSJan Schmidt 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
972a542ad1bSJan Schmidt 	BUG_ON(item_size < sizeof(*ei));
973a542ad1bSJan Schmidt 
974a542ad1bSJan Schmidt 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
975a542ad1bSJan Schmidt 	flags = btrfs_extent_flags(eb, ei);
976a542ad1bSJan Schmidt 
977a542ad1bSJan Schmidt 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
978a542ad1bSJan Schmidt 		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
979a542ad1bSJan Schmidt 	if (flags & BTRFS_EXTENT_FLAG_DATA)
980a542ad1bSJan Schmidt 		return BTRFS_EXTENT_FLAG_DATA;
981a542ad1bSJan Schmidt 
982a542ad1bSJan Schmidt 	return -EIO;
983a542ad1bSJan Schmidt }
984a542ad1bSJan Schmidt 
985a542ad1bSJan Schmidt /*
986a542ad1bSJan Schmidt  * helper function to iterate extent inline refs. ptr must point to a 0 value
987a542ad1bSJan Schmidt  * for the first call and may be modified. it is used to track state.
988a542ad1bSJan Schmidt  * if more refs exist, 0 is returned and the next call to
989a542ad1bSJan Schmidt  * __get_extent_inline_ref must pass the modified ptr parameter to get the
990a542ad1bSJan Schmidt  * next ref. after the last ref was processed, 1 is returned.
991a542ad1bSJan Schmidt  * returns <0 on error
992a542ad1bSJan Schmidt  */
993a542ad1bSJan Schmidt static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
994a542ad1bSJan Schmidt 				struct btrfs_extent_item *ei, u32 item_size,
995a542ad1bSJan Schmidt 				struct btrfs_extent_inline_ref **out_eiref,
996a542ad1bSJan Schmidt 				int *out_type)
997a542ad1bSJan Schmidt {
998a542ad1bSJan Schmidt 	unsigned long end;
999a542ad1bSJan Schmidt 	u64 flags;
1000a542ad1bSJan Schmidt 	struct btrfs_tree_block_info *info;
1001a542ad1bSJan Schmidt 
1002a542ad1bSJan Schmidt 	if (!*ptr) {
1003a542ad1bSJan Schmidt 		/* first call */
1004a542ad1bSJan Schmidt 		flags = btrfs_extent_flags(eb, ei);
1005a542ad1bSJan Schmidt 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1006a542ad1bSJan Schmidt 			info = (struct btrfs_tree_block_info *)(ei + 1);
1007a542ad1bSJan Schmidt 			*out_eiref =
1008a542ad1bSJan Schmidt 				(struct btrfs_extent_inline_ref *)(info + 1);
1009a542ad1bSJan Schmidt 		} else {
1010a542ad1bSJan Schmidt 			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1011a542ad1bSJan Schmidt 		}
1012a542ad1bSJan Schmidt 		*ptr = (unsigned long)*out_eiref;
1013a542ad1bSJan Schmidt 		if ((void *)*ptr >= (void *)ei + item_size)
1014a542ad1bSJan Schmidt 			return -ENOENT;
1015a542ad1bSJan Schmidt 	}
1016a542ad1bSJan Schmidt 
1017a542ad1bSJan Schmidt 	end = (unsigned long)ei + item_size;
1018a542ad1bSJan Schmidt 	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
1019a542ad1bSJan Schmidt 	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
1020a542ad1bSJan Schmidt 
1021a542ad1bSJan Schmidt 	*ptr += btrfs_extent_inline_ref_size(*out_type);
1022a542ad1bSJan Schmidt 	WARN_ON(*ptr > end);
1023a542ad1bSJan Schmidt 	if (*ptr == end)
1024a542ad1bSJan Schmidt 		return 1; /* last */
1025a542ad1bSJan Schmidt 
1026a542ad1bSJan Schmidt 	return 0;
1027a542ad1bSJan Schmidt }
1028a542ad1bSJan Schmidt 
1029a542ad1bSJan Schmidt /*
1030a542ad1bSJan Schmidt  * reads the tree block backref for an extent. tree level and root are returned
1031a542ad1bSJan Schmidt  * through out_level and out_root. ptr must point to a 0 value for the first
1032a542ad1bSJan Schmidt  * call and may be modified (see __get_extent_inline_ref comment).
1033a542ad1bSJan Schmidt  * returns 0 if data was provided, 1 if there was no more data to provide or
1034a542ad1bSJan Schmidt  * <0 on error.
1035a542ad1bSJan Schmidt  */
1036a542ad1bSJan Schmidt int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1037a542ad1bSJan Schmidt 				struct btrfs_extent_item *ei, u32 item_size,
1038a542ad1bSJan Schmidt 				u64 *out_root, u8 *out_level)
1039a542ad1bSJan Schmidt {
1040a542ad1bSJan Schmidt 	int ret;
1041a542ad1bSJan Schmidt 	int type;
1042a542ad1bSJan Schmidt 	struct btrfs_tree_block_info *info;
1043a542ad1bSJan Schmidt 	struct btrfs_extent_inline_ref *eiref;
1044a542ad1bSJan Schmidt 
1045a542ad1bSJan Schmidt 	if (*ptr == (unsigned long)-1)
1046a542ad1bSJan Schmidt 		return 1;
1047a542ad1bSJan Schmidt 
1048a542ad1bSJan Schmidt 	while (1) {
1049a542ad1bSJan Schmidt 		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
1050a542ad1bSJan Schmidt 						&eiref, &type);
1051a542ad1bSJan Schmidt 		if (ret < 0)
1052a542ad1bSJan Schmidt 			return ret;
1053a542ad1bSJan Schmidt 
1054a542ad1bSJan Schmidt 		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1055a542ad1bSJan Schmidt 		    type == BTRFS_SHARED_BLOCK_REF_KEY)
1056a542ad1bSJan Schmidt 			break;
1057a542ad1bSJan Schmidt 
1058a542ad1bSJan Schmidt 		if (ret == 1)
1059a542ad1bSJan Schmidt 			return 1;
1060a542ad1bSJan Schmidt 	}
1061a542ad1bSJan Schmidt 
1062a542ad1bSJan Schmidt 	/* we can treat both ref types equally here */
1063a542ad1bSJan Schmidt 	info = (struct btrfs_tree_block_info *)(ei + 1);
1064a542ad1bSJan Schmidt 	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1065a542ad1bSJan Schmidt 	*out_level = btrfs_tree_block_level(eb, info);
1066a542ad1bSJan Schmidt 
1067a542ad1bSJan Schmidt 	if (ret == 1)
1068a542ad1bSJan Schmidt 		*ptr = (unsigned long)-1;
1069a542ad1bSJan Schmidt 
1070a542ad1bSJan Schmidt 	return 0;
1071a542ad1bSJan Schmidt }
1072a542ad1bSJan Schmidt 
1073a542ad1bSJan Schmidt static int __data_list_add(struct list_head *head, u64 inum,
1074a542ad1bSJan Schmidt 				u64 extent_data_item_offset, u64 root)
1075a542ad1bSJan Schmidt {
1076a542ad1bSJan Schmidt 	struct __data_ref *ref;
1077a542ad1bSJan Schmidt 
1078a542ad1bSJan Schmidt 	ref = kmalloc(sizeof(*ref), GFP_NOFS);
1079a542ad1bSJan Schmidt 	if (!ref)
1080a542ad1bSJan Schmidt 		return -ENOMEM;
1081a542ad1bSJan Schmidt 
1082a542ad1bSJan Schmidt 	ref->inum = inum;
1083a542ad1bSJan Schmidt 	ref->extent_data_item_offset = extent_data_item_offset;
1084a542ad1bSJan Schmidt 	ref->root = root;
1085a542ad1bSJan Schmidt 	list_add_tail(&ref->list, head);
1086a542ad1bSJan Schmidt 
1087a542ad1bSJan Schmidt 	return 0;
1088a542ad1bSJan Schmidt }
1089a542ad1bSJan Schmidt 
1090a542ad1bSJan Schmidt static int __data_list_add_eb(struct list_head *head, struct extent_buffer *eb,
1091a542ad1bSJan Schmidt 				struct btrfs_extent_data_ref *dref)
1092a542ad1bSJan Schmidt {
1093a542ad1bSJan Schmidt 	return __data_list_add(head, btrfs_extent_data_ref_objectid(eb, dref),
1094a542ad1bSJan Schmidt 				btrfs_extent_data_ref_offset(eb, dref),
1095a542ad1bSJan Schmidt 				btrfs_extent_data_ref_root(eb, dref));
1096a542ad1bSJan Schmidt }
1097a542ad1bSJan Schmidt 
1098a542ad1bSJan Schmidt static int __shared_list_add(struct list_head *head, u64 disk_byte)
1099a542ad1bSJan Schmidt {
1100a542ad1bSJan Schmidt 	struct __shared_ref *ref;
1101a542ad1bSJan Schmidt 
1102a542ad1bSJan Schmidt 	ref = kmalloc(sizeof(*ref), GFP_NOFS);
1103a542ad1bSJan Schmidt 	if (!ref)
1104a542ad1bSJan Schmidt 		return -ENOMEM;
1105a542ad1bSJan Schmidt 
1106a542ad1bSJan Schmidt 	ref->disk_byte = disk_byte;
1107a542ad1bSJan Schmidt 	list_add_tail(&ref->list, head);
1108a542ad1bSJan Schmidt 
1109a542ad1bSJan Schmidt 	return 0;
1110a542ad1bSJan Schmidt }
1111a542ad1bSJan Schmidt 
1112a542ad1bSJan Schmidt static int __iter_shared_inline_ref_inodes(struct btrfs_fs_info *fs_info,
1113a542ad1bSJan Schmidt 					   u64 logical, u64 inum,
1114a542ad1bSJan Schmidt 					   u64 extent_data_item_offset,
1115a542ad1bSJan Schmidt 					   u64 extent_offset,
1116a542ad1bSJan Schmidt 					   struct btrfs_path *path,
1117a542ad1bSJan Schmidt 					   struct list_head *data_refs,
1118a542ad1bSJan Schmidt 					   iterate_extent_inodes_t *iterate,
1119a542ad1bSJan Schmidt 					   void *ctx)
1120a542ad1bSJan Schmidt {
1121a542ad1bSJan Schmidt 	u64 ref_root;
1122a542ad1bSJan Schmidt 	u32 item_size;
1123a542ad1bSJan Schmidt 	struct btrfs_key key;
1124a542ad1bSJan Schmidt 	struct extent_buffer *eb;
1125a542ad1bSJan Schmidt 	struct btrfs_extent_item *ei;
1126a542ad1bSJan Schmidt 	struct btrfs_extent_inline_ref *eiref;
1127a542ad1bSJan Schmidt 	struct __data_ref *ref;
1128a542ad1bSJan Schmidt 	int ret;
1129a542ad1bSJan Schmidt 	int type;
1130a542ad1bSJan Schmidt 	int last;
1131a542ad1bSJan Schmidt 	unsigned long ptr = 0;
1132a542ad1bSJan Schmidt 
1133a542ad1bSJan Schmidt 	WARN_ON(!list_empty(data_refs));
1134a542ad1bSJan Schmidt 	ret = extent_from_logical(fs_info, logical, path, &key);
1135a542ad1bSJan Schmidt 	if (ret & BTRFS_EXTENT_FLAG_DATA)
1136a542ad1bSJan Schmidt 		ret = -EIO;
1137a542ad1bSJan Schmidt 	if (ret < 0)
1138a542ad1bSJan Schmidt 		goto out;
1139a542ad1bSJan Schmidt 
1140a542ad1bSJan Schmidt 	eb = path->nodes[0];
1141a542ad1bSJan Schmidt 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1142a542ad1bSJan Schmidt 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
1143a542ad1bSJan Schmidt 
1144a542ad1bSJan Schmidt 	ret = 0;
1145a542ad1bSJan Schmidt 	ref_root = 0;
1146a542ad1bSJan Schmidt 	/*
1147a542ad1bSJan Schmidt 	 * as done in iterate_extent_inodes, we first build a list of refs to
1148a542ad1bSJan Schmidt 	 * iterate, then free the path and then iterate them to avoid deadlocks.
1149a542ad1bSJan Schmidt 	 */
1150a542ad1bSJan Schmidt 	do {
1151a542ad1bSJan Schmidt 		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
1152a542ad1bSJan Schmidt 						&eiref, &type);
1153a542ad1bSJan Schmidt 		if (last < 0) {
1154a542ad1bSJan Schmidt 			ret = last;
1155a542ad1bSJan Schmidt 			goto out;
1156a542ad1bSJan Schmidt 		}
1157a542ad1bSJan Schmidt 		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1158a542ad1bSJan Schmidt 		    type == BTRFS_SHARED_BLOCK_REF_KEY) {
1159a542ad1bSJan Schmidt 			ref_root = btrfs_extent_inline_ref_offset(eb, eiref);
1160a542ad1bSJan Schmidt 			ret = __data_list_add(data_refs, inum,
1161a542ad1bSJan Schmidt 						extent_data_item_offset,
1162a542ad1bSJan Schmidt 						ref_root);
1163a542ad1bSJan Schmidt 		}
1164a542ad1bSJan Schmidt 	} while (!ret && !last);
1165a542ad1bSJan Schmidt 
1166a542ad1bSJan Schmidt 	btrfs_release_path(path);
1167a542ad1bSJan Schmidt 
1168a542ad1bSJan Schmidt 	if (ref_root == 0) {
1169a542ad1bSJan Schmidt 		printk(KERN_ERR "btrfs: failed to find tree block ref "
1170a542ad1bSJan Schmidt 			"for shared data backref %llu\n", logical);
1171a542ad1bSJan Schmidt 		WARN_ON(1);
1172a542ad1bSJan Schmidt 		ret = -EIO;
1173a542ad1bSJan Schmidt 	}
1174a542ad1bSJan Schmidt 
1175a542ad1bSJan Schmidt out:
1176a542ad1bSJan Schmidt 	while (!list_empty(data_refs)) {
1177a542ad1bSJan Schmidt 		ref = list_first_entry(data_refs, struct __data_ref, list);
1178a542ad1bSJan Schmidt 		list_del(&ref->list);
1179a542ad1bSJan Schmidt 		if (!ret)
1180a542ad1bSJan Schmidt 			ret = iterate(ref->inum, extent_offset +
1181a542ad1bSJan Schmidt 					ref->extent_data_item_offset,
1182a542ad1bSJan Schmidt 					ref->root, ctx);
1183a542ad1bSJan Schmidt 		kfree(ref);
1184a542ad1bSJan Schmidt 	}
1185a542ad1bSJan Schmidt 
1186a542ad1bSJan Schmidt 	return ret;
1187a542ad1bSJan Schmidt }
1188a542ad1bSJan Schmidt 
1189a542ad1bSJan Schmidt static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
1190a542ad1bSJan Schmidt 				    u64 logical, u64 orig_extent_item_objectid,
1191a542ad1bSJan Schmidt 				    u64 extent_offset, struct btrfs_path *path,
1192a542ad1bSJan Schmidt 				    struct list_head *data_refs,
1193a542ad1bSJan Schmidt 				    iterate_extent_inodes_t *iterate,
1194a542ad1bSJan Schmidt 				    void *ctx)
1195a542ad1bSJan Schmidt {
1196a542ad1bSJan Schmidt 	u64 disk_byte;
1197a542ad1bSJan Schmidt 	struct btrfs_key key;
1198a542ad1bSJan Schmidt 	struct btrfs_file_extent_item *fi;
1199a542ad1bSJan Schmidt 	struct extent_buffer *eb;
1200a542ad1bSJan Schmidt 	int slot;
1201a542ad1bSJan Schmidt 	int nritems;
1202a542ad1bSJan Schmidt 	int ret;
1203a542ad1bSJan Schmidt 	int found = 0;
1204a542ad1bSJan Schmidt 
1205a542ad1bSJan Schmidt 	eb = read_tree_block(fs_info->tree_root, logical,
1206a542ad1bSJan Schmidt 				fs_info->tree_root->leafsize, 0);
1207a542ad1bSJan Schmidt 	if (!eb)
1208a542ad1bSJan Schmidt 		return -EIO;
1209a542ad1bSJan Schmidt 
1210a542ad1bSJan Schmidt 	/*
1211a542ad1bSJan Schmidt 	 * from the shared data ref, we only have the leaf but we need
1212a542ad1bSJan Schmidt 	 * the key. thus, we must look into all items and see that we
1213a542ad1bSJan Schmidt 	 * find one (some) with a reference to our extent item.
1214a542ad1bSJan Schmidt 	 */
1215a542ad1bSJan Schmidt 	nritems = btrfs_header_nritems(eb);
1216a542ad1bSJan Schmidt 	for (slot = 0; slot < nritems; ++slot) {
1217a542ad1bSJan Schmidt 		btrfs_item_key_to_cpu(eb, &key, slot);
1218a542ad1bSJan Schmidt 		if (key.type != BTRFS_EXTENT_DATA_KEY)
1219a542ad1bSJan Schmidt 			continue;
1220a542ad1bSJan Schmidt 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
1221a542ad1bSJan Schmidt 		if (!fi) {
1222a542ad1bSJan Schmidt 			free_extent_buffer(eb);
1223a542ad1bSJan Schmidt 			return -EIO;
1224a542ad1bSJan Schmidt 		}
1225a542ad1bSJan Schmidt 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1226a542ad1bSJan Schmidt 		if (disk_byte != orig_extent_item_objectid) {
1227a542ad1bSJan Schmidt 			if (found)
1228a542ad1bSJan Schmidt 				break;
1229a542ad1bSJan Schmidt 			else
1230a542ad1bSJan Schmidt 				continue;
1231a542ad1bSJan Schmidt 		}
1232a542ad1bSJan Schmidt 		++found;
1233a542ad1bSJan Schmidt 		ret = __iter_shared_inline_ref_inodes(fs_info, logical,
1234a542ad1bSJan Schmidt 							key.objectid,
1235a542ad1bSJan Schmidt 							key.offset,
1236a542ad1bSJan Schmidt 							extent_offset, path,
1237a542ad1bSJan Schmidt 							data_refs,
1238a542ad1bSJan Schmidt 							iterate, ctx);
1239a542ad1bSJan Schmidt 		if (ret)
1240a542ad1bSJan Schmidt 			break;
1241a542ad1bSJan Schmidt 	}
1242a542ad1bSJan Schmidt 
1243a542ad1bSJan Schmidt 	if (!found) {
1244a542ad1bSJan Schmidt 		printk(KERN_ERR "btrfs: failed to follow shared data backref "
1245a542ad1bSJan Schmidt 			"to parent %llu\n", logical);
1246a542ad1bSJan Schmidt 		WARN_ON(1);
1247a542ad1bSJan Schmidt 		ret = -EIO;
1248a542ad1bSJan Schmidt 	}
1249a542ad1bSJan Schmidt 
1250a542ad1bSJan Schmidt 	free_extent_buffer(eb);
1251a542ad1bSJan Schmidt 	return ret;
1252a542ad1bSJan Schmidt }
1253a542ad1bSJan Schmidt 
1254a542ad1bSJan Schmidt /*
1255a542ad1bSJan Schmidt  * calls iterate() for every inode that references the extent identified by
1256a542ad1bSJan Schmidt  * the given parameters. will use the path given as a parameter and return it
1257a542ad1bSJan Schmidt  * released.
1258a542ad1bSJan Schmidt  * when the iterator function returns a non-zero value, iteration stops.
1259a542ad1bSJan Schmidt  */
1260a542ad1bSJan Schmidt int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1261a542ad1bSJan Schmidt 				struct btrfs_path *path,
1262a542ad1bSJan Schmidt 				u64 extent_item_objectid,
1263a542ad1bSJan Schmidt 				u64 extent_offset,
1264a542ad1bSJan Schmidt 				iterate_extent_inodes_t *iterate, void *ctx)
1265a542ad1bSJan Schmidt {
1266a542ad1bSJan Schmidt 	unsigned long ptr = 0;
1267a542ad1bSJan Schmidt 	int last;
1268a542ad1bSJan Schmidt 	int ret;
1269a542ad1bSJan Schmidt 	int type;
1270a542ad1bSJan Schmidt 	u64 logical;
1271a542ad1bSJan Schmidt 	u32 item_size;
1272a542ad1bSJan Schmidt 	struct btrfs_extent_inline_ref *eiref;
1273a542ad1bSJan Schmidt 	struct btrfs_extent_data_ref *dref;
1274a542ad1bSJan Schmidt 	struct extent_buffer *eb;
1275a542ad1bSJan Schmidt 	struct btrfs_extent_item *ei;
1276a542ad1bSJan Schmidt 	struct btrfs_key key;
1277a542ad1bSJan Schmidt 	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
1278a542ad1bSJan Schmidt 	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
1279a542ad1bSJan Schmidt 	struct __data_ref *ref_d;
1280a542ad1bSJan Schmidt 	struct __shared_ref *ref_s;
1281a542ad1bSJan Schmidt 
1282a542ad1bSJan Schmidt 	eb = path->nodes[0];
1283a542ad1bSJan Schmidt 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1284a542ad1bSJan Schmidt 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
1285a542ad1bSJan Schmidt 
1286a542ad1bSJan Schmidt 	/* first we iterate the inline refs, ... */
1287a542ad1bSJan Schmidt 	do {
1288a542ad1bSJan Schmidt 		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
1289a542ad1bSJan Schmidt 						&eiref, &type);
1290a542ad1bSJan Schmidt 		if (last == -ENOENT) {
1291a542ad1bSJan Schmidt 			ret = 0;
1292a542ad1bSJan Schmidt 			break;
1293a542ad1bSJan Schmidt 		}
1294a542ad1bSJan Schmidt 		if (last < 0) {
1295a542ad1bSJan Schmidt 			ret = last;
1296a542ad1bSJan Schmidt 			break;
1297a542ad1bSJan Schmidt 		}
1298a542ad1bSJan Schmidt 
1299a542ad1bSJan Schmidt 		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1300a542ad1bSJan Schmidt 			dref = (struct btrfs_extent_data_ref *)(&eiref->offset);
1301a542ad1bSJan Schmidt 			ret = __data_list_add_eb(&data_refs, eb, dref);
1302a542ad1bSJan Schmidt 		} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1303a542ad1bSJan Schmidt 			logical = btrfs_extent_inline_ref_offset(eb, eiref);
1304a542ad1bSJan Schmidt 			ret = __shared_list_add(&shared_refs, logical);
1305a542ad1bSJan Schmidt 		}
1306a542ad1bSJan Schmidt 	} while (!ret && !last);
1307a542ad1bSJan Schmidt 
1308a542ad1bSJan Schmidt 	/* ... then we proceed to in-tree references and ... */
1309a542ad1bSJan Schmidt 	while (!ret) {
1310a542ad1bSJan Schmidt 		++path->slots[0];
1311a542ad1bSJan Schmidt 		if (path->slots[0] > btrfs_header_nritems(eb)) {
1312a542ad1bSJan Schmidt 			ret = btrfs_next_leaf(fs_info->extent_root, path);
1313a542ad1bSJan Schmidt 			if (ret) {
1314a542ad1bSJan Schmidt 				if (ret == 1)
1315a542ad1bSJan Schmidt 					ret = 0; /* we're done */
1316a542ad1bSJan Schmidt 				break;
1317a542ad1bSJan Schmidt 			}
1318a542ad1bSJan Schmidt 			eb = path->nodes[0];
1319a542ad1bSJan Schmidt 		}
1320a542ad1bSJan Schmidt 		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
1321a542ad1bSJan Schmidt 		if (key.objectid != extent_item_objectid)
1322a542ad1bSJan Schmidt 			break;
1323a542ad1bSJan Schmidt 		if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1324a542ad1bSJan Schmidt 			dref = btrfs_item_ptr(eb, path->slots[0],
1325a542ad1bSJan Schmidt 						struct btrfs_extent_data_ref);
1326a542ad1bSJan Schmidt 			ret = __data_list_add_eb(&data_refs, eb, dref);
1327a542ad1bSJan Schmidt 		} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1328a542ad1bSJan Schmidt 			ret = __shared_list_add(&shared_refs, key.offset);
1329a542ad1bSJan Schmidt 		}
1330a542ad1bSJan Schmidt 	}
1331a542ad1bSJan Schmidt 
1332a542ad1bSJan Schmidt 	btrfs_release_path(path);
1333a542ad1bSJan Schmidt 
1334a542ad1bSJan Schmidt 	/*
1335a542ad1bSJan Schmidt 	 * ... only at the very end we can process the refs we found. this is
1336a542ad1bSJan Schmidt 	 * because the iterator function we call is allowed to make tree lookups
1337a542ad1bSJan Schmidt 	 * and we have to avoid deadlocks. additionally, we need more tree
1338a542ad1bSJan Schmidt 	 * lookups ourselves for shared data refs.
1339a542ad1bSJan Schmidt 	 */
1340a542ad1bSJan Schmidt 	while (!list_empty(&data_refs)) {
1341a542ad1bSJan Schmidt 		ref_d = list_first_entry(&data_refs, struct __data_ref, list);
1342a542ad1bSJan Schmidt 		list_del(&ref_d->list);
1343a542ad1bSJan Schmidt 		if (!ret)
1344a542ad1bSJan Schmidt 			ret = iterate(ref_d->inum, extent_offset +
1345a542ad1bSJan Schmidt 					ref_d->extent_data_item_offset,
1346a542ad1bSJan Schmidt 					ref_d->root, ctx);
1347a542ad1bSJan Schmidt 		kfree(ref_d);
1348a542ad1bSJan Schmidt 	}
1349a542ad1bSJan Schmidt 
1350a542ad1bSJan Schmidt 	while (!list_empty(&shared_refs)) {
1351a542ad1bSJan Schmidt 		ref_s = list_first_entry(&shared_refs, struct __shared_ref,
1352a542ad1bSJan Schmidt 					list);
1353a542ad1bSJan Schmidt 		list_del(&ref_s->list);
1354a542ad1bSJan Schmidt 		if (!ret)
1355a542ad1bSJan Schmidt 			ret = __iter_shared_inline_ref(fs_info,
1356a542ad1bSJan Schmidt 							ref_s->disk_byte,
1357a542ad1bSJan Schmidt 							extent_item_objectid,
1358a542ad1bSJan Schmidt 							extent_offset, path,
1359a542ad1bSJan Schmidt 							&data_refs,
1360a542ad1bSJan Schmidt 							iterate, ctx);
1361a542ad1bSJan Schmidt 		kfree(ref_s);
1362a542ad1bSJan Schmidt 	}
1363a542ad1bSJan Schmidt 
1364a542ad1bSJan Schmidt 	return ret;
1365a542ad1bSJan Schmidt }
1366a542ad1bSJan Schmidt 
1367a542ad1bSJan Schmidt int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1368a542ad1bSJan Schmidt 				struct btrfs_path *path,
1369a542ad1bSJan Schmidt 				iterate_extent_inodes_t *iterate, void *ctx)
1370a542ad1bSJan Schmidt {
1371a542ad1bSJan Schmidt 	int ret;
1372a542ad1bSJan Schmidt 	u64 offset;
1373a542ad1bSJan Schmidt 	struct btrfs_key found_key;
1374a542ad1bSJan Schmidt 
1375a542ad1bSJan Schmidt 	ret = extent_from_logical(fs_info, logical, path,
1376a542ad1bSJan Schmidt 					&found_key);
1377a542ad1bSJan Schmidt 	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1378a542ad1bSJan Schmidt 		ret = -EINVAL;
1379a542ad1bSJan Schmidt 	if (ret < 0)
1380a542ad1bSJan Schmidt 		return ret;
1381a542ad1bSJan Schmidt 
1382a542ad1bSJan Schmidt 	offset = logical - found_key.objectid;
1383a542ad1bSJan Schmidt 	ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
1384a542ad1bSJan Schmidt 					offset, iterate, ctx);
1385a542ad1bSJan Schmidt 
1386a542ad1bSJan Schmidt 	return ret;
1387a542ad1bSJan Schmidt }
1388a542ad1bSJan Schmidt 
1389a542ad1bSJan Schmidt static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
1390a542ad1bSJan Schmidt 				struct btrfs_path *path,
1391a542ad1bSJan Schmidt 				iterate_irefs_t *iterate, void *ctx)
1392a542ad1bSJan Schmidt {
1393a542ad1bSJan Schmidt 	int ret;
1394a542ad1bSJan Schmidt 	int slot;
1395a542ad1bSJan Schmidt 	u32 cur;
1396a542ad1bSJan Schmidt 	u32 len;
1397a542ad1bSJan Schmidt 	u32 name_len;
1398a542ad1bSJan Schmidt 	u64 parent = 0;
1399a542ad1bSJan Schmidt 	int found = 0;
1400a542ad1bSJan Schmidt 	struct extent_buffer *eb;
1401a542ad1bSJan Schmidt 	struct btrfs_item *item;
1402a542ad1bSJan Schmidt 	struct btrfs_inode_ref *iref;
1403a542ad1bSJan Schmidt 	struct btrfs_key found_key;
1404a542ad1bSJan Schmidt 
1405a542ad1bSJan Schmidt 	while (1) {
1406a542ad1bSJan Schmidt 		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
1407a542ad1bSJan Schmidt 					&found_key);
1408a542ad1bSJan Schmidt 		if (ret < 0)
1409a542ad1bSJan Schmidt 			break;
1410a542ad1bSJan Schmidt 		if (ret) {
1411a542ad1bSJan Schmidt 			ret = found ? 0 : -ENOENT;
1412a542ad1bSJan Schmidt 			break;
1413a542ad1bSJan Schmidt 		}
1414a542ad1bSJan Schmidt 		++found;
1415a542ad1bSJan Schmidt 
1416a542ad1bSJan Schmidt 		parent = found_key.offset;
1417a542ad1bSJan Schmidt 		slot = path->slots[0];
1418a542ad1bSJan Schmidt 		eb = path->nodes[0];
1419a542ad1bSJan Schmidt 		/* make sure we can use eb after releasing the path */
1420a542ad1bSJan Schmidt 		atomic_inc(&eb->refs);
1421a542ad1bSJan Schmidt 		btrfs_release_path(path);
1422a542ad1bSJan Schmidt 
1423a542ad1bSJan Schmidt 		item = btrfs_item_nr(eb, slot);
1424a542ad1bSJan Schmidt 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1425a542ad1bSJan Schmidt 
1426a542ad1bSJan Schmidt 		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
1427a542ad1bSJan Schmidt 			name_len = btrfs_inode_ref_name_len(eb, iref);
1428a542ad1bSJan Schmidt 			/* path must be released before calling iterate()! */
1429a542ad1bSJan Schmidt 			ret = iterate(parent, iref, eb, ctx);
1430a542ad1bSJan Schmidt 			if (ret) {
1431a542ad1bSJan Schmidt 				free_extent_buffer(eb);
1432a542ad1bSJan Schmidt 				break;
1433a542ad1bSJan Schmidt 			}
1434a542ad1bSJan Schmidt 			len = sizeof(*iref) + name_len;
1435a542ad1bSJan Schmidt 			iref = (struct btrfs_inode_ref *)((char *)iref + len);
1436a542ad1bSJan Schmidt 		}
1437a542ad1bSJan Schmidt 		free_extent_buffer(eb);
1438a542ad1bSJan Schmidt 	}
1439a542ad1bSJan Schmidt 
1440a542ad1bSJan Schmidt 	btrfs_release_path(path);
1441a542ad1bSJan Schmidt 
1442a542ad1bSJan Schmidt 	return ret;
1443a542ad1bSJan Schmidt }
1444a542ad1bSJan Schmidt 
1445a542ad1bSJan Schmidt /*
1446a542ad1bSJan Schmidt  * returns 0 if the path could be dumped (probably truncated)
1447a542ad1bSJan Schmidt  * returns <0 in case of an error
1448a542ad1bSJan Schmidt  */
1449a542ad1bSJan Schmidt static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
1450a542ad1bSJan Schmidt 				struct extent_buffer *eb, void *ctx)
1451a542ad1bSJan Schmidt {
1452a542ad1bSJan Schmidt 	struct inode_fs_paths *ipath = ctx;
1453a542ad1bSJan Schmidt 	char *fspath;
1454a542ad1bSJan Schmidt 	char *fspath_min;
1455a542ad1bSJan Schmidt 	int i = ipath->fspath->elem_cnt;
1456a542ad1bSJan Schmidt 	const int s_ptr = sizeof(char *);
1457a542ad1bSJan Schmidt 	u32 bytes_left;
1458a542ad1bSJan Schmidt 
1459a542ad1bSJan Schmidt 	bytes_left = ipath->fspath->bytes_left > s_ptr ?
1460a542ad1bSJan Schmidt 					ipath->fspath->bytes_left - s_ptr : 0;
1461a542ad1bSJan Schmidt 
1462740c3d22SChris Mason 	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
1463a542ad1bSJan Schmidt 	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
1464a542ad1bSJan Schmidt 				inum, fspath_min, bytes_left);
1465a542ad1bSJan Schmidt 	if (IS_ERR(fspath))
1466a542ad1bSJan Schmidt 		return PTR_ERR(fspath);
1467a542ad1bSJan Schmidt 
1468a542ad1bSJan Schmidt 	if (fspath > fspath_min) {
1469745c4d8eSJeff Mahoney 		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
1470a542ad1bSJan Schmidt 		++ipath->fspath->elem_cnt;
1471a542ad1bSJan Schmidt 		ipath->fspath->bytes_left = fspath - fspath_min;
1472a542ad1bSJan Schmidt 	} else {
1473a542ad1bSJan Schmidt 		++ipath->fspath->elem_missed;
1474a542ad1bSJan Schmidt 		ipath->fspath->bytes_missing += fspath_min - fspath;
1475a542ad1bSJan Schmidt 		ipath->fspath->bytes_left = 0;
1476a542ad1bSJan Schmidt 	}
1477a542ad1bSJan Schmidt 
1478a542ad1bSJan Schmidt 	return 0;
1479a542ad1bSJan Schmidt }
1480a542ad1bSJan Schmidt 
1481a542ad1bSJan Schmidt /*
1482a542ad1bSJan Schmidt  * this dumps all file system paths to the inode into the ipath struct, provided
1483a542ad1bSJan Schmidt  * is has been created large enough. each path is zero-terminated and accessed
1484740c3d22SChris Mason  * from ipath->fspath->val[i].
1485a542ad1bSJan Schmidt  * when it returns, there are ipath->fspath->elem_cnt number of paths available
1486740c3d22SChris Mason  * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
1487a542ad1bSJan Schmidt  * number of missed paths in recored in ipath->fspath->elem_missed, otherwise,
1488a542ad1bSJan Schmidt  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
1489a542ad1bSJan Schmidt  * have been needed to return all paths.
1490a542ad1bSJan Schmidt  */
1491a542ad1bSJan Schmidt int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
1492a542ad1bSJan Schmidt {
1493a542ad1bSJan Schmidt 	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
1494a542ad1bSJan Schmidt 				inode_to_path, ipath);
1495a542ad1bSJan Schmidt }
1496a542ad1bSJan Schmidt 
1497a542ad1bSJan Schmidt /*
1498a542ad1bSJan Schmidt  * allocates space to return multiple file system paths for an inode.
1499a542ad1bSJan Schmidt  * total_bytes to allocate are passed, note that space usable for actual path
1500a542ad1bSJan Schmidt  * information will be total_bytes - sizeof(struct inode_fs_paths).
1501a542ad1bSJan Schmidt  * the returned pointer must be freed with free_ipath() in the end.
1502a542ad1bSJan Schmidt  */
1503a542ad1bSJan Schmidt struct btrfs_data_container *init_data_container(u32 total_bytes)
1504a542ad1bSJan Schmidt {
1505a542ad1bSJan Schmidt 	struct btrfs_data_container *data;
1506a542ad1bSJan Schmidt 	size_t alloc_bytes;
1507a542ad1bSJan Schmidt 
1508a542ad1bSJan Schmidt 	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
1509a542ad1bSJan Schmidt 	data = kmalloc(alloc_bytes, GFP_NOFS);
1510a542ad1bSJan Schmidt 	if (!data)
1511a542ad1bSJan Schmidt 		return ERR_PTR(-ENOMEM);
1512a542ad1bSJan Schmidt 
1513a542ad1bSJan Schmidt 	if (total_bytes >= sizeof(*data)) {
1514a542ad1bSJan Schmidt 		data->bytes_left = total_bytes - sizeof(*data);
1515a542ad1bSJan Schmidt 		data->bytes_missing = 0;
1516a542ad1bSJan Schmidt 	} else {
1517a542ad1bSJan Schmidt 		data->bytes_missing = sizeof(*data) - total_bytes;
1518a542ad1bSJan Schmidt 		data->bytes_left = 0;
1519a542ad1bSJan Schmidt 	}
1520a542ad1bSJan Schmidt 
1521a542ad1bSJan Schmidt 	data->elem_cnt = 0;
1522a542ad1bSJan Schmidt 	data->elem_missed = 0;
1523a542ad1bSJan Schmidt 
1524a542ad1bSJan Schmidt 	return data;
1525a542ad1bSJan Schmidt }
1526a542ad1bSJan Schmidt 
1527a542ad1bSJan Schmidt /*
1528a542ad1bSJan Schmidt  * allocates space to return multiple file system paths for an inode.
1529a542ad1bSJan Schmidt  * total_bytes to allocate are passed, note that space usable for actual path
1530a542ad1bSJan Schmidt  * information will be total_bytes - sizeof(struct inode_fs_paths).
1531a542ad1bSJan Schmidt  * the returned pointer must be freed with free_ipath() in the end.
1532a542ad1bSJan Schmidt  */
1533a542ad1bSJan Schmidt struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
1534a542ad1bSJan Schmidt 					struct btrfs_path *path)
1535a542ad1bSJan Schmidt {
1536a542ad1bSJan Schmidt 	struct inode_fs_paths *ifp;
1537a542ad1bSJan Schmidt 	struct btrfs_data_container *fspath;
1538a542ad1bSJan Schmidt 
1539a542ad1bSJan Schmidt 	fspath = init_data_container(total_bytes);
1540a542ad1bSJan Schmidt 	if (IS_ERR(fspath))
1541a542ad1bSJan Schmidt 		return (void *)fspath;
1542a542ad1bSJan Schmidt 
1543a542ad1bSJan Schmidt 	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
1544a542ad1bSJan Schmidt 	if (!ifp) {
1545a542ad1bSJan Schmidt 		kfree(fspath);
1546a542ad1bSJan Schmidt 		return ERR_PTR(-ENOMEM);
1547a542ad1bSJan Schmidt 	}
1548a542ad1bSJan Schmidt 
1549a542ad1bSJan Schmidt 	ifp->btrfs_path = path;
1550a542ad1bSJan Schmidt 	ifp->fspath = fspath;
1551a542ad1bSJan Schmidt 	ifp->fs_root = fs_root;
1552a542ad1bSJan Schmidt 
1553a542ad1bSJan Schmidt 	return ifp;
1554a542ad1bSJan Schmidt }
1555a542ad1bSJan Schmidt 
1556a542ad1bSJan Schmidt void free_ipath(struct inode_fs_paths *ipath)
1557a542ad1bSJan Schmidt {
1558a542ad1bSJan Schmidt 	kfree(ipath);
1559a542ad1bSJan Schmidt }
1560