/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include "ctree.h"
#include "disk-io.h"
#include "backref.h"

struct __data_ref {
	struct list_head list;
	u64 inum;
	u64 root;
	u64 extent_data_item_offset;
};

struct __shared_ref {
	struct list_head list;
	u64 disk_byte;
};

static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
				&key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
				found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path.
 * elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer is filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 * (a usage sketch follows the function below.)
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
				struct btrfs_inode_ref *iref,
				struct extent_buffer *eb_in, u64 parent,
				char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
						(unsigned long)(iref + 1), len);
		if (eb != eb_in)
			free_extent_buffer(eb);
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in)
			atomic_inc(&eb->refs);
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
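
/*
 * Illustrative sketch (not part of the original code): how a caller might
 * consume the return value of iref_to_path(). The buffer name "buf" and the
 * surrounding variables are hypothetical; the same pattern is used by
 * inode_to_path() further below with the ipath buffers.
 *
 *	char buf[4096];
 *	char *start;
 *
 *	start = iref_to_path(fs_root, path, iref, eb, parent, buf, sizeof(buf));
 *	if (IS_ERR(start))
 *		return PTR_ERR(start);
 *	if (start < buf) {
 *		// truncated: (buf - start) more bytes would have been needed
 *	} else {
 *		// start points to the 0-terminated path inside buf
 *	}
 */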

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
					0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical)
		return -ENOENT;

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				struct btrfs_extent_inline_ref **out_eiref,
				int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
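
/*
 * Illustrative sketch (not part of the original code) of the iterator
 * protocol described above: ptr starts at 0, each call yields one inline ref,
 * and a return value of 1 marks the last one. Variable names are
 * hypothetical; tree_backref_for_extent() and the callers further below use
 * the same pattern.
 *
 *	unsigned long ptr = 0;
 *	struct btrfs_extent_inline_ref *eiref;
 *	int type;
 *	int ret;
 *
 *	do {
 *		ret = __get_extent_inline_ref(&ptr, eb, ei, item_size,
 *						&eiref, &type);
 *		if (ret < 0)
 *			return ret;	// iterate_extent_inodes() treats
 *					// -ENOENT as "no inline refs"
 *		// eiref and type describe one inline ref here,
 *		// including the last one (when ret == 1)
 *	} while (ret != 1);
 */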

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
						&eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
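
/*
 * Illustrative sketch (not part of the original code): looping over all tree
 * block backrefs of a metadata extent with tree_backref_for_extent(). It
 * assumes eb, ei and item_size were obtained from a path positioned by
 * extent_from_logical(); what a caller does with root and level varies.
 *
 *	unsigned long ptr = 0;
 *	u64 root;
 *	u8 level;
 *	int ret;
 *
 *	while (1) {
 *		ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
 *						&root, &level);
 *		if (ret < 0)
 *			return ret;
 *		if (ret == 1)
 *			break;	// no more backrefs
 *		// here: the tree block is referenced from 'root' at 'level'
 *	}
 */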

static int __data_list_add(struct list_head *head, u64 inum,
				u64 extent_data_item_offset, u64 root)
{
	struct __data_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->inum = inum;
	ref->extent_data_item_offset = extent_data_item_offset;
	ref->root = root;
	list_add_tail(&ref->list, head);

	return 0;
}

static int __data_list_add_eb(struct list_head *head, struct extent_buffer *eb,
				struct btrfs_extent_data_ref *dref)
{
	return __data_list_add(head, btrfs_extent_data_ref_objectid(eb, dref),
				btrfs_extent_data_ref_offset(eb, dref),
				btrfs_extent_data_ref_root(eb, dref));
}

static int __shared_list_add(struct list_head *head, u64 disk_byte)
{
	struct __shared_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->disk_byte = disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

static int __iter_shared_inline_ref_inodes(struct btrfs_fs_info *fs_info,
					   u64 logical, u64 inum,
					   u64 extent_data_item_offset,
					   u64 extent_offset,
					   struct btrfs_path *path,
					   struct list_head *data_refs,
					   iterate_extent_inodes_t *iterate,
					   void *ctx)
{
	u64 ref_root;
	u32 item_size;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *eiref;
	struct __data_ref *ref;
	int ret;
	int type;
	int last;
	unsigned long ptr = 0;

	WARN_ON(!list_empty(data_refs));
	ret = extent_from_logical(fs_info, logical, path, &key);
	if (ret & BTRFS_EXTENT_FLAG_DATA)
		ret = -EIO;
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	ret = 0;
	ref_root = 0;
	/*
	 * as done in iterate_extent_inodes, we first build a list of refs to
	 * iterate, then free the path and then iterate them to avoid deadlocks.
	 */
	do {
		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
						&eiref, &type);
		if (last < 0) {
			ret = last;
			goto out;
		}
		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ref_root = btrfs_extent_inline_ref_offset(eb, eiref);
			ret = __data_list_add(data_refs, inum,
						extent_data_item_offset,
						ref_root);
		}
	} while (!ret && !last);

	btrfs_release_path(path);

	if (ref_root == 0) {
		printk(KERN_ERR "btrfs: failed to find tree block ref "
			"for shared data backref %llu\n", logical);
		WARN_ON(1);
		ret = -EIO;
	}

out:
	while (!list_empty(data_refs)) {
		ref = list_first_entry(data_refs, struct __data_ref, list);
		list_del(&ref->list);
		if (!ret)
			ret = iterate(ref->inum, extent_offset +
					ref->extent_data_item_offset,
					ref->root, ctx);
		kfree(ref);
	}

	return ret;
}

static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
				    u64 logical, u64 orig_extent_item_objectid,
				    u64 extent_offset, struct btrfs_path *path,
				    struct list_head *data_refs,
				    iterate_extent_inodes_t *iterate,
				    void *ctx)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb;
	int slot;
	int nritems;
	int ret;
	int found = 0;

	eb = read_tree_block(fs_info->tree_root, logical,
				fs_info->tree_root->leafsize, 0);
	if (!eb)
		return -EIO;

	/*
	 * from the shared data ref, we only know the leaf but not the key.
	 * thus, we must look at all items in this leaf and find one (or
	 * more) that references our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		if (!fi) {
			free_extent_buffer(eb);
			return -EIO;
		}
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != orig_extent_item_objectid) {
			if (found)
				break;
			else
				continue;
		}
		++found;
		ret = __iter_shared_inline_ref_inodes(fs_info, logical,
							key.objectid,
							key.offset,
							extent_offset, path,
							data_refs,
							iterate, ctx);
		if (ret)
			break;
	}

	if (!found) {
		printk(KERN_ERR "btrfs: failed to follow shared data backref "
			"to parent %llu\n", logical);
		WARN_ON(1);
		ret = -EIO;
	}

	free_extent_buffer(eb);
	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters. the path given as a parameter is used for the search
 * and is released before this function returns.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				u64 extent_item_objectid,
				u64 extent_offset,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	unsigned long ptr = 0;
	int last;
	int ret = 0;
	int type;
	u64 logical;
	u32 item_size;
	struct btrfs_extent_inline_ref *eiref;
	struct btrfs_extent_data_ref *dref;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct __data_ref *ref_d;
	struct __shared_ref *ref_s;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	/* first we iterate the inline refs, ... */
	do {
		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
						&eiref, &type);
		if (last == -ENOENT) {
			ret = 0;
			break;
		}
		if (last < 0) {
			ret = last;
			break;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = (struct btrfs_extent_data_ref *)(&eiref->offset);
			ret = __data_list_add_eb(&data_refs, eb, dref);
		} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
			logical = btrfs_extent_inline_ref_offset(eb, eiref);
			ret = __shared_list_add(&shared_refs, logical);
		}
	} while (!ret && !last);

	/* ... then we proceed to in-tree references and ... */
	while (!ret) {
		++path->slots[0];
		if (path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(fs_info->extent_root, path);
			if (ret) {
				if (ret == 1)
					ret = 0; /* we're done */
				break;
			}
			eb = path->nodes[0];
		}
		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
		if (key.objectid != extent_item_objectid)
			break;
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = btrfs_item_ptr(eb, path->slots[0],
						struct btrfs_extent_data_ref);
			ret = __data_list_add_eb(&data_refs, eb, dref);
		} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			ret = __shared_list_add(&shared_refs, key.offset);
		}
	}

	btrfs_release_path(path);

	/*
	 * ... only at the very end can we process the refs we found. this is
	 * because the iterator function we call is allowed to make tree lookups
	 * and we have to avoid deadlocks. additionally, we need more tree
	 * lookups ourselves for shared data refs.
	 */
	while (!list_empty(&data_refs)) {
		ref_d = list_first_entry(&data_refs, struct __data_ref, list);
		list_del(&ref_d->list);
		if (!ret)
			ret = iterate(ref_d->inum, extent_offset +
					ref_d->extent_data_item_offset,
					ref_d->root, ctx);
		kfree(ref_d);
	}

	while (!list_empty(&shared_refs)) {
		ref_s = list_first_entry(&shared_refs, struct __shared_ref,
					list);
		list_del(&ref_s->list);
		if (!ret)
			ret = __iter_shared_inline_ref(fs_info,
							ref_s->disk_byte,
							extent_item_objectid,
							extent_offset, path,
							&data_refs,
							iterate, ctx);
		kfree(ref_s);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 offset;
	struct btrfs_key found_key;

	ret = extent_from_logical(fs_info, logical, path,
					&found_key);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	offset = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
					offset, iterate, ctx);

	return ret;
}
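
/*
 * Illustrative sketch (not part of the original code): wiring a callback into
 * iterate_inodes_from_logical(). The callback signature is inferred from how
 * iterate() is invoked above (inum, file offset, root, ctx); the exact
 * typedef of iterate_extent_inodes_t lives in backref.h. Function and
 * variable names below are hypothetical.
 *
 *	static int count_inodes(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		u64 *count = ctx;
 *
 *		++*count;
 *		return 0;	// a non-zero value would stop the iteration
 *	}
 *
 *	// caller, with fs_info and a logical byte number at hand:
 *	//	struct btrfs_path *path = btrfs_alloc_path();
 *	//	u64 count = 0;
 *	//	ret = iterate_inodes_from_logical(logical, fs_info, path,
 *	//					  count_inodes, &count);
 *	//	btrfs_free_path(path);
 */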

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (1) {
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
					&found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			ret = iterate(parent, iref, eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		/* drop the extra reference taken above exactly once */
		free_extent_buffer(eb);
		/* a non-zero return from iterate() stops the whole iteration */
		if (ret)
			break;
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (possibly truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
				struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->str + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
				inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->str[i] = fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated and
 * accessed from ipath->fspath->str[i].
 * when it returns, there are ipath->fspath->elem_cnt paths available in
 * ipath->fspath->str[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
				inode_to_path, ipath);
}

/*
 * allocates a data container used to return multiple file system paths for
 * an inode. total_bytes to allocate are passed, note that space usable for
 * actual path information will be
 * total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed by the caller in the end.
 */
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	kfree(ipath);
}
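
/*
 * Illustrative sketch (not part of the original code): a typical ipath
 * lifecycle around paths_from_inode(). Buffer sizing and the plumbing of the
 * real callers are omitted; names below are hypothetical. Note that
 * free_ipath() as defined here does not free ipath->fspath, so the caller
 * stays responsible for that allocation.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath;
 *	int i;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath)) {
 *		btrfs_free_path(path);
 *		return PTR_ERR(ipath);
 *	}
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; !ret && i < ipath->fspath->elem_cnt; ++i)
 *		printk(KERN_DEBUG "path %d: %s\n", i, ipath->fspath->str[i]);
 *	kfree(ipath->fspath);
 *	free_ipath(ipath);
 *	btrfs_free_path(path);
 */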