xref: /openbmc/linux/fs/btrfs/extent_map.c (revision 9601e3f6)
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 * @mask:		flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map.rb_node = NULL;
	spin_lock_init(&tree->lock);
}

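/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * typical user embeds an extent_map_tree in a per-inode structure and
 * initializes it once when that structure is created.  The names
 * my_inode_info and extent_tree below are hypothetical.
 *
 *	struct my_inode_info {
 *		struct extent_map_tree extent_tree;
 *	};
 *
 *	static void my_inode_info_init(struct my_inode_info *info)
 *	{
 *		extent_map_tree_init(&info->extent_tree, GFP_NOFS);
 *	}
 */
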
/**
 * alloc_extent_map - allocate new extent map structure
 * @mask:	memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	em->flags = 0;
	atomic_set(&em->refs, 1);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}

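/*
 * Usage sketch (editor's illustration, not part of the original file):
 * allocate a map, fill in the range it describes, and drop the single
 * reference again when done.  The values below are made up; only fields
 * that this file itself uses (start, len, block_start, block_len, bdev)
 * are touched.
 *
 *	struct extent_map *em;
 *
 *	em = alloc_extent_map(GFP_NOFS);
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->len = 4096;
 *	em->block_start = EXTENT_MAP_HOLE;
 *	em->block_len = em->len;
 *	em->bdev = NULL;
 *	...
 *	free_extent_map(em);
 */
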
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

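/*
 * Editor's note (illustration, not part of the original file): tree_insert()
 * keys the rbtree by the start offset of each extent_map and treats any
 * existing entry whose [start, extent_map_end) range contains @offset as a
 * collision.  For example, with an extent covering [4096, 8192) already in
 * the tree, inserting a new map whose start falls anywhere inside that
 * range returns the existing node instead of linking the new one.
 */
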
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

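/*
 * Worked example (editor's illustration, not part of the original file):
 * with extents covering [0, 4096) and [8192, 12288) in the tree, searching
 * for offset 5000 finds no containing extent, so __tree_search() returns
 * NULL and uses @prev_ret/@next_ret to hand back the two neighboring
 * entries around the hole; in this example @prev_ret ends up pointing at
 * the [8192, 12288) extent and @next_ret at the [0, 4096) one.
 * lookup_extent_mapping() below then checks both neighbors against the
 * requested range before giving up.
 */
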
/*
 * look for an offset in the tree, and if it can't be found, return
 * the first offset we can find smaller than 'offset'.
 */
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

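/*
 * Worked example (editor's illustration, not part of the original file):
 * two real (non-hole, non-inline) extents merge only when they are
 * contiguous both in the file and on disk, e.g. prev = [0, 4096) with
 * block_start 1048576 and block_len 4096, and next = [4096, 8192) with
 * block_start 1052672, sharing the same flags and bdev.  Two adjacent
 * holes also merge, but a pinned or compressed prev never does.
 */
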
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	assert_spin_locked(&tree->lock);
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}

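/*
 * Usage sketch (editor's illustration, not part of the original file):
 * callers are expected to hold tree->lock around the insert.  On success
 * the tree takes an extra reference, so the caller eventually drops its
 * own with free_extent_map(); on -EEXIST nothing was added and the caller
 * still owns its original reference.  em_tree and em stand for whatever
 * the caller prepared.
 *
 *	int ret;
 *
 *	spin_lock(&em_tree->lock);
 *	ret = add_extent_mapping(em_tree, em);
 *	spin_unlock(&em_tree->lock);
 *	if (ret == -EEXIST) {
 *		free_extent_map(em);
 *		em = NULL;
 *	}
 */
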
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

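/*
 * Editor's note (illustration, not part of the original file): for a
 * normal range, range_end(4096, 4096) is simply 8192; when start + len
 * would wrap past the top of the u64 space, e.g. range_end((u64)-8, 100),
 * the result is clamped to (u64)-1 so the lookup below still treats the
 * range as extending to the end.
 */
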
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	assert_spin_locked(&tree->lock);
	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}

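/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the lookup must be done under tree->lock, and the returned map carries
 * an extra reference that the caller has to drop.  Because the result may
 * only partially overlap [start, start + len), callers typically check the
 * returned range before trusting it.  em_tree, start and len stand for the
 * caller's own values.
 *
 *	struct extent_map *em;
 *
 *	spin_lock(&em_tree->lock);
 *	em = lookup_extent_mapping(em_tree, start, len);
 *	spin_unlock(&em_tree->lock);
 *	if (em && (em->start > start || extent_map_end(em) <= start)) {
 *		free_extent_map(em);
 *		em = NULL;
 *	}
 */
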
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	assert_spin_locked(&tree->lock);
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}
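
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * removal also has to happen under tree->lock, and since no reference is
 * dropped here the caller releases the reference the tree took when the
 * map was added (the free_extent_map() call below).  em_tree and em again
 * stand for the caller's own values.
 *
 *	spin_lock(&em_tree->lock);
 *	remove_extent_mapping(em_tree, em);
 *	spin_unlock(&em_tree->lock);
 *	free_extent_map(em);
 */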
339