xref: /openbmc/linux/fs/btrfs/extent_map.c (revision ed64f066)
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}
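
/*
 * Illustrative sketch (not part of this file): the init/exit pair above
 * brackets the lifetime of the slab cache and is wired into module setup,
 * roughly as fs/btrfs/super.c does it.  The surrounding functions here
 * are simplified placeholders.
 *
 *	static int __init init_btrfs_fs(void)
 *	{
 *		int err;
 *
 *		err = extent_map_init();
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 *
 *	static void __exit exit_btrfs_fs(void)
 *	{
 *		...
 *		extent_map_exit();
 *	}
 */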

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;
	rwlock_init(&tree->lock);
}
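
/*
 * Example (illustrative, not part of this file): btrfs keeps one extent
 * map tree per inode, so inode setup calls this once, along the lines of
 *
 *	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
 *
 * where @inode is a hypothetical in-memory btrfs inode.
 */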

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	atomic_set(&em->refs, 1);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
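
/*
 * Example (illustrative, not part of this file): the reference lifecycle
 * of a map.  alloc_extent_map() returns with a count of one; every
 * successful lookup takes another reference, and each holder drops its
 * own with free_extent_map().
 *
 *	struct extent_map *em;
 *
 *	em = alloc_extent_map();
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->len = len;
 *	... insert into a tree or hand off, then drop our count ...
 *	free_extent_map(em);
 */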

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
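
/*
 * Worked example for the ordering above (illustrative): the tree is keyed
 * by byte ranges rather than single offsets.  A node with start 4096 and
 * len 4096 covers [4096, 8192), so tree_insert() with any offset in
 * 4096..8191 lands on that node and returns it instead of linking the
 * new one, which is how overlapping inserts are rejected.
 */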

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
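
/*
 * Worked example (illustrative): with mappings [0, 4096) and
 * [8192, 12288) in the tree, __tree_search(root, 4096, &prev, &next)
 * returns NULL and, somewhat counterintuitively, sets *prev_ret to the
 * [8192, 12288) node (the first mapping that ends beyond the offset)
 * and *next_ret to the [0, 4096) node (the last mapping that starts at
 * or before it).
 */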

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
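
/*
 * Example (illustrative): two plain mappings [0, 4096) at disk byte
 * 1048576 and [4096, 8192) at disk byte 1052672 are mergeable, since
 * extent_map_end(prev) == next->start and
 * extent_map_block_end(prev) == next->block_start.  Two adjacent holes
 * merge via the matching EXTENT_MAP_HOLE block_start values instead,
 * while a pinned or compressed prev never merges.
 */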

int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}
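
/*
 * Usage sketch (illustrative, not part of this file): btrfs sets
 * EXTENT_FLAG_PINNED on a map while the matching ordered extent is still
 * in flight, which blocks merging.  Once the data is safely on disk, the
 * ordered-extent completion path calls something like
 *
 *	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
 *			   ordered->file_offset, ordered->len);
 *
 * to clear the bit and retry the merge with both neighbors.
 */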

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}
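
/*
 * Usage sketch (illustrative, not part of this file): locking is left to
 * the caller, so insertions are wrapped in the tree's write lock, and the
 * caller still owns its own reference afterwards.
 *
 *	write_lock(&em_tree->lock);
 *	ret = add_extent_mapping(em_tree, em);
 *	write_unlock(&em_tree->lock);
 *	if (ret == -EEXIST) {
 *		... an overlapping mapping already exists ...
 *	}
 *	free_extent_map(em);
 */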

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
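
/*
 * Example (illustrative): range_end(0xffffffffffff0000ULL, 0x20000)
 * would wrap past the top of the u64 space, so it clamps to (u64)-1
 * instead of returning the wrapped value 0x10000.
 */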

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
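
/*
 * Usage sketch (illustrative, not part of this file): lookups run under
 * the tree's read lock, and the caller drops the reference the lookup
 * took once it is done with the map.
 *
 *	read_lock(&em_tree->lock);
 *	em = lookup_extent_mapping(em_tree, start, len);
 *	read_unlock(&em_tree->lock);
 *	if (em) {
 *		... use em->block_start, em->len, ...
 *		free_extent_map(em);
 *	}
 */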

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}
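
/*
 * Usage sketch (illustrative, not part of this file): removal does not
 * drop the reference the tree held, so eviction paths remove the map
 * under the write lock and then drop that reference themselves.
 *
 *	write_lock(&em_tree->lock);
 *	remove_extent_mapping(em_tree, em);
 *	write_unlock(&em_tree->lock);
 *	free_extent_map(em);
 */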