xref: /openbmc/linux/fs/btrfs/extent_map.c (revision 32193c14)
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}
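
/*
 * Illustrative sketch of typical usage (the "my_inode" type below is
 * hypothetical, not taken from btrfs): embed an extent_map_tree in a
 * per-inode structure and initialize it once when the inode is set up.
 *
 *	struct my_inode {
 *		struct extent_map_tree extent_tree;
 *	};
 *
 *	static void my_inode_init(struct my_inode *inode)
 *	{
 *		extent_map_tree_init(&inode->extent_tree);
 *	}
 */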

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	atomic_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		WARN_ON(!list_empty(&em->list));
		kmem_cache_free(extent_map_cache, em);
	}
}
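
/*
 * Illustrative lifecycle sketch (usage pattern assumed from the two
 * interfaces above, not original code): every alloc_extent_map() is
 * balanced by a free_extent_map(), and every extra reference taken
 * elsewhere needs its own drop.
 *
 *	struct extent_map *em = alloc_extent_map();	refs == 1
 *	if (!em)
 *		return -ENOMEM;
 *	...
 *	free_extent_map(em);				refs == 0, em freed
 */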
81 
82 /* simple helper to do math around the end of an extent, handling wrap */
83 static u64 range_end(u64 start, u64 len)
84 {
85 	if (start + len < start)
86 		return (u64)-1;
87 	return start + len;
88 }
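
/*
 * Worked example (illustrative): range_end(0, 4096) returns 4096, while
 * range_end((u64)-4096, 8192) would wrap past the top of the u64 space,
 * so it is clamped to (u64)-1.
 */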

static int tree_insert(struct rb_root *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (em->start < entry->start)
			p = &(*p)->rb_left;
		else if (em->start >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

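	/*
	 * Clarifying note: the descent above only rules out an overlap at
	 * the node where the search bottomed out.  Scan forward, then
	 * backward, from that point to make sure [em->start, end) does not
	 * overlap any neighboring mapping before linking em in.
	 */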
	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	em->in_tree = 1;
	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color(&em->rb_node, root);
	return 0;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
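
/*
 * Illustrative example (values assumed, not from btrfs): two unpinned,
 * uncompressed mappings with identical flags and bdev, where
 * block_len == len,
 *
 *	prev: start 0,    len 4096, block_start 1048576
 *	next: start 4096, len 4096, block_start 1052672
 *
 * satisfy extent_map_end(prev) == next->start and
 * next->block_start == extent_map_block_end(prev), so mergable_maps()
 * returns 1 and the pair can be collapsed into one mapping covering
 * [0, 8192).
 */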

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);

			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		free_extent_map(merge);
	}
}
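
/*
 * Worked example (illustrative): forward-merging em (mod_start 4096,
 * mod_len 4096) with a following map (mod_start 8192, mod_len 4096)
 * gives em->mod_len = (8192 + 4096) - 4096 = 8192, i.e. the modified
 * range grows to span both mappings.
 */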

/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_move(&em->list, &tree->modified_extents);
	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (em->in_tree)
		try_merge_map(tree, em);
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether @em should be kept on the tree's list of
 *		modified extents (for fsync) instead of being merged with
 *		its neighbors
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in is inserted into
 * the tree directly, with an additional reference taken; any neighboring
 * mapping that gets merged into @em is removed from the tree and has its
 * reference dropped.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	atomic_inc(&em->refs);

	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
out:
	return ret;
}
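
/*
 * Illustrative usage sketch (locking and reference handling assumed from
 * the surrounding interfaces, not original code): callers hold the
 * tree's write lock across the insert and drop their own reference once
 * the tree holds one of its own.
 *
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em, 0);
 *	write_unlock(&tree->lock);
 *	if (!ret)
 *		free_extent_map(em);	drop the caller's reference
 */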

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
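
/*
 * Illustrative usage sketch (assumed pattern, not original code): the
 * returned map carries an extra reference, so the caller must drop it
 * with free_extent_map() when done.
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		... use em->block_start, em->len ...
 *		free_extent_map(em);
 *	}
 */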

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, a nearby extent map may be returned instead.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	em->in_tree = 0;
	return ret;
}
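
/*
 * Illustrative sketch (assumed pattern, not original code): since
 * remove_extent_mapping() drops no references, a caller discarding a
 * mapping entirely pairs the removal with a free_extent_map() for the
 * reference the tree held.
 *
 *	write_lock(&tree->lock);
 *	remove_extent_mapping(tree, em);
 *	write_unlock(&tree->lock);
 *	free_extent_map(em);
 */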