/*
 * fs/btrfs/extent_map.c (revision 306929f3)
 */

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/hardirq.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				       unsigned long extra_flags,
				       void (*ctor)(void *, struct kmem_cache *,
						    unsigned long));

static struct kmem_cache *extent_map_cache;

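/*
 * Create the slab cache used for all extent_map allocations.  Called
 * once at module init; returns -ENOMEM if the cache cannot be created.
 */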
int __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					    sizeof(struct extent_map), 0,
					    NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

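/*
 * Tear down the slab cache.  Safe to call even if extent_map_init()
 * failed, since the cache pointer is checked first.
 */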
void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

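/*
 * Initialize an empty extent map tree.  The gfp_t mask is accepted but
 * currently unused.  Note that the lock is only initialized here;
 * callers are responsible for taking it around tree operations.
 */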
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->last = NULL;
	spin_lock_init(&tree->lock);
}
EXPORT_SYMBOL(extent_map_tree_init);

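/*
 * Allocate a new extent_map from the slab cache and initialize its
 * refcount to 1.  kmem_cache_alloc() returns NULL on failure, never an
 * ERR_PTR, so callers only need a NULL check.
 */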
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

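/*
 * Drop a reference on an extent_map and free it once the count hits
 * zero.  Freeing an extent_map that is still linked into a tree is a
 * bug, hence the WARN_ON(em->in_tree).
 */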
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

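/*
 * Link a node into the rb-tree, keyed by [start, extent_map_end).
 * Returns NULL on success, or the existing node whose range already
 * covers @offset, in which case nothing is inserted.
 */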
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

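/*
 * Search the tree for an extent_map containing @offset.  On an exact
 * hit the matching node is returned.  On a miss, NULL is returned and
 * the neighbors of the hole are reported: *prev_ret is set to the
 * first mapping that starts after @offset and *next_ret to the last
 * mapping that ends at or before it (note the inverted naming); either
 * may be NULL.
 */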
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

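	/*
	 * no exact match: walk forward from the last node visited to
	 * the first mapping that ends beyond @offset
	 */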
	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

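	/*
	 * walk backward to the last mapping that starts at or before
	 * @offset
	 */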
	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

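/*
 * Like __tree_search(), but on a miss return the neighboring node
 * found via prev_ret instead of NULL.
 */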
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

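/*
 * Two extent_maps can be merged when they are logically and physically
 * contiguous: @prev ends exactly where @next starts, the flags and
 * block device match, and both are the same kind of special mapping
 * (hole, inline, delalloc) or are adjacent on disk.
 */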
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

/*
 * add_extent_mapping tries a simple forward/backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		merge = rb_entry(rb, struct extent_map, rb_node);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
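	/* try to merge backward with the mapping that ends at em->start */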
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
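	/* try to merge forward with the mapping that starts where em ends */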
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
	tree->last = em;
out:
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

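/*
 * Return start + len, clamped to (u64)-1 on overflow, e.g.
 * range_end((u64)-2, 8) == (u64)-1.
 */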
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, start + len) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

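	/* fast path: the mapping found by the previous call is often hit again */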
	em = tree->last;
	if (em && end > em->start && start < extent_map_end(em))
		goto found;

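	/* miss in the cache: search the tree, then try the hole's neighbors */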
	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_CAST(rb_node);
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
	tree->last = em;
out:
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	if (tree->last == em)
		tree->last = NULL;
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);
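
/*
 * Usage sketch (illustrative only, not part of this file): a
 * hypothetical caller following the refcounting rules above,
 * presumably under tree->lock, which this file initializes but never
 * takes itself.  lookup_extent_mapping() returns with an extra
 * reference held for the caller, and add_extent_mapping() takes its
 * own reference for the tree, so in both paths the caller drops its
 * reference with free_extent_map() when done:
 *
 *	struct extent_map *em;
 *	int ret;
 *
 *	spin_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	spin_unlock(&tree->lock);
 *
 *	if (!em) {
 *		em = alloc_extent_map(GFP_NOFS);
 *		if (!em)
 *			return -ENOMEM;
 *		... fill in em->start, em->len, em->block_start ...
 *		spin_lock(&tree->lock);
 *		ret = add_extent_mapping(tree, em);
 *		spin_unlock(&tree->lock);
 *	}
 *	... use em ...
 *	free_extent_map(em);
 */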