#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree: tree to initialize
 * @mask: flags for memory allocations during tree operations (currently
 *	  unused)
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map.rb_node = NULL;
	spin_lock_init(&tree->lock);
}
EXPORT_SYMBOL(extent_map_tree_init);

/**
 * alloc_extent_map - allocate new extent map structure
 * @mask: memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

/**
 * free_extent_map - drop reference count of an extent_map
 * @em: extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

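/*
 * Worked example for the ordering test above (illustrative, not part of
 * the original file): for an entry with start == 4096 and len == 4096,
 * extent_map_end() returns 8192.  An insert keyed at 2048 descends left,
 * one keyed at 8192 descends right, and one keyed anywhere in 4096..8191
 * overlaps the entry, so tree_insert() returns the existing node and the
 * caller reports -EEXIST.
 */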

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		/* walk forward to the first extent that ends after @offset */
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		/* walk backward to the last extent starting at or before @offset */
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/*
 * look for an extent covering 'offset' in the tree.  If it can't be
 * found, return the closest extent that ends after 'offset' instead.
 */
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

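/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller allocates a map, fills in the file and block ranges, and inserts
 * it under the tree lock.  The start/len/block_start/bdev variables below
 * are hypothetical caller-side values:
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->len = len;
 *	em->block_start = block_start;
 *	em->block_len = len;
 *	em->bdev = bdev;
 *	spin_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em);
 *	spin_unlock(&tree->lock);
 *	free_extent_map(em);	(drop the allocation reference; the
 *				 tree still holds its own)
 */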

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree: tree to insert new map in
 * @em: map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in is inserted into
 * the tree directly, with an additional reference taken for the tree;
 * any neighbor that gets merged away has its reference dropped.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	assert_spin_locked(&tree->lock);
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	assert_spin_locked(&tree->lock);
	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

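/*
 * Example (illustrative sketch, not part of the original file): lookups
 * must run under tree->lock, and the returned map carries an extra
 * reference that the caller has to drop when done:
 *
 *	spin_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	spin_unlock(&tree->lock);
 *	if (em) {
 *		... use em->start, em->len, em->block_start ...
 *		free_extent_map(em);
 *	}
 */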

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree: extent tree to remove from
 * @em: extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	assert_spin_locked(&tree->lock);
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

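/*
 * Example (illustrative sketch, not part of the original file): because
 * remove_extent_mapping() drops no references, a caller that removes a
 * map it looked up must release both the lookup reference and the one
 * the tree was holding:
 *
 *	spin_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	if (em)
 *		remove_extent_mapping(tree, em);
 *	spin_unlock(&tree->lock);
 *	if (em) {
 *		free_extent_map(em);	(once for the lookup)
 *		free_extent_map(em);	(once for the tree)
 *	}
 */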