#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/hardirq.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 * @mask:	flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map.rb_node = NULL;
	spin_lock_init(&tree->lock);
}
EXPORT_SYMBOL(extent_map_tree_init);

/**
 * alloc_extent_map - allocate new extent map structure
 * @mask:	memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	em->flags = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);
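/*
 * Usage sketch (illustrative only; 'tree', 'start', 'len' and 'err'
 * stand in for whatever the caller has).  A typical caller pairs every
 * alloc_extent_map() with one free_extent_map(); add_extent_mapping()
 * below takes its own reference on success, so the caller still drops
 * its reference when done with the pointer:
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->len = len;
 *	spin_lock(&tree->lock);
 *	err = add_extent_mapping(tree, em);
 *	spin_unlock(&tree->lock);
 *	free_extent_map(em);
 */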
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/*
 * look for an offset in the tree, and if it can't be found, return
 * the first offset we can find smaller than 'offset'.
 */
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
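/*
 * Merge example (illustrative sketch; the values are hypothetical and
 * assume extent_map_end() and extent_map_block_end() from extent_map.h
 * return start + len and block_start + len respectively).  Two maps
 * describing a contiguous file range backed by contiguous disk blocks
 * satisfy mergable_maps():
 *
 *	prev->start = 0;	prev->len = 4096;
 *	prev->block_start = 8192;
 *	next->start = 4096;	next->len = 4096;
 *	next->block_start = 8192 + 4096;
 *
 * Here extent_map_end(prev) == next->start and
 * extent_map_block_end(prev) == next->block_start, so
 * add_extent_mapping() below collapses them into one [0, 8192)
 * mapping, provided flags and bdev match and neither map is pinned.
 */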
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	assert_spin_locked(&tree->lock);
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		free_extent_map(merge);
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	assert_spin_locked(&tree->lock);
	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
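/*
 * Lookup sketch (illustrative only; 'tree', 'start' and 'len' are
 * whatever the caller has).  The tree lock must be held across the
 * search, and the returned map carries a reference the caller must
 * drop with free_extent_map():
 *
 *	spin_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	spin_unlock(&tree->lock);
 *	if (em) {
 *		use(em->block_start, em->len);
 *		free_extent_map(em);
 *	}
 */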
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	assert_spin_locked(&tree->lock);
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);
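/*
 * Removal sketch (illustrative only).  Since remove_extent_mapping()
 * drops no references, a map pulled out of the tree via a lookup holds
 * two: the one the tree took in add_extent_mapping() and the one the
 * lookup took.  The caller releases both:
 *
 *	spin_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	if (em)
 *		remove_extent_mapping(tree, em);
 *	spin_unlock(&tree->lock);
 *	if (em) {
 *		free_extent_map(em);	// reference the tree held
 *		free_extent_map(em);	// the lookup reference
 *	}
 */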