#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree: tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	atomic_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em: extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		WARN_ON(!list_empty(&em->list));
		kmem_cache_free(extent_map_cache, em);
	}
}
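
/*
 * Illustrative sketch (not part of this file): the reference counting
 * convention the helpers above establish.  alloc_extent_map() hands back
 * one reference, lookup_extent_mapping() below takes a reference on the
 * entry it returns, and every holder balances its reference with
 * free_extent_map():
 *
 *	struct extent_map *em = alloc_extent_map();
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->len = len;
 *	... fill in block_start, block_len, flags ...
 *	free_extent_map(em);	drops the allocation reference
 */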

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
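
/*
 * Worked example (illustrative numbers, not from this file): two
 * uncompressed, unpinned extents with identical flags and bdev,
 *
 *	prev: start = 0,    len = 4096, block_start = 1048576
 *	next: start = 4096, len = 4096, block_start = 1052672
 *
 * satisfy mergable_maps(): extent_map_end(prev) == next->start and
 * next->block_start == extent_map_block_end(prev), so try_merge_map()
 * below would collapse them into a single [0, 8192) mapping.
 */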

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			list_move(&em->list, &tree->modified_extents);

			list_del_init(&merge->list);
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		list_del_init(&merge->list);
		free_extent_map(merge);
	}
}

/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree: tree to unpin the extent in
 * @start: logical offset in the file
 * @len: length of the extent
 * @gen: generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_move(&em->list, &tree->modified_extents);
	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (em->in_tree)
		try_merge_map(tree, em);
}
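
/*
 * Illustrative sketch (the caller shown is an assumption, not part of
 * this file): once ordered IO for a pinned extent completes, a caller
 * unpins it with the transaction generation so fsync can find it:
 *
 *	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
 *			   ordered->file_offset, ordered->len,
 *			   trans->transid);
 *
 * unpin_extent_cache() takes tree->lock itself, so no external locking
 * is required around the call.
 */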

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree: tree to insert new map in
 * @em: map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);

	em->mod_start = em->start;
	em->mod_len = em->len;

	try_merge_map(tree, em);
out:
	return ret;
}

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
					   u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}

/**
 * search_extent_mapping - find a nearby extent map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree: extent tree to remove from
 * @em: extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	em->in_tree = 0;
	return ret;
}
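
/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * locking convention for the API above.  add_extent_mapping() and
 * remove_extent_mapping() expect the caller to hold tree->lock for
 * writing; lookups only need it for reading:
 *
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em);
 *	write_unlock(&tree->lock);
 *	free_extent_map(em);		drop the caller's reference
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em)
 *		free_extent_map(em);	balance the lookup's reference
 */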