#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "extent_map.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 * @mask:	flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map.rb_node = NULL;
	spin_lock_init(&tree->lock);
}

/**
 * alloc_extent_map - allocate new extent map structure
 * @mask:	memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	em->flags = 0;
	atomic_set(&em->refs, 1);
	return em;
}
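/*
 * Minimal usage sketch (hypothetical, not compiled): how a caller is
 * expected to use the allocation and reference-counting helpers above.
 * The function name, field values and the GFP_NOFS mask are illustrative,
 * not taken from any real caller.
 */
#if 0
static void example_extent_map_lifecycle(void)
{
	struct extent_map *em;

	em = alloc_extent_map(GFP_NOFS);	/* refcount starts at 1 */
	if (!em)
		return;

	/* describe a 4KiB extent starting at file offset 0 */
	em->start = 0;
	em->len = 4096;
	em->block_start = EXTENT_MAP_HOLE;	/* no blocks allocated yet */
	em->block_len = em->len;
	em->bdev = NULL;

	/* drop our reference; the map is freed once the count hits zero */
	free_extent_map(em);
}
#endif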
/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}

/*
 * insert @node into the rb-tree keyed by start offset.  If @offset falls
 * inside an existing extent, return that extent's node instead of inserting.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
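/*
 * Illustrative sketch (hypothetical, not compiled): tree_insert() refuses
 * to insert a map whose range overlaps an existing one and instead returns
 * the node it collided with.  The offsets below are made up.
 */
#if 0
static void example_tree_insert_collision(struct extent_map_tree *tree,
					  struct extent_map *a,
					  struct extent_map *b)
{
	struct rb_node *existing;

	/* a covers [0, 8192), b starts inside it at 4096 */
	a->start = 0;
	a->len = 8192;
	b->start = 4096;
	b->len = 4096;

	existing = tree_insert(&tree->map, a->start, &a->rb_node);
	WARN_ON(existing);			/* empty tree: inserted */

	existing = tree_insert(&tree->map, b->start, &b->rb_node);
	WARN_ON(!existing);			/* overlaps a: rejected */
}
#endif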
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		/* walk forward to the first extent that ends after @offset */
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		/* walk backward to the last extent that starts at or before @offset */
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
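/*
 * Illustrative sketch (hypothetical, not compiled): what __tree_search()
 * hands back for an offset that falls in a hole between two extents.  The
 * tree is assumed to already contain extents [0, 4096) and [8192, 12288).
 */
#if 0
static void example_tree_search_neighbors(struct extent_map_tree *tree)
{
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	struct rb_node *node;

	/* offset 6000 sits in the hole between the two extents */
	node = __tree_search(&tree->map, 6000, &prev, &next);

	/* no extent covers 6000, so there is no exact hit */
	WARN_ON(node);
	/* prev ends up on the extent after the hole:  [8192, 12288) */
	/* next ends up on the extent before the hole: [0, 4096)     */
}
#endif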
/*
 * look for an offset in the tree and, if no extent contains it, return
 * the next extent after 'offset' (or NULL if there is none).
 */
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
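/*
 * Illustrative sketch (hypothetical, not compiled): two file extents that
 * mergable_maps() would accept -- logically and physically contiguous, same
 * flags and bdev, neither pinned nor compressed.  All offsets are made up.
 */
#if 0
static void example_mergable_maps(struct extent_map *prev,
				  struct extent_map *next)
{
	prev->start = 0;
	prev->len = 4096;
	prev->block_start = 1 << 20;		/* on-disk byte 1MiB */
	prev->block_len = 4096;

	next->start = 4096;			/* == extent_map_end(prev) */
	next->len = 4096;
	next->block_start = (1 << 20) + 4096;	/* == extent_map_block_end(prev) */
	next->block_len = 4096;

	prev->flags = next->flags = 0;
	prev->bdev = next->bdev = NULL;

	WARN_ON(!mergable_maps(prev, next));	/* adjacent: merge allowed */

	set_bit(EXTENT_FLAG_PINNED, &prev->flags);
	WARN_ON(mergable_maps(prev, next));	/* pinned: merge refused */
}
#endif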
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	assert_spin_locked(&tree->lock);
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		/* try to merge with the previous extent in the tree */
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	/* try to merge with the next extent in the tree */
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
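/*
 * Illustrative sketch (hypothetical, not compiled): add_extent_mapping()
 * above asserts that tree->lock is already held, so a caller takes the
 * spinlock around the insert and handles the -EEXIST overlap case itself.
 */
#if 0
static int example_add_extent_mapping(struct extent_map_tree *tree,
				      struct extent_map *em)
{
	int ret;

	spin_lock(&tree->lock);
	ret = add_extent_mapping(tree, em);
	spin_unlock(&tree->lock);

	/*
	 * on success the tree took its own reference; either way we are
	 * done with the reference we got from alloc_extent_map()
	 */
	free_extent_map(em);
	return ret;
}
#endif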
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	assert_spin_locked(&tree->lock);
	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	assert_spin_locked(&tree->lock);
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}
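/*
 * Illustrative sketch (hypothetical, not compiled): looking up a cached
 * extent and dropping it from the tree.  Both calls must run under
 * tree->lock; the reference returned by lookup_extent_mapping() and the
 * reference the tree held are each dropped with free_extent_map().
 */
#if 0
static void example_lookup_and_remove(struct extent_map_tree *tree,
				      u64 start, u64 len)
{
	struct extent_map *em;

	spin_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);
	if (em && !IS_ERR(em)) {
		/* remove_extent_mapping() drops no references itself */
		remove_extent_mapping(tree, em);
	}
	spin_unlock(&tree->lock);

	if (em && !IS_ERR(em)) {
		free_extent_map(em);	/* once for the lookup reference */
		free_extent_map(em);	/* once for the reference the tree held */
	}
}
#endif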