#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(extent_buffers);
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t extent_buffers_lock;
static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
static int nr_extent_buffers;
#define MAX_EXTENT_BUFFER_CACHE 128

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
					      sizeof(struct extent_state), 0,
					      NULL);
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
					      sizeof(struct extent_buffer), 0,
					      NULL);
	spin_lock_init(&extent_buffers_lock);
}

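/*
 * module teardown: drain the extent_buffer LRU, then report and free
 * anything still sitting on the state/buffer leak-tracking lists
 * before the slab caches are destroyed.
 */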
void __exit extent_map_exit(void)
{
	struct extent_buffer *eb;
	struct extent_state *state;

	while (!list_empty(&extent_buffers)) {
		eb = list_entry(extent_buffers.next,
				struct extent_buffer, list);
		list_del(&eb->list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}
	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next,
				struct extent_buffer, leak_list);
		printk("buffer leak start %Lu len %lu return %lX\n", eb->start, eb->len, eb->alloc_addr);
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_map_tree_init);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

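/*
 * Typical lifecycle for the map allocators above (an illustrative
 * sketch, not code from this file):
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *	if (!em || IS_ERR(em))
 *		return em ? PTR_ERR(em) : -ENOMEM;
 *	(fill in em->start, em->end, em->block_start ...)
 *	err = add_extent_mapping(tree, em);
 *	free_extent_map(em);	(the tree holds its own reference)
 */
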
struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

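/*
 * removes the entry covering 'offset' from the tree, or returns
 * -ENOENT if no entry contains it.  The entry itself is not freed,
 * just unlinked.
 */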
static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

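/*
 * Backward-merge example (illustrative): inserting [4096, 8191] whose
 * block_start is one past the block_end of an existing, adjacent
 * [0, 4095] mapping (or where both are EXTENT_MAP_HOLE) collapses the
 * pair into a single [0, 8191] entry and drops the old struct's tree
 * reference.
 */
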
/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

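/*
 * For example (illustrative): splitting orig = [0, 4095] at
 * split == 1024 leaves prealloc = [0, 1023] and orig = [1024, 4095]
 * in the tree, both carrying orig's original state bits.
 */
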
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
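/*
 * A typical call (illustrative sketch): clearing dirty and delalloc
 * state for one page worth of the tree, without forcing removal:
 *
 *	clear_extent_bit(tree, start, start + PAGE_CACHE_SIZE - 1,
 *			 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, GFP_NOFS);
 *
 * which is what clear_extent_dirty() below boils down to.
 */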
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *   | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start >= end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

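/*
 * helper for wait_extent_bit: drops the tree lock, sleeps until
 * 'state' is woken up, then retakes the lock.  The caller must hold
 * a reference on 'state' so it can't be freed while we sleep.
 */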
static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

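/*
 * Illustrative sketch of set_extent_bit()'s exclusive mode; this is
 * the pattern lock_extent() above uses to implement range locks:
 *
 *	u64 failed_start;
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		part of [start, end] was already locked; wait on
 *		[failed_start, end] and retry from failed_start.
 */
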
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->state & bits) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

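/*
 * find_lock_delalloc_range walks forward from 'start' over contiguous
 * EXTENT_DELALLOC records, setting EXTENT_LOCKED on the ones that
 * begin at or after lock_start (sleeping and retrying if one is
 * already locked), until max_bytes worth of state has been covered or
 * the run ends.  The number of records found is returned, and *end is
 * set to the end of the last one.
 */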
u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			goto out;
		}
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

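/*
 * set_state_private stores a caller-supplied u64 in the extent_state
 * record whose range begins exactly at 'start'; get_state_private
 * reads it back.  Both return -ENOENT if no record starts there.
 * (In btrfs this appears to back per-extent data used by the IO
 * hooks; that use lives outside this file, so treat it as an
 * assumption.)
 */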
int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
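/*
 * Example (illustrative): with dirty state on [0, 4095] and a state
 * record without the dirty bit covering [4096, 8191],
 * test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 1) returns 0 because
 * the range isn't fully covered, while the same call with
 * filled == 0 returns 1 since at least one extent has the bit set.
 */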
1176a52d9a80SChris Mason */ 1177a52d9a80SChris Mason static int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end, 1178a52d9a80SChris Mason int bits, int filled) 1179a52d9a80SChris Mason { 1180a52d9a80SChris Mason struct extent_state *state = NULL; 1181a52d9a80SChris Mason struct rb_node *node; 1182a52d9a80SChris Mason int bitset = 0; 1183a52d9a80SChris Mason 1184a52d9a80SChris Mason read_lock_irq(&tree->lock); 1185a52d9a80SChris Mason node = tree_search(&tree->state, start); 1186a52d9a80SChris Mason while (node && start <= end) { 1187a52d9a80SChris Mason state = rb_entry(node, struct extent_state, rb_node); 1188a52d9a80SChris Mason if (state->start > end) 1189a52d9a80SChris Mason break; 1190a52d9a80SChris Mason 1191a52d9a80SChris Mason if (filled && state->start > start) { 1192a52d9a80SChris Mason bitset = 0; 1193a52d9a80SChris Mason break; 1194a52d9a80SChris Mason } 1195a52d9a80SChris Mason if (state->state & bits) { 1196a52d9a80SChris Mason bitset = 1; 1197a52d9a80SChris Mason if (!filled) 1198a52d9a80SChris Mason break; 1199a52d9a80SChris Mason } else if (filled) { 1200a52d9a80SChris Mason bitset = 0; 1201a52d9a80SChris Mason break; 1202a52d9a80SChris Mason } 1203a52d9a80SChris Mason start = state->end + 1; 1204a52d9a80SChris Mason if (start > end) 1205a52d9a80SChris Mason break; 1206a52d9a80SChris Mason node = rb_next(node); 1207a52d9a80SChris Mason } 1208a52d9a80SChris Mason read_unlock_irq(&tree->lock); 1209a52d9a80SChris Mason return bitset; 1210a52d9a80SChris Mason } 1211a52d9a80SChris Mason 1212a52d9a80SChris Mason /* 1213a52d9a80SChris Mason * helper function to set a given page up to date if all the 1214a52d9a80SChris Mason * extents in the tree for that page are up to date 1215a52d9a80SChris Mason */ 1216a52d9a80SChris Mason static int check_page_uptodate(struct extent_map_tree *tree, 1217a52d9a80SChris Mason struct page *page) 1218a52d9a80SChris Mason { 1219a52d9a80SChris Mason u64 start = page->index << PAGE_CACHE_SHIFT; 1220a52d9a80SChris Mason u64 end = start + PAGE_CACHE_SIZE - 1; 1221a52d9a80SChris Mason if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1)) 1222a52d9a80SChris Mason SetPageUptodate(page); 1223a52d9a80SChris Mason return 0; 1224a52d9a80SChris Mason } 1225a52d9a80SChris Mason 1226a52d9a80SChris Mason /* 1227a52d9a80SChris Mason * helper function to unlock a page if all the extents in the tree 1228a52d9a80SChris Mason * for that page are unlocked 1229a52d9a80SChris Mason */ 1230a52d9a80SChris Mason static int check_page_locked(struct extent_map_tree *tree, 1231a52d9a80SChris Mason struct page *page) 1232a52d9a80SChris Mason { 1233a52d9a80SChris Mason u64 start = page->index << PAGE_CACHE_SHIFT; 1234a52d9a80SChris Mason u64 end = start + PAGE_CACHE_SIZE - 1; 1235a52d9a80SChris Mason if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0)) 1236a52d9a80SChris Mason unlock_page(page); 1237a52d9a80SChris Mason return 0; 1238a52d9a80SChris Mason } 1239a52d9a80SChris Mason 1240a52d9a80SChris Mason /* 1241a52d9a80SChris Mason * helper function to end page writeback if all the extents 1242a52d9a80SChris Mason * in the tree for that page are done with writeback 1243a52d9a80SChris Mason */ 1244a52d9a80SChris Mason static int check_page_writeback(struct extent_map_tree *tree, 1245a52d9a80SChris Mason struct page *page) 1246a52d9a80SChris Mason { 1247a52d9a80SChris Mason u64 start = page->index << PAGE_CACHE_SHIFT; 1248a52d9a80SChris Mason u64 end = start + PAGE_CACHE_SIZE - 1; 1249a52d9a80SChris Mason if (!test_range_bit(tree, start, end, 
EXTENT_WRITEBACK, 0)) 1250a52d9a80SChris Mason end_page_writeback(page); 1251a52d9a80SChris Mason return 0; 1252a52d9a80SChris Mason } 1253a52d9a80SChris Mason 1254a52d9a80SChris Mason /* lots and lots of room for performance fixes in the end_bio funcs */ 1255a52d9a80SChris Mason 1256a52d9a80SChris Mason /* 1257a52d9a80SChris Mason * after a writepage IO is done, we need to: 1258a52d9a80SChris Mason * clear the uptodate bits on error 1259a52d9a80SChris Mason * clear the writeback bits in the extent tree for this IO 1260a52d9a80SChris Mason * end_page_writeback if the page has no more pending IO 1261a52d9a80SChris Mason * 1262a52d9a80SChris Mason * Scheduling is not allowed, so the extent state tree is expected 1263a52d9a80SChris Mason * to have one and only one object corresponding to this IO. 1264a52d9a80SChris Mason */ 1265a52d9a80SChris Mason static int end_bio_extent_writepage(struct bio *bio, 1266a52d9a80SChris Mason unsigned int bytes_done, int err) 1267a52d9a80SChris Mason { 1268a52d9a80SChris Mason const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1269a52d9a80SChris Mason struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 1270a52d9a80SChris Mason struct extent_map_tree *tree = bio->bi_private; 1271a52d9a80SChris Mason u64 start; 1272a52d9a80SChris Mason u64 end; 1273a52d9a80SChris Mason int whole_page; 1274a52d9a80SChris Mason 1275a52d9a80SChris Mason if (bio->bi_size) 1276a52d9a80SChris Mason return 1; 1277a52d9a80SChris Mason 1278a52d9a80SChris Mason do { 1279a52d9a80SChris Mason struct page *page = bvec->bv_page; 1280a52d9a80SChris Mason start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; 1281a52d9a80SChris Mason end = start + bvec->bv_len - 1; 1282a52d9a80SChris Mason 1283a52d9a80SChris Mason if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) 1284a52d9a80SChris Mason whole_page = 1; 1285a52d9a80SChris Mason else 1286a52d9a80SChris Mason whole_page = 0; 1287a52d9a80SChris Mason 1288a52d9a80SChris Mason if (--bvec >= bio->bi_io_vec) 1289a52d9a80SChris Mason prefetchw(&bvec->bv_page->flags); 1290a52d9a80SChris Mason 1291a52d9a80SChris Mason if (!uptodate) { 1292a52d9a80SChris Mason clear_extent_uptodate(tree, start, end, GFP_ATOMIC); 1293a52d9a80SChris Mason ClearPageUptodate(page); 1294a52d9a80SChris Mason SetPageError(page); 1295a52d9a80SChris Mason } 1296a52d9a80SChris Mason clear_extent_writeback(tree, start, end, GFP_ATOMIC); 1297a52d9a80SChris Mason 1298a52d9a80SChris Mason if (whole_page) 1299a52d9a80SChris Mason end_page_writeback(page); 1300a52d9a80SChris Mason else 1301a52d9a80SChris Mason check_page_writeback(tree, page); 13020e2752a7SChristoph Hellwig if (tree->ops && tree->ops->writepage_end_io_hook) 13030e2752a7SChristoph Hellwig tree->ops->writepage_end_io_hook(page, start, end); 1304a52d9a80SChris Mason } while (bvec >= bio->bi_io_vec); 1305a52d9a80SChris Mason 1306a52d9a80SChris Mason bio_put(bio); 1307a52d9a80SChris Mason return 0; 1308a52d9a80SChris Mason } 1309a52d9a80SChris Mason 1310a52d9a80SChris Mason /* 1311a52d9a80SChris Mason * after a readpage IO is done, we need to: 1312a52d9a80SChris Mason * clear the uptodate bits on error 1313a52d9a80SChris Mason * set the uptodate bits if things worked 1314a52d9a80SChris Mason * set the page up to date if all extents in the tree are uptodate 1315a52d9a80SChris Mason * clear the lock bit in the extent tree 1316a52d9a80SChris Mason * unlock the page if there are no other extents locked for it 1317a52d9a80SChris Mason * 1318a52d9a80SChris Mason * Scheduling is not allowed, so the 
extent state tree is expected 1319a52d9a80SChris Mason * to have one and only one object corresponding to this IO. 1320a52d9a80SChris Mason */ 1321a52d9a80SChris Mason static int end_bio_extent_readpage(struct bio *bio, 1322a52d9a80SChris Mason unsigned int bytes_done, int err) 1323a52d9a80SChris Mason { 132407157aacSChris Mason int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1325a52d9a80SChris Mason struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 1326a52d9a80SChris Mason struct extent_map_tree *tree = bio->bi_private; 1327a52d9a80SChris Mason u64 start; 1328a52d9a80SChris Mason u64 end; 1329a52d9a80SChris Mason int whole_page; 133007157aacSChris Mason int ret; 1331a52d9a80SChris Mason 1332a52d9a80SChris Mason if (bio->bi_size) 1333a52d9a80SChris Mason return 1; 1334a52d9a80SChris Mason 1335a52d9a80SChris Mason do { 1336a52d9a80SChris Mason struct page *page = bvec->bv_page; 1337a52d9a80SChris Mason start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; 1338a52d9a80SChris Mason end = start + bvec->bv_len - 1; 1339a52d9a80SChris Mason 1340a52d9a80SChris Mason if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) 1341a52d9a80SChris Mason whole_page = 1; 1342a52d9a80SChris Mason else 1343a52d9a80SChris Mason whole_page = 0; 1344a52d9a80SChris Mason 1345a52d9a80SChris Mason if (--bvec >= bio->bi_io_vec) 1346a52d9a80SChris Mason prefetchw(&bvec->bv_page->flags); 1347a52d9a80SChris Mason 134807157aacSChris Mason if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 134907157aacSChris Mason ret = tree->ops->readpage_end_io_hook(page, start, end); 135007157aacSChris Mason if (ret) 135107157aacSChris Mason uptodate = 0; 135207157aacSChris Mason } 1353a52d9a80SChris Mason if (uptodate) { 1354a52d9a80SChris Mason set_extent_uptodate(tree, start, end, GFP_ATOMIC); 1355a52d9a80SChris Mason if (whole_page) 1356a52d9a80SChris Mason SetPageUptodate(page); 1357a52d9a80SChris Mason else 1358a52d9a80SChris Mason check_page_uptodate(tree, page); 1359a52d9a80SChris Mason } else { 1360a52d9a80SChris Mason ClearPageUptodate(page); 1361a52d9a80SChris Mason SetPageError(page); 1362a52d9a80SChris Mason } 1363a52d9a80SChris Mason 1364a52d9a80SChris Mason unlock_extent(tree, start, end, GFP_ATOMIC); 1365a52d9a80SChris Mason 1366a52d9a80SChris Mason if (whole_page) 1367a52d9a80SChris Mason unlock_page(page); 1368a52d9a80SChris Mason else 1369a52d9a80SChris Mason check_page_locked(tree, page); 1370a52d9a80SChris Mason } while (bvec >= bio->bi_io_vec); 1371a52d9a80SChris Mason 1372a52d9a80SChris Mason bio_put(bio); 1373a52d9a80SChris Mason return 0; 1374a52d9a80SChris Mason } 1375a52d9a80SChris Mason 1376a52d9a80SChris Mason /* 1377a52d9a80SChris Mason * IO done from prepare_write is pretty simple, we just unlock 1378a52d9a80SChris Mason * the structs in the extent tree when done, and set the uptodate bits 1379a52d9a80SChris Mason * as appropriate. 
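 * Unlike the readpage end_io above, success does not touch the page
 * flags here; extent_prepare_write() waits for the EXTENT_LOCKED bits
 * to clear and then calls check_page_uptodate() itself.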
1380a52d9a80SChris Mason */ 1381a52d9a80SChris Mason static int end_bio_extent_preparewrite(struct bio *bio, 1382a52d9a80SChris Mason unsigned int bytes_done, int err) 1383a52d9a80SChris Mason { 1384a52d9a80SChris Mason const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1385a52d9a80SChris Mason struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 1386a52d9a80SChris Mason struct extent_map_tree *tree = bio->bi_private; 1387a52d9a80SChris Mason u64 start; 1388a52d9a80SChris Mason u64 end; 1389a52d9a80SChris Mason 1390a52d9a80SChris Mason if (bio->bi_size) 1391a52d9a80SChris Mason return 1; 1392a52d9a80SChris Mason 1393a52d9a80SChris Mason do { 1394a52d9a80SChris Mason struct page *page = bvec->bv_page; 1395a52d9a80SChris Mason start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; 1396a52d9a80SChris Mason end = start + bvec->bv_len - 1; 1397a52d9a80SChris Mason 1398a52d9a80SChris Mason if (--bvec >= bio->bi_io_vec) 1399a52d9a80SChris Mason prefetchw(&bvec->bv_page->flags); 1400a52d9a80SChris Mason 1401a52d9a80SChris Mason if (uptodate) { 1402a52d9a80SChris Mason set_extent_uptodate(tree, start, end, GFP_ATOMIC); 1403a52d9a80SChris Mason } else { 1404a52d9a80SChris Mason ClearPageUptodate(page); 1405a52d9a80SChris Mason SetPageError(page); 1406a52d9a80SChris Mason } 1407a52d9a80SChris Mason 1408a52d9a80SChris Mason unlock_extent(tree, start, end, GFP_ATOMIC); 1409a52d9a80SChris Mason 1410a52d9a80SChris Mason } while (bvec >= bio->bi_io_vec); 1411a52d9a80SChris Mason 1412a52d9a80SChris Mason bio_put(bio); 1413a52d9a80SChris Mason return 0; 1414a52d9a80SChris Mason } 1415a52d9a80SChris Mason 1416a52d9a80SChris Mason static int submit_extent_page(int rw, struct extent_map_tree *tree, 1417a52d9a80SChris Mason struct page *page, sector_t sector, 1418a52d9a80SChris Mason size_t size, unsigned long offset, 1419a52d9a80SChris Mason struct block_device *bdev, 1420a52d9a80SChris Mason bio_end_io_t end_io_func) 1421a52d9a80SChris Mason { 1422a52d9a80SChris Mason struct bio *bio; 1423a52d9a80SChris Mason int ret = 0; 1424a52d9a80SChris Mason 1425a52d9a80SChris Mason bio = bio_alloc(GFP_NOIO, 1); 1426a52d9a80SChris Mason 1427a52d9a80SChris Mason bio->bi_sector = sector; 1428a52d9a80SChris Mason bio->bi_bdev = bdev; 1429a52d9a80SChris Mason bio->bi_io_vec[0].bv_page = page; 1430a52d9a80SChris Mason bio->bi_io_vec[0].bv_len = size; 1431a52d9a80SChris Mason bio->bi_io_vec[0].bv_offset = offset; 1432a52d9a80SChris Mason 1433a52d9a80SChris Mason bio->bi_vcnt = 1; 1434a52d9a80SChris Mason bio->bi_idx = 0; 1435a52d9a80SChris Mason bio->bi_size = size; 1436a52d9a80SChris Mason 1437a52d9a80SChris Mason bio->bi_end_io = end_io_func; 1438a52d9a80SChris Mason bio->bi_private = tree; 1439a52d9a80SChris Mason 1440a52d9a80SChris Mason bio_get(bio); 1441a52d9a80SChris Mason submit_bio(rw, bio); 1442a52d9a80SChris Mason 1443a52d9a80SChris Mason if (bio_flagged(bio, BIO_EOPNOTSUPP)) 1444a52d9a80SChris Mason ret = -EOPNOTSUPP; 1445a52d9a80SChris Mason 1446a52d9a80SChris Mason bio_put(bio); 1447a52d9a80SChris Mason return ret; 1448a52d9a80SChris Mason } 1449a52d9a80SChris Mason 1450b3cfa35aSChristoph Hellwig void set_page_extent_mapped(struct page *page) 1451b3cfa35aSChristoph Hellwig { 1452b3cfa35aSChristoph Hellwig if (!PagePrivate(page)) { 1453b3cfa35aSChristoph Hellwig SetPagePrivate(page); 1454b3cfa35aSChristoph Hellwig WARN_ON(!page->mapping->a_ops->invalidatepage); 1455b3cfa35aSChristoph Hellwig set_page_private(page, 1); 1456b3cfa35aSChristoph Hellwig page_cache_get(page); 1457b3cfa35aSChristoph 
Hellwig } 1458b3cfa35aSChristoph Hellwig } 1459b3cfa35aSChristoph Hellwig 1460a52d9a80SChris Mason /* 1461a52d9a80SChris Mason * basic readpage implementation. Locked extent state structs are inserted 1462a52d9a80SChris Mason * into the tree and removed when the IO is done (by the end_io 1463a52d9a80SChris Mason * handlers) 1464a52d9a80SChris Mason */ 1465a52d9a80SChris Mason int extent_read_full_page(struct extent_map_tree *tree, struct page *page, 1466a52d9a80SChris Mason get_extent_t *get_extent) 1467a52d9a80SChris Mason { 1468a52d9a80SChris Mason struct inode *inode = page->mapping->host; 1469a52d9a80SChris Mason u64 start = page->index << PAGE_CACHE_SHIFT; 1470a52d9a80SChris Mason u64 page_end = start + PAGE_CACHE_SIZE - 1; 1471a52d9a80SChris Mason u64 end; 1472a52d9a80SChris Mason u64 cur = start; 1473a52d9a80SChris Mason u64 extent_offset; 1474a52d9a80SChris Mason u64 last_byte = i_size_read(inode); 1475a52d9a80SChris Mason u64 block_start; 1476a52d9a80SChris Mason u64 cur_end; 1477a52d9a80SChris Mason sector_t sector; 1478a52d9a80SChris Mason struct extent_map *em; 1479a52d9a80SChris Mason struct block_device *bdev; 1480a52d9a80SChris Mason int ret; 1481a52d9a80SChris Mason int nr = 0; 1482a52d9a80SChris Mason size_t page_offset = 0; 1483a52d9a80SChris Mason size_t iosize; 1484a52d9a80SChris Mason size_t blocksize = inode->i_sb->s_blocksize; 1485a52d9a80SChris Mason 1486b3cfa35aSChristoph Hellwig set_page_extent_mapped(page); 1487a52d9a80SChris Mason 1488a52d9a80SChris Mason end = page_end; 1489a52d9a80SChris Mason lock_extent(tree, start, end, GFP_NOFS); 1490a52d9a80SChris Mason 1491a52d9a80SChris Mason while (cur <= end) { 1492a52d9a80SChris Mason if (cur >= last_byte) { 1493a52d9a80SChris Mason iosize = PAGE_CACHE_SIZE - page_offset; 1494a52d9a80SChris Mason zero_user_page(page, page_offset, iosize, KM_USER0); 1495a52d9a80SChris Mason set_extent_uptodate(tree, cur, cur + iosize - 1, 1496a52d9a80SChris Mason GFP_NOFS); 1497a52d9a80SChris Mason unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 1498a52d9a80SChris Mason break; 1499a52d9a80SChris Mason } 1500a52d9a80SChris Mason em = get_extent(inode, page, page_offset, cur, end, 0); 1501a52d9a80SChris Mason if (IS_ERR(em) || !em) { 1502a52d9a80SChris Mason SetPageError(page); 1503a52d9a80SChris Mason unlock_extent(tree, cur, end, GFP_NOFS); 1504a52d9a80SChris Mason break; 1505a52d9a80SChris Mason } 1506a52d9a80SChris Mason 1507a52d9a80SChris Mason extent_offset = cur - em->start; 1508a52d9a80SChris Mason BUG_ON(em->end < cur); 1509a52d9a80SChris Mason BUG_ON(end < cur); 1510a52d9a80SChris Mason 1511a52d9a80SChris Mason iosize = min(em->end - cur, end - cur) + 1; 1512a52d9a80SChris Mason cur_end = min(em->end, end); 1513a52d9a80SChris Mason iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1); 1514a52d9a80SChris Mason sector = (em->block_start + extent_offset) >> 9; 1515a52d9a80SChris Mason bdev = em->bdev; 1516a52d9a80SChris Mason block_start = em->block_start; 1517a52d9a80SChris Mason free_extent_map(em); 1518a52d9a80SChris Mason em = NULL; 1519a52d9a80SChris Mason 1520a52d9a80SChris Mason /* we've found a hole, just zero and go on */ 15215f39d397SChris Mason if (block_start == EXTENT_MAP_HOLE) { 1522a52d9a80SChris Mason zero_user_page(page, page_offset, iosize, KM_USER0); 1523a52d9a80SChris Mason set_extent_uptodate(tree, cur, cur + iosize - 1, 1524a52d9a80SChris Mason GFP_NOFS); 1525a52d9a80SChris Mason unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 1526a52d9a80SChris Mason cur = cur + iosize;
1527a52d9a80SChris Mason page_offset += iosize; 1528a52d9a80SChris Mason continue; 1529a52d9a80SChris Mason } 1530a52d9a80SChris Mason /* the get_extent function already copied into the page */ 1531a52d9a80SChris Mason if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) { 1532a52d9a80SChris Mason unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 1533a52d9a80SChris Mason cur = cur + iosize; 1534a52d9a80SChris Mason page_offset += iosize; 1535a52d9a80SChris Mason continue; 1536a52d9a80SChris Mason } 1537a52d9a80SChris Mason 153807157aacSChris Mason ret = 0; 153907157aacSChris Mason if (tree->ops && tree->ops->readpage_io_hook) { 154007157aacSChris Mason ret = tree->ops->readpage_io_hook(page, cur, 154107157aacSChris Mason cur + iosize - 1); 154207157aacSChris Mason } 154307157aacSChris Mason if (!ret) { 1544a52d9a80SChris Mason ret = submit_extent_page(READ, tree, page, 154507157aacSChris Mason sector, iosize, page_offset, 154607157aacSChris Mason bdev, end_bio_extent_readpage); 154707157aacSChris Mason } 1548a52d9a80SChris Mason if (ret) 1549a52d9a80SChris Mason SetPageError(page); 1550a52d9a80SChris Mason cur = cur + iosize; 1551a52d9a80SChris Mason page_offset += iosize; 1552a52d9a80SChris Mason nr++; 1553a52d9a80SChris Mason } 1554a52d9a80SChris Mason if (!nr) { 1555a52d9a80SChris Mason if (!PageError(page)) 1556a52d9a80SChris Mason SetPageUptodate(page); 1557a52d9a80SChris Mason unlock_page(page); 1558a52d9a80SChris Mason } 1559a52d9a80SChris Mason return 0; 1560a52d9a80SChris Mason } 1561a52d9a80SChris Mason EXPORT_SYMBOL(extent_read_full_page); 1562a52d9a80SChris Mason 1563a52d9a80SChris Mason /* 1564a52d9a80SChris Mason * the writepage semantics are similar to regular writepage. extent 1565a52d9a80SChris Mason * records are inserted to lock ranges in the tree, and as dirty areas 1566a52d9a80SChris Mason * are found, they are marked writeback. 
Then the lock bits are removed 1567a52d9a80SChris Mason * and the end_io handler clears the writeback ranges 1568a52d9a80SChris Mason */ 1569a52d9a80SChris Mason int extent_write_full_page(struct extent_map_tree *tree, struct page *page, 1570a52d9a80SChris Mason get_extent_t *get_extent, 1571a52d9a80SChris Mason struct writeback_control *wbc) 1572a52d9a80SChris Mason { 1573a52d9a80SChris Mason struct inode *inode = page->mapping->host; 1574a52d9a80SChris Mason u64 start = page->index << PAGE_CACHE_SHIFT; 1575a52d9a80SChris Mason u64 page_end = start + PAGE_CACHE_SIZE - 1; 1576a52d9a80SChris Mason u64 end; 1577a52d9a80SChris Mason u64 cur = start; 1578a52d9a80SChris Mason u64 extent_offset; 1579a52d9a80SChris Mason u64 last_byte = i_size_read(inode); 1580a52d9a80SChris Mason u64 block_start; 1581a52d9a80SChris Mason sector_t sector; 1582a52d9a80SChris Mason struct extent_map *em; 1583a52d9a80SChris Mason struct block_device *bdev; 1584a52d9a80SChris Mason int ret; 1585a52d9a80SChris Mason int nr = 0; 1586a52d9a80SChris Mason size_t page_offset = 0; 1587a52d9a80SChris Mason size_t iosize; 1588a52d9a80SChris Mason size_t blocksize; 1589a52d9a80SChris Mason loff_t i_size = i_size_read(inode); 1590a52d9a80SChris Mason unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; 1591b888db2bSChris Mason u64 nr_delalloc; 1592b888db2bSChris Mason u64 delalloc_end; 1593a52d9a80SChris Mason 1594b888db2bSChris Mason WARN_ON(!PageLocked(page)); 1595a52d9a80SChris Mason if (page->index > end_index) { 1596a52d9a80SChris Mason clear_extent_dirty(tree, start, page_end, GFP_NOFS); 1597a52d9a80SChris Mason unlock_page(page); 1598a52d9a80SChris Mason return 0; 1599a52d9a80SChris Mason } 1600a52d9a80SChris Mason 1601a52d9a80SChris Mason if (page->index == end_index) { 1602a52d9a80SChris Mason size_t offset = i_size & (PAGE_CACHE_SIZE - 1); 1603a52d9a80SChris Mason zero_user_page(page, offset, 1604a52d9a80SChris Mason PAGE_CACHE_SIZE - offset, KM_USER0); 1605a52d9a80SChris Mason } 1606a52d9a80SChris Mason 1607b3cfa35aSChristoph Hellwig set_page_extent_mapped(page); 1608a52d9a80SChris Mason 1609a52d9a80SChris Mason lock_extent(tree, start, page_end, GFP_NOFS); 1610b888db2bSChris Mason nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1, 1611b888db2bSChris Mason &delalloc_end, 1612b888db2bSChris Mason 128 * 1024 * 1024); 1613b888db2bSChris Mason if (nr_delalloc) { 161407157aacSChris Mason if (tree->ops && tree->ops->fill_delalloc) tree->ops->fill_delalloc(inode, start, delalloc_end); 1615b888db2bSChris Mason if (delalloc_end >= page_end + 1) { 1616b888db2bSChris Mason clear_extent_bit(tree, page_end + 1, delalloc_end, 1617b888db2bSChris Mason EXTENT_LOCKED | EXTENT_DELALLOC, 1618b888db2bSChris Mason 1, 0, GFP_NOFS); 1619b888db2bSChris Mason } 1620b888db2bSChris Mason clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC, 1621b888db2bSChris Mason 0, 0, GFP_NOFS); 1622b888db2bSChris Mason if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { 1623b888db2bSChris Mason printk("found delalloc bits after clear extent_bit\n"); 1624b888db2bSChris Mason } 1625b888db2bSChris Mason } else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { 1626b888db2bSChris Mason printk("found delalloc bits after find_delalloc_range returns 0\n"); 1627b888db2bSChris Mason } 1628b888db2bSChris Mason 1629b888db2bSChris Mason end = page_end; 1630b888db2bSChris Mason if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { 1631b888db2bSChris Mason printk("found delalloc bits after lock_extent\n"); 1632b888db2bSChris Mason } 1633a52d9a80SChris
Mason 1634a52d9a80SChris Mason if (last_byte <= start) { 1635a52d9a80SChris Mason clear_extent_dirty(tree, start, page_end, GFP_NOFS); 1636a52d9a80SChris Mason goto done; 1637a52d9a80SChris Mason } 1638a52d9a80SChris Mason 1639a52d9a80SChris Mason set_extent_uptodate(tree, start, page_end, GFP_NOFS); 1640a52d9a80SChris Mason blocksize = inode->i_sb->s_blocksize; 1641a52d9a80SChris Mason 1642a52d9a80SChris Mason while (cur <= end) { 1643a52d9a80SChris Mason if (cur >= last_byte) { 1644a52d9a80SChris Mason clear_extent_dirty(tree, cur, page_end, GFP_NOFS); 1645a52d9a80SChris Mason break; 1646a52d9a80SChris Mason } 1647b888db2bSChris Mason em = get_extent(inode, page, page_offset, cur, end, 0); 1648a52d9a80SChris Mason if (IS_ERR(em) || !em) { 1649a52d9a80SChris Mason SetPageError(page); 1650a52d9a80SChris Mason break; 1651a52d9a80SChris Mason } 1652a52d9a80SChris Mason 1653a52d9a80SChris Mason extent_offset = cur - em->start; 1654a52d9a80SChris Mason BUG_ON(em->end < cur); 1655a52d9a80SChris Mason BUG_ON(end < cur); 1656a52d9a80SChris Mason iosize = min(em->end - cur, end - cur) + 1; 1657a52d9a80SChris Mason iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1); 1658a52d9a80SChris Mason sector = (em->block_start + extent_offset) >> 9; 1659a52d9a80SChris Mason bdev = em->bdev; 1660a52d9a80SChris Mason block_start = em->block_start; 1661a52d9a80SChris Mason free_extent_map(em); 1662a52d9a80SChris Mason em = NULL; 1663a52d9a80SChris Mason 16645f39d397SChris Mason if (block_start == EXTENT_MAP_HOLE || 16655f39d397SChris Mason block_start == EXTENT_MAP_INLINE) { 1666a52d9a80SChris Mason clear_extent_dirty(tree, cur, 1667a52d9a80SChris Mason cur + iosize - 1, GFP_NOFS); 1668a52d9a80SChris Mason cur = cur + iosize; 1669a52d9a80SChris Mason page_offset += iosize; 1670a52d9a80SChris Mason continue; 1671a52d9a80SChris Mason } 1672a52d9a80SChris Mason 1673a52d9a80SChris Mason /* leave this out until we have a page_mkwrite call */ 1674a52d9a80SChris Mason if (0 && !test_range_bit(tree, cur, cur + iosize - 1, 1675a52d9a80SChris Mason EXTENT_DIRTY, 0)) { 1676a52d9a80SChris Mason cur = cur + iosize; 1677a52d9a80SChris Mason page_offset += iosize; 1678a52d9a80SChris Mason continue; 1679a52d9a80SChris Mason } 1680a52d9a80SChris Mason clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); 1681b06355f0SChristoph Hellwig if (tree->ops && tree->ops->writepage_io_hook) { 1682b06355f0SChristoph Hellwig ret = tree->ops->writepage_io_hook(page, cur, 1683b06355f0SChristoph Hellwig cur + iosize - 1); 1684b06355f0SChristoph Hellwig } else { 1685b06355f0SChristoph Hellwig ret = 0; 1686b06355f0SChristoph Hellwig } 168707157aacSChris Mason if (ret) 168807157aacSChris Mason SetPageError(page); 168907157aacSChris Mason else { 1690a52d9a80SChris Mason set_range_writeback(tree, cur, cur + iosize - 1); 169107157aacSChris Mason ret = submit_extent_page(WRITE, tree, page, sector, 169207157aacSChris Mason iosize, page_offset, bdev, 1693a52d9a80SChris Mason end_bio_extent_writepage); 1694a52d9a80SChris Mason if (ret) 1695a52d9a80SChris Mason SetPageError(page); 169607157aacSChris Mason } 1697a52d9a80SChris Mason cur = cur + iosize; 1698a52d9a80SChris Mason page_offset += iosize; 1699a52d9a80SChris Mason nr++; 1700a52d9a80SChris Mason } 1701a52d9a80SChris Mason done: 1702a52d9a80SChris Mason unlock_extent(tree, start, page_end, GFP_NOFS); 1703a52d9a80SChris Mason unlock_page(page); 1704a52d9a80SChris Mason return 0; 1705a52d9a80SChris Mason } 1706a52d9a80SChris Mason EXPORT_SYMBOL(extent_write_full_page); 1707a52d9a80SChris 
Mason 1708a52d9a80SChris Mason /* 1709a52d9a80SChris Mason * basic invalidatepage code, this waits on any locked or writeback 1710a52d9a80SChris Mason * ranges corresponding to the page, and then deletes any extent state 1711a52d9a80SChris Mason * records from the tree 1712a52d9a80SChris Mason */ 1713a52d9a80SChris Mason int extent_invalidatepage(struct extent_map_tree *tree, 1714a52d9a80SChris Mason struct page *page, unsigned long offset) 1715a52d9a80SChris Mason { 1716a52d9a80SChris Mason u64 start = (page->index << PAGE_CACHE_SHIFT); 1717a52d9a80SChris Mason u64 end = start + PAGE_CACHE_SIZE - 1; 1718a52d9a80SChris Mason size_t blocksize = page->mapping->host->i_sb->s_blocksize; 1719a52d9a80SChris Mason 1720a52d9a80SChris Mason start += (offset + blocksize -1) & ~(blocksize - 1); 1721a52d9a80SChris Mason if (start > end) 1722a52d9a80SChris Mason return 0; 1723a52d9a80SChris Mason 1724a52d9a80SChris Mason lock_extent(tree, start, end, GFP_NOFS); 1725a52d9a80SChris Mason wait_on_extent_writeback(tree, start, end); 17262bf5a725SChris Mason clear_extent_bit(tree, start, end, 17272bf5a725SChris Mason EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC, 1728a52d9a80SChris Mason 1, 1, GFP_NOFS); 1729a52d9a80SChris Mason return 0; 1730a52d9a80SChris Mason } 1731a52d9a80SChris Mason EXPORT_SYMBOL(extent_invalidatepage); 1732a52d9a80SChris Mason 1733a52d9a80SChris Mason /* 1734a52d9a80SChris Mason * simple commit_write call, set_range_dirty is used to mark both 1735a52d9a80SChris Mason * the pages and the extent records as dirty 1736a52d9a80SChris Mason */ 1737a52d9a80SChris Mason int extent_commit_write(struct extent_map_tree *tree, 1738a52d9a80SChris Mason struct inode *inode, struct page *page, 1739a52d9a80SChris Mason unsigned from, unsigned to) 1740a52d9a80SChris Mason { 1741a52d9a80SChris Mason loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; 1742a52d9a80SChris Mason 1743b3cfa35aSChristoph Hellwig set_page_extent_mapped(page); 1744a52d9a80SChris Mason set_page_dirty(page); 1745a52d9a80SChris Mason 1746a52d9a80SChris Mason if (pos > inode->i_size) { 1747a52d9a80SChris Mason i_size_write(inode, pos); 1748a52d9a80SChris Mason mark_inode_dirty(inode); 1749a52d9a80SChris Mason } 1750a52d9a80SChris Mason return 0; 1751a52d9a80SChris Mason } 1752a52d9a80SChris Mason EXPORT_SYMBOL(extent_commit_write); 1753a52d9a80SChris Mason 1754a52d9a80SChris Mason int extent_prepare_write(struct extent_map_tree *tree, 1755a52d9a80SChris Mason struct inode *inode, struct page *page, 1756a52d9a80SChris Mason unsigned from, unsigned to, get_extent_t *get_extent) 1757a52d9a80SChris Mason { 1758a52d9a80SChris Mason u64 page_start = page->index << PAGE_CACHE_SHIFT; 1759a52d9a80SChris Mason u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 1760a52d9a80SChris Mason u64 block_start; 1761a52d9a80SChris Mason u64 orig_block_start; 1762a52d9a80SChris Mason u64 block_end; 1763a52d9a80SChris Mason u64 cur_end; 1764a52d9a80SChris Mason struct extent_map *em; 1765a52d9a80SChris Mason unsigned blocksize = 1 << inode->i_blkbits; 1766a52d9a80SChris Mason size_t page_offset = 0; 1767a52d9a80SChris Mason size_t block_off_start; 1768a52d9a80SChris Mason size_t block_off_end; 1769a52d9a80SChris Mason int err = 0; 1770a52d9a80SChris Mason int iocount = 0; 1771a52d9a80SChris Mason int ret = 0; 1772a52d9a80SChris Mason int isnew; 1773a52d9a80SChris Mason 1774b3cfa35aSChristoph Hellwig set_page_extent_mapped(page); 1775b3cfa35aSChristoph Hellwig 1776a52d9a80SChris Mason block_start = (page_start + from) & ~((u64)blocksize - 1); 
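	/*
	 * round the write range out to whole blocks: block_start above
	 * rounds down and block_end below rounds up.  With a 1024 byte
	 * blocksize, from == 100 and to == 3000 become offsets 0 and
	 * 3071 relative to page_start.
	 */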
1777a52d9a80SChris Mason block_end = (page_start + to - 1) | (blocksize - 1); 1778a52d9a80SChris Mason orig_block_start = block_start; 1779a52d9a80SChris Mason 1780a52d9a80SChris Mason lock_extent(tree, page_start, page_end, GFP_NOFS); 1781a52d9a80SChris Mason while(block_start <= block_end) { 1782a52d9a80SChris Mason em = get_extent(inode, page, page_offset, block_start, 1783a52d9a80SChris Mason block_end, 1); 1784a52d9a80SChris Mason if (IS_ERR(em) || !em) { 1785a52d9a80SChris Mason goto err; 1786a52d9a80SChris Mason } 1787a52d9a80SChris Mason cur_end = min(block_end, em->end); 1788a52d9a80SChris Mason block_off_start = block_start & (PAGE_CACHE_SIZE - 1); 1789a52d9a80SChris Mason block_off_end = block_off_start + blocksize; 1790a52d9a80SChris Mason isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS); 1791a52d9a80SChris Mason 1792a52d9a80SChris Mason if (!PageUptodate(page) && isnew && 1793a52d9a80SChris Mason (block_off_end > to || block_off_start < from)) { 1794a52d9a80SChris Mason void *kaddr; 1795a52d9a80SChris Mason 1796a52d9a80SChris Mason kaddr = kmap_atomic(page, KM_USER0); 1797a52d9a80SChris Mason if (block_off_end > to) 1798a52d9a80SChris Mason memset(kaddr + to, 0, block_off_end - to); 1799a52d9a80SChris Mason if (block_off_start < from) 1800a52d9a80SChris Mason memset(kaddr + block_off_start, 0, 1801a52d9a80SChris Mason from - block_off_start); 1802a52d9a80SChris Mason flush_dcache_page(page); 1803a52d9a80SChris Mason kunmap_atomic(kaddr, KM_USER0); 1804a52d9a80SChris Mason } 1805a52d9a80SChris Mason if (!isnew && !PageUptodate(page) && 1806a52d9a80SChris Mason (block_off_end > to || block_off_start < from) && 1807a52d9a80SChris Mason !test_range_bit(tree, block_start, cur_end, 1808a52d9a80SChris Mason EXTENT_UPTODATE, 1)) { 1809a52d9a80SChris Mason u64 sector; 1810a52d9a80SChris Mason u64 extent_offset = block_start - em->start; 1811a52d9a80SChris Mason size_t iosize; 1812a52d9a80SChris Mason sector = (em->block_start + extent_offset) >> 9; 1813a52d9a80SChris Mason iosize = (cur_end - block_start + blocksize - 1) & 1814a52d9a80SChris Mason ~((u64)blocksize - 1); 1815a52d9a80SChris Mason /* 1816a52d9a80SChris Mason * we've already got the extent locked, but we 1817a52d9a80SChris Mason * need to split the state such that our end_bio 1818a52d9a80SChris Mason * handler can clear the lock. 
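 * A read of a single block inside a larger locked range, for instance,
 * must be unlockable on its own when its bio completes, without
 * dropping the lock on the rest of the range.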
1819a52d9a80SChris Mason */ 1820a52d9a80SChris Mason set_extent_bit(tree, block_start, 1821a52d9a80SChris Mason block_start + iosize - 1, 1822a52d9a80SChris Mason EXTENT_LOCKED, 0, NULL, GFP_NOFS); 1823a52d9a80SChris Mason ret = submit_extent_page(READ, tree, page, 1824a52d9a80SChris Mason sector, iosize, page_offset, em->bdev, 1825a52d9a80SChris Mason end_bio_extent_preparewrite); 1826a52d9a80SChris Mason iocount++; 1827a52d9a80SChris Mason block_start = block_start + iosize; 1828a52d9a80SChris Mason } else { 1829a52d9a80SChris Mason set_extent_uptodate(tree, block_start, cur_end, 1830a52d9a80SChris Mason GFP_NOFS); 1831a52d9a80SChris Mason unlock_extent(tree, block_start, cur_end, GFP_NOFS); 1832a52d9a80SChris Mason block_start = cur_end + 1; 1833a52d9a80SChris Mason } 1834a52d9a80SChris Mason page_offset = block_start & (PAGE_CACHE_SIZE - 1); 1835a52d9a80SChris Mason free_extent_map(em); 1836a52d9a80SChris Mason } 1837a52d9a80SChris Mason if (iocount) { 1838a52d9a80SChris Mason wait_extent_bit(tree, orig_block_start, 1839a52d9a80SChris Mason block_end, EXTENT_LOCKED); 1840a52d9a80SChris Mason } 1841a52d9a80SChris Mason check_page_uptodate(tree, page); 1842a52d9a80SChris Mason err: 1843a52d9a80SChris Mason /* FIXME, zero out newly allocated blocks on error */ 1844a52d9a80SChris Mason return err; 1845a52d9a80SChris Mason } 1846a52d9a80SChris Mason EXPORT_SYMBOL(extent_prepare_write); 1847a52d9a80SChris Mason 1848a52d9a80SChris Mason /* 1849a52d9a80SChris Mason * a helper for releasepage. As long as there are no locked extents 1850a52d9a80SChris Mason * in the range corresponding to the page, both state records and extent 1851a52d9a80SChris Mason * map records are removed 1852a52d9a80SChris Mason */ 1853a52d9a80SChris Mason int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page) 1854a52d9a80SChris Mason { 1855a52d9a80SChris Mason struct extent_map *em; 1856a52d9a80SChris Mason u64 start = page->index << PAGE_CACHE_SHIFT; 1857a52d9a80SChris Mason u64 end = start + PAGE_CACHE_SIZE - 1; 1858a52d9a80SChris Mason u64 orig_start = start; 1859b888db2bSChris Mason int ret = 1; 1860a52d9a80SChris Mason 1861a52d9a80SChris Mason while (start <= end) { 1862a52d9a80SChris Mason em = lookup_extent_mapping(tree, start, end); 1863a52d9a80SChris Mason if (!em || IS_ERR(em)) 1864a52d9a80SChris Mason break; 1865b888db2bSChris Mason if (!test_range_bit(tree, em->start, em->end, 1866a52d9a80SChris Mason EXTENT_LOCKED, 0)) { 1867a52d9a80SChris Mason remove_extent_mapping(tree, em); 1868a52d9a80SChris Mason /* once for the rb tree */ 1869a52d9a80SChris Mason free_extent_map(em); 1870b888db2bSChris Mason } 1871b888db2bSChris Mason start = em->end + 1; 1872a52d9a80SChris Mason /* once for us */ 1873a52d9a80SChris Mason free_extent_map(em); 1874a52d9a80SChris Mason } 1875b888db2bSChris Mason if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0)) 1876b888db2bSChris Mason ret = 0; 1877b888db2bSChris Mason else 1878a52d9a80SChris Mason clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE, 1879a52d9a80SChris Mason 1, 1, GFP_NOFS); 1880b888db2bSChris Mason return ret; 1881a52d9a80SChris Mason } 1882a52d9a80SChris Mason EXPORT_SYMBOL(try_release_extent_mapping); 1883a52d9a80SChris Mason 1884d396c6f5SChristoph Hellwig sector_t extent_bmap(struct address_space *mapping, sector_t iblock, 1885d396c6f5SChristoph Hellwig get_extent_t *get_extent) 1886d396c6f5SChristoph Hellwig { 1887d396c6f5SChristoph Hellwig struct inode *inode = mapping->host; 1888d396c6f5SChristoph Hellwig u64 start = iblock 
<< inode->i_blkbits; 1889d396c6f5SChristoph Hellwig u64 end = start + (1 << inode->i_blkbits) - 1; 1890d396c6f5SChristoph Hellwig struct extent_map *em; sector_t block; 1891d396c6f5SChristoph Hellwig 1892d396c6f5SChristoph Hellwig em = get_extent(inode, NULL, 0, start, end, 0); 1893d396c6f5SChristoph Hellwig if (!em || IS_ERR(em)) 1894d396c6f5SChristoph Hellwig return 0; 1895d396c6f5SChristoph Hellwig 1896d396c6f5SChristoph Hellwig if (em->block_start == EXTENT_MAP_INLINE || 18975f39d397SChris Mason em->block_start == EXTENT_MAP_HOLE) { free_extent_map(em); 1898d396c6f5SChristoph Hellwig return 0; } 1899d396c6f5SChristoph Hellwig 1900d396c6f5SChristoph Hellwig block = (em->block_start + start - em->start) >> inode->i_blkbits; free_extent_map(em); return block; 1901d396c6f5SChristoph Hellwig } 19025f39d397SChris Mason 19036d36dcd4SChris Mason static struct extent_buffer *__alloc_extent_buffer(gfp_t mask) 19046d36dcd4SChris Mason { 19056d36dcd4SChris Mason struct extent_buffer *eb = NULL; 1906*f510cfecSChris Mason 19076d36dcd4SChris Mason spin_lock(&extent_buffers_lock); 19086d36dcd4SChris Mason if (!list_empty(&extent_buffers)) { 19096d36dcd4SChris Mason eb = list_entry(extent_buffers.next, struct extent_buffer, 19106d36dcd4SChris Mason list); 19116d36dcd4SChris Mason list_del(&eb->list); 19126d36dcd4SChris Mason WARN_ON(nr_extent_buffers == 0); 19136d36dcd4SChris Mason nr_extent_buffers--; 19146d36dcd4SChris Mason } 19156d36dcd4SChris Mason spin_unlock(&extent_buffers_lock); 1916*f510cfecSChris Mason 19176d36dcd4SChris Mason if (eb) { 19186d36dcd4SChris Mason memset(eb, 0, sizeof(*eb)); 1919*f510cfecSChris Mason } else { 1920*f510cfecSChris Mason eb = kmem_cache_zalloc(extent_buffer_cache, mask); 19216d36dcd4SChris Mason } if (!eb) return NULL; 1922*f510cfecSChris Mason spin_lock(&extent_buffers_lock); 1923*f510cfecSChris Mason list_add(&eb->leak_list, &buffers); 1924*f510cfecSChris Mason spin_unlock(&extent_buffers_lock); 1925*f510cfecSChris Mason 1926*f510cfecSChris Mason return eb; 19276d36dcd4SChris Mason } 19286d36dcd4SChris Mason 19296d36dcd4SChris Mason static void __free_extent_buffer(struct extent_buffer *eb) 19306d36dcd4SChris Mason { 1931*f510cfecSChris Mason 1932*f510cfecSChris Mason spin_lock(&extent_buffers_lock); 1933*f510cfecSChris Mason list_del_init(&eb->leak_list); 1934*f510cfecSChris Mason spin_unlock(&extent_buffers_lock); 1935*f510cfecSChris Mason 19366d36dcd4SChris Mason if (nr_extent_buffers >= MAX_EXTENT_BUFFER_CACHE) { 19376d36dcd4SChris Mason kmem_cache_free(extent_buffer_cache, eb); 19386d36dcd4SChris Mason } else { 19396d36dcd4SChris Mason spin_lock(&extent_buffers_lock); 19406d36dcd4SChris Mason list_add(&eb->list, &extent_buffers); 19416d36dcd4SChris Mason nr_extent_buffers++; 19426d36dcd4SChris Mason spin_unlock(&extent_buffers_lock); 19436d36dcd4SChris Mason } 19446d36dcd4SChris Mason } 19456d36dcd4SChris Mason 19466d36dcd4SChris Mason static inline struct page *extent_buffer_page(struct extent_buffer *eb, int i) 19476d36dcd4SChris Mason { 19486d36dcd4SChris Mason struct page *p; 19496d36dcd4SChris Mason if (i == 0) 19506d36dcd4SChris Mason return eb->first_page; 19516d36dcd4SChris Mason i += eb->start >> PAGE_CACHE_SHIFT; 19526d36dcd4SChris Mason p = find_get_page(eb->first_page->mapping, i); 19536d36dcd4SChris Mason page_cache_release(p); 19546d36dcd4SChris Mason return p; 19556d36dcd4SChris Mason } 19566d36dcd4SChris Mason 19575f39d397SChris Mason struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree, 19585f39d397SChris Mason u64 start, unsigned long len, 19595f39d397SChris Mason gfp_t mask) 19605f39d397SChris Mason {
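	/*
	 * num_pages below counts the pages the buffer touches rather than
	 * len >> PAGE_CACHE_SHIFT: with 4k pages, a 4k buffer starting 2k
	 * into a page spans two pages.
	 */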
19615f39d397SChris Mason unsigned long num_pages = ((start + len - 1) >> PAGE_CACHE_SHIFT) - 19625f39d397SChris Mason (start >> PAGE_CACHE_SHIFT) + 1; 19635f39d397SChris Mason unsigned long i; 19645f39d397SChris Mason unsigned long index = start >> PAGE_CACHE_SHIFT; 19655f39d397SChris Mason struct extent_buffer *eb; 19665f39d397SChris Mason struct page *p; 19675f39d397SChris Mason struct address_space *mapping = tree->mapping; 19685f39d397SChris Mason int uptodate = 0; 19695f39d397SChris Mason 19706d36dcd4SChris Mason eb = __alloc_extent_buffer(mask); 19715f39d397SChris Mason if (!eb || IS_ERR(eb)) 19725f39d397SChris Mason return NULL; 19735f39d397SChris Mason 1974*f510cfecSChris Mason eb->alloc_addr = __builtin_return_address(0); 19755f39d397SChris Mason eb->start = start; 19765f39d397SChris Mason eb->len = len; 19775f39d397SChris Mason atomic_set(&eb->refs, 1); 19785f39d397SChris Mason 19795f39d397SChris Mason for (i = 0; i < num_pages; i++, index++) { 19805f39d397SChris Mason p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM); 19816d36dcd4SChris Mason if (!p) { 19826d36dcd4SChris Mason /* make sure the free only frees the pages we've 19836d36dcd4SChris Mason * grabbed a reference on 19846d36dcd4SChris Mason */ 19856d36dcd4SChris Mason eb->len = i << PAGE_CACHE_SHIFT; 19866d36dcd4SChris Mason eb->start &= ~((u64)PAGE_CACHE_SIZE - 1); 19875f39d397SChris Mason goto fail; 19886d36dcd4SChris Mason } 1989*f510cfecSChris Mason set_page_extent_mapped(p); 19906d36dcd4SChris Mason if (i == 0) 19916d36dcd4SChris Mason eb->first_page = p; 19925f39d397SChris Mason if (!PageUptodate(p)) 19935f39d397SChris Mason uptodate = 0; 19945f39d397SChris Mason unlock_page(p); 19955f39d397SChris Mason } 19965f39d397SChris Mason if (uptodate) 19975f39d397SChris Mason eb->flags |= EXTENT_UPTODATE; 19985f39d397SChris Mason return eb; 19995f39d397SChris Mason fail: 20005f39d397SChris Mason free_extent_buffer(eb); 20015f39d397SChris Mason return NULL; 20025f39d397SChris Mason } 20035f39d397SChris Mason EXPORT_SYMBOL(alloc_extent_buffer); 20045f39d397SChris Mason 20055f39d397SChris Mason struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree, 20065f39d397SChris Mason u64 start, unsigned long len, 20075f39d397SChris Mason gfp_t mask) 20085f39d397SChris Mason { 20095f39d397SChris Mason unsigned long num_pages = ((start + len - 1) >> PAGE_CACHE_SHIFT) - 20105f39d397SChris Mason (start >> PAGE_CACHE_SHIFT) + 1; 20115f39d397SChris Mason unsigned long i; 20125f39d397SChris Mason unsigned long index = start >> PAGE_CACHE_SHIFT; 20135f39d397SChris Mason struct extent_buffer *eb; 20145f39d397SChris Mason struct page *p; 20155f39d397SChris Mason struct address_space *mapping = tree->mapping; 20165f39d397SChris Mason 20176d36dcd4SChris Mason eb = __alloc_extent_buffer(mask); 20185f39d397SChris Mason if (!eb || IS_ERR(eb)) 20195f39d397SChris Mason return NULL; 20205f39d397SChris Mason 2021*f510cfecSChris Mason eb->alloc_addr = __builtin_return_address(0); 20225f39d397SChris Mason eb->start = start; 20235f39d397SChris Mason eb->len = len; 20245f39d397SChris Mason atomic_set(&eb->refs, 1); 20255f39d397SChris Mason 20265f39d397SChris Mason for (i = 0; i < num_pages; i++, index++) { 20275f39d397SChris Mason p = find_get_page(mapping, index); 20286d36dcd4SChris Mason if (!p) { 20296d36dcd4SChris Mason /* make sure the free only frees the pages we've 20306d36dcd4SChris Mason * grabbed a reference on 20316d36dcd4SChris Mason */ 20326d36dcd4SChris Mason eb->len = i << PAGE_CACHE_SHIFT; 20336d36dcd4SChris Mason 
eb->start &= ~((u64)PAGE_CACHE_SIZE - 1); 20345f39d397SChris Mason goto fail; 20356d36dcd4SChris Mason } 2036*f510cfecSChris Mason set_page_extent_mapped(p); 20376d36dcd4SChris Mason if (i == 0) 20386d36dcd4SChris Mason eb->first_page = p; 20395f39d397SChris Mason } 20405f39d397SChris Mason return eb; 20415f39d397SChris Mason fail: 20425f39d397SChris Mason free_extent_buffer(eb); 20435f39d397SChris Mason return NULL; 20445f39d397SChris Mason } 20455f39d397SChris Mason EXPORT_SYMBOL(find_extent_buffer); 20465f39d397SChris Mason 20475f39d397SChris Mason void free_extent_buffer(struct extent_buffer *eb) 20485f39d397SChris Mason { 20495f39d397SChris Mason unsigned long i; 20505f39d397SChris Mason unsigned long num_pages; 20515f39d397SChris Mason 20525f39d397SChris Mason if (!eb) 20535f39d397SChris Mason return; 20545f39d397SChris Mason 20555f39d397SChris Mason if (!atomic_dec_and_test(&eb->refs)) 20565f39d397SChris Mason return; 20575f39d397SChris Mason 20585f39d397SChris Mason num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) - 20595f39d397SChris Mason (eb->start >> PAGE_CACHE_SHIFT) + 1; 20605f39d397SChris Mason 20616d36dcd4SChris Mason if (eb->first_page) 20626d36dcd4SChris Mason page_cache_release(eb->first_page); 20636d36dcd4SChris Mason for (i = 1; i < num_pages; i++) { 20646d36dcd4SChris Mason page_cache_release(extent_buffer_page(eb, i)); 20655f39d397SChris Mason } 20666d36dcd4SChris Mason __free_extent_buffer(eb); 20675f39d397SChris Mason } 20685f39d397SChris Mason EXPORT_SYMBOL(free_extent_buffer); 20695f39d397SChris Mason 20705f39d397SChris Mason int clear_extent_buffer_dirty(struct extent_map_tree *tree, 20715f39d397SChris Mason struct extent_buffer *eb) 20725f39d397SChris Mason { 20735f39d397SChris Mason int set; 20745f39d397SChris Mason unsigned long i; 20755f39d397SChris Mason unsigned long num_pages; 20765f39d397SChris Mason struct page *page; 20775f39d397SChris Mason 20785f39d397SChris Mason u64 start = eb->start; 20795f39d397SChris Mason u64 end = start + eb->len - 1; 20805f39d397SChris Mason 20815f39d397SChris Mason set = clear_extent_dirty(tree, start, end, GFP_NOFS); 20825f39d397SChris Mason num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) - 20835f39d397SChris Mason (eb->start >> PAGE_CACHE_SHIFT) + 1; 20845f39d397SChris Mason 20855f39d397SChris Mason for (i = 0; i < num_pages; i++) { 20866d36dcd4SChris Mason page = extent_buffer_page(eb, i); 20875f39d397SChris Mason lock_page(page); 20885f39d397SChris Mason /* 20895f39d397SChris Mason * if we're on the last page or the first page and the 20905f39d397SChris Mason * block isn't aligned on a page boundary, do extra checks 20915f39d397SChris Mason * to make sure we don't clean page that is partially dirty 20925f39d397SChris Mason */ 20935f39d397SChris Mason if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || 20945f39d397SChris Mason ((i == num_pages - 1) && 20955f39d397SChris Mason ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) { 20965f39d397SChris Mason start = page->index << PAGE_CACHE_SHIFT; 20975f39d397SChris Mason end = start + PAGE_CACHE_SIZE - 1; 20985f39d397SChris Mason if (test_range_bit(tree, start, end, 20995f39d397SChris Mason EXTENT_DIRTY, 0)) { 21005f39d397SChris Mason unlock_page(page); 21015f39d397SChris Mason continue; 21025f39d397SChris Mason } 21035f39d397SChris Mason } 21045f39d397SChris Mason clear_page_dirty_for_io(page); 21055f39d397SChris Mason unlock_page(page); 21065f39d397SChris Mason } 21075f39d397SChris Mason return 0; 21085f39d397SChris Mason } 21095f39d397SChris 
Mason EXPORT_SYMBOL(clear_extent_buffer_dirty); 21105f39d397SChris Mason 21115f39d397SChris Mason int wait_on_extent_buffer_writeback(struct extent_map_tree *tree, 21125f39d397SChris Mason struct extent_buffer *eb) 21135f39d397SChris Mason { 21145f39d397SChris Mason return wait_on_extent_writeback(tree, eb->start, 21155f39d397SChris Mason eb->start + eb->len - 1); 21165f39d397SChris Mason } 21175f39d397SChris Mason EXPORT_SYMBOL(wait_on_extent_buffer_writeback); 21185f39d397SChris Mason 21195f39d397SChris Mason int set_extent_buffer_dirty(struct extent_map_tree *tree, 21205f39d397SChris Mason struct extent_buffer *eb) 21215f39d397SChris Mason { 21225f39d397SChris Mason return set_range_dirty(tree, eb->start, eb->start + eb->len - 1); 21235f39d397SChris Mason } 21245f39d397SChris Mason EXPORT_SYMBOL(set_extent_buffer_dirty); 21255f39d397SChris Mason 21265f39d397SChris Mason int set_extent_buffer_uptodate(struct extent_map_tree *tree, 21275f39d397SChris Mason struct extent_buffer *eb) 21285f39d397SChris Mason { 21295f39d397SChris Mason unsigned long i; 21305f39d397SChris Mason struct page *page; 21315f39d397SChris Mason unsigned long num_pages; 21325f39d397SChris Mason 21335f39d397SChris Mason num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) - 21345f39d397SChris Mason (eb->start >> PAGE_CACHE_SHIFT) + 1; 21355f39d397SChris Mason 21365f39d397SChris Mason set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, 21375f39d397SChris Mason GFP_NOFS); 21385f39d397SChris Mason for (i = 0; i < num_pages; i++) { 21396d36dcd4SChris Mason page = extent_buffer_page(eb, i); 21405f39d397SChris Mason if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || 21415f39d397SChris Mason ((i == num_pages - 1) && 21425f39d397SChris Mason ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) { 21435f39d397SChris Mason check_page_uptodate(tree, page); 21445f39d397SChris Mason continue; 21455f39d397SChris Mason } 21465f39d397SChris Mason SetPageUptodate(page); 21475f39d397SChris Mason } 21485f39d397SChris Mason return 0; 21495f39d397SChris Mason } 21505f39d397SChris Mason EXPORT_SYMBOL(set_extent_buffer_uptodate); 21515f39d397SChris Mason 21525f39d397SChris Mason int extent_buffer_uptodate(struct extent_map_tree *tree, 21535f39d397SChris Mason struct extent_buffer *eb) 21545f39d397SChris Mason { 21555f39d397SChris Mason if (eb->flags & EXTENT_UPTODATE) 21565f39d397SChris Mason return 1; 21575f39d397SChris Mason return test_range_bit(tree, eb->start, eb->start + eb->len - 1, 21585f39d397SChris Mason EXTENT_UPTODATE, 1); 21595f39d397SChris Mason } 21605f39d397SChris Mason EXPORT_SYMBOL(extent_buffer_uptodate); 21615f39d397SChris Mason 21625f39d397SChris Mason int read_extent_buffer_pages(struct extent_map_tree *tree, 21635f39d397SChris Mason struct extent_buffer *eb, int wait) 21645f39d397SChris Mason { 21655f39d397SChris Mason unsigned long i; 21665f39d397SChris Mason struct page *page; 21675f39d397SChris Mason int err; 21685f39d397SChris Mason int ret = 0; 21695f39d397SChris Mason unsigned long num_pages; 21705f39d397SChris Mason 21715f39d397SChris Mason if (eb->flags & EXTENT_UPTODATE) 21725f39d397SChris Mason return 0; 21735f39d397SChris Mason 21745f39d397SChris Mason if (test_range_bit(tree, eb->start, eb->start + eb->len - 1, 21755f39d397SChris Mason EXTENT_UPTODATE, 1)) { 21765f39d397SChris Mason return 0; 21775f39d397SChris Mason } 21785f39d397SChris Mason 21795f39d397SChris Mason num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) - 21805f39d397SChris Mason (eb->start >> PAGE_CACHE_SHIFT) 
+ 1; 21815f39d397SChris Mason for (i = 0; i < num_pages; i++) { 21826d36dcd4SChris Mason page = extent_buffer_page(eb, i); 21835f39d397SChris Mason if (PageUptodate(page)) { 21845f39d397SChris Mason continue; 21855f39d397SChris Mason } 21865f39d397SChris Mason if (!wait) { 21875f39d397SChris Mason if (TestSetPageLocked(page)) { 21885f39d397SChris Mason continue; 21895f39d397SChris Mason } 21905f39d397SChris Mason } else { 21915f39d397SChris Mason lock_page(page); 21925f39d397SChris Mason } 21935f39d397SChris Mason if (!PageUptodate(page)) { 21945f39d397SChris Mason err = page->mapping->a_ops->readpage(NULL, page); 21955f39d397SChris Mason if (err) { 21965f39d397SChris Mason ret = err; 21975f39d397SChris Mason } 21985f39d397SChris Mason } else { 21995f39d397SChris Mason unlock_page(page); 22005f39d397SChris Mason } 22015f39d397SChris Mason } 22025f39d397SChris Mason 22035f39d397SChris Mason if (ret || !wait) { 22045f39d397SChris Mason return ret; 22055f39d397SChris Mason } 22065f39d397SChris Mason 22075f39d397SChris Mason for (i = 0; i < num_pages; i++) { 22086d36dcd4SChris Mason page = extent_buffer_page(eb, i); 22095f39d397SChris Mason wait_on_page_locked(page); 22105f39d397SChris Mason if (!PageUptodate(page)) { 22115f39d397SChris Mason ret = -EIO; 22125f39d397SChris Mason } 22135f39d397SChris Mason } 22145f39d397SChris Mason eb->flags |= EXTENT_UPTODATE; 22155f39d397SChris Mason return ret; 22165f39d397SChris Mason } 22175f39d397SChris Mason EXPORT_SYMBOL(read_extent_buffer_pages); 22185f39d397SChris Mason 22195f39d397SChris Mason void read_extent_buffer(struct extent_buffer *eb, void *dstv, 22205f39d397SChris Mason unsigned long start, 22215f39d397SChris Mason unsigned long len) 22225f39d397SChris Mason { 22235f39d397SChris Mason size_t cur; 22245f39d397SChris Mason size_t offset; 22255f39d397SChris Mason struct page *page; 22265f39d397SChris Mason char *kaddr; 22275f39d397SChris Mason char *dst = (char *)dstv; 22285f39d397SChris Mason size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 22295f39d397SChris Mason unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 22305f39d397SChris Mason 22315f39d397SChris Mason WARN_ON(start > eb->len); 22325f39d397SChris Mason WARN_ON(start + len > eb->start + eb->len); 22335f39d397SChris Mason 22345f39d397SChris Mason offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1); 22355f39d397SChris Mason if (i == 0) 22365f39d397SChris Mason offset += start_offset; 22375f39d397SChris Mason 22385f39d397SChris Mason while(len > 0) { 22396d36dcd4SChris Mason page = extent_buffer_page(eb, i); 22405f39d397SChris Mason WARN_ON(!PageUptodate(page)); 22415f39d397SChris Mason 22425f39d397SChris Mason cur = min(len, (PAGE_CACHE_SIZE - offset)); 2243ae5252bdSChris Mason kaddr = kmap_atomic(page, KM_USER0); 22445f39d397SChris Mason memcpy(dst, kaddr + offset, cur); 2245ae5252bdSChris Mason kunmap_atomic(kaddr, KM_USER0); 22465f39d397SChris Mason 22475f39d397SChris Mason dst += cur; 22485f39d397SChris Mason len -= cur; 22495f39d397SChris Mason offset = 0; 22505f39d397SChris Mason i++; 22515f39d397SChris Mason } 22525f39d397SChris Mason } 22535f39d397SChris Mason EXPORT_SYMBOL(read_extent_buffer); 22545f39d397SChris Mason 22555f39d397SChris Mason int map_extent_buffer(struct extent_buffer *eb, unsigned long start, 2256479965d6SChris Mason unsigned long min_len, 22575f39d397SChris Mason char **token, char **map, 22585f39d397SChris Mason unsigned long *map_start, 22595f39d397SChris Mason unsigned long *map_len, int km) 22605f39d397SChris Mason { 
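	/*
	 * a contiguous mapping is only possible when [start, start + min_len)
	 * sits inside one page; the end_i comparison below returns -EINVAL
	 * for anything that crosses a page boundary, and the caller has to
	 * fall back to the copying helpers (read_extent_buffer and friends).
	 */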
2261479965d6SChris Mason size_t offset = start & (PAGE_CACHE_SIZE - 1); 22625f39d397SChris Mason char *kaddr; 22635f39d397SChris Mason size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 22645f39d397SChris Mason unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 2265479965d6SChris Mason unsigned long end_i = (start_offset + start + min_len) >> 2266479965d6SChris Mason PAGE_CACHE_SHIFT; 2267479965d6SChris Mason 2268479965d6SChris Mason if (i != end_i) 2269479965d6SChris Mason return -EINVAL; 22705f39d397SChris Mason 22715f39d397SChris Mason WARN_ON(start > eb->len); 22725f39d397SChris Mason 22735f39d397SChris Mason if (i == 0) { 22745f39d397SChris Mason offset = start_offset; 22755f39d397SChris Mason *map_start = 0; 22765f39d397SChris Mason } else { offset = 0; 2277479965d6SChris Mason *map_start = (i << PAGE_CACHE_SHIFT) - start_offset; 22785f39d397SChris Mason } 22795f39d397SChris Mason 2280ae5252bdSChris Mason kaddr = kmap_atomic(extent_buffer_page(eb, i), km); 22815f39d397SChris Mason *token = kaddr; 22825f39d397SChris Mason *map = kaddr + offset; 22835f39d397SChris Mason *map_len = PAGE_CACHE_SIZE - offset; 22845f39d397SChris Mason return 0; 22855f39d397SChris Mason } 22865f39d397SChris Mason EXPORT_SYMBOL(map_extent_buffer); 22875f39d397SChris Mason 22885f39d397SChris Mason void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km) 22895f39d397SChris Mason { 2290ae5252bdSChris Mason kunmap_atomic(token, km); 22915f39d397SChris Mason } 22925f39d397SChris Mason EXPORT_SYMBOL(unmap_extent_buffer); 22935f39d397SChris Mason 22945f39d397SChris Mason int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, 22955f39d397SChris Mason unsigned long start, 22965f39d397SChris Mason unsigned long len) 22975f39d397SChris Mason { 22985f39d397SChris Mason size_t cur; 22995f39d397SChris Mason size_t offset; 23005f39d397SChris Mason struct page *page; 23015f39d397SChris Mason char *kaddr; 23025f39d397SChris Mason char *ptr = (char *)ptrv; 23035f39d397SChris Mason size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 23045f39d397SChris Mason unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 23055f39d397SChris Mason int ret = 0; 23065f39d397SChris Mason 23075f39d397SChris Mason WARN_ON(start > eb->len); 23085f39d397SChris Mason WARN_ON(start + len > eb->start + eb->len); 23095f39d397SChris Mason 23105f39d397SChris Mason offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1); 23115f39d397SChris Mason if (i == 0) 23125f39d397SChris Mason offset += start_offset; 23135f39d397SChris Mason 23145f39d397SChris Mason while(len > 0) { 23156d36dcd4SChris Mason page = extent_buffer_page(eb, i); 23165f39d397SChris Mason WARN_ON(!PageUptodate(page)); 23175f39d397SChris Mason 23185f39d397SChris Mason cur = min(len, (PAGE_CACHE_SIZE - offset)); 23195f39d397SChris Mason 2320ae5252bdSChris Mason kaddr = kmap_atomic(page, KM_USER0); 23215f39d397SChris Mason ret = memcmp(ptr, kaddr + offset, cur); 2322ae5252bdSChris Mason kunmap_atomic(kaddr, KM_USER0); 23235f39d397SChris Mason if (ret) 23245f39d397SChris Mason break; 23255f39d397SChris Mason 23265f39d397SChris Mason ptr += cur; 23275f39d397SChris Mason len -= cur; 23285f39d397SChris Mason offset = 0; 23295f39d397SChris Mason i++; 23305f39d397SChris Mason } 23315f39d397SChris Mason return ret; 23325f39d397SChris Mason } 23335f39d397SChris Mason EXPORT_SYMBOL(memcmp_extent_buffer); 23345f39d397SChris Mason 23355f39d397SChris Mason void write_extent_buffer(struct extent_buffer *eb, const void *srcv, 23365f39d397SChris Mason
unsigned long start, unsigned long len) 23375f39d397SChris Mason { 23385f39d397SChris Mason size_t cur; 23395f39d397SChris Mason size_t offset; 23405f39d397SChris Mason struct page *page; 23415f39d397SChris Mason char *kaddr; 23425f39d397SChris Mason char *src = (char *)srcv; 23435f39d397SChris Mason size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 23445f39d397SChris Mason unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 23455f39d397SChris Mason 23465f39d397SChris Mason WARN_ON(start > eb->len); 23475f39d397SChris Mason WARN_ON(start + len > eb->start + eb->len); 23485f39d397SChris Mason 23495f39d397SChris Mason offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1); 23505f39d397SChris Mason if (i == 0) 23515f39d397SChris Mason offset += start_offset; 23525f39d397SChris Mason 23535f39d397SChris Mason while(len > 0) { 23546d36dcd4SChris Mason page = extent_buffer_page(eb, i); 23555f39d397SChris Mason WARN_ON(!PageUptodate(page)); 23565f39d397SChris Mason 23575f39d397SChris Mason cur = min(len, PAGE_CACHE_SIZE - offset); 2358ae5252bdSChris Mason kaddr = kmap_atomic(page, KM_USER0); 23595f39d397SChris Mason memcpy(kaddr + offset, src, cur); 2360ae5252bdSChris Mason kunmap_atomic(kaddr, KM_USER0); 23615f39d397SChris Mason 23625f39d397SChris Mason src += cur; 23635f39d397SChris Mason len -= cur; 23645f39d397SChris Mason offset = 0; 23655f39d397SChris Mason i++; 23665f39d397SChris Mason } 23675f39d397SChris Mason } 23685f39d397SChris Mason EXPORT_SYMBOL(write_extent_buffer); 23695f39d397SChris Mason 23705f39d397SChris Mason void memset_extent_buffer(struct extent_buffer *eb, char c, 23715f39d397SChris Mason unsigned long start, unsigned long len) 23725f39d397SChris Mason { 23735f39d397SChris Mason size_t cur; 23745f39d397SChris Mason size_t offset; 23755f39d397SChris Mason struct page *page; 23765f39d397SChris Mason char *kaddr; 23775f39d397SChris Mason size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 23785f39d397SChris Mason unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 23795f39d397SChris Mason 23805f39d397SChris Mason WARN_ON(start > eb->len); 23815f39d397SChris Mason WARN_ON(start + len > eb->start + eb->len); 23825f39d397SChris Mason 23835f39d397SChris Mason offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1); 23845f39d397SChris Mason if (i == 0) 23855f39d397SChris Mason offset += start_offset; 23865f39d397SChris Mason 23875f39d397SChris Mason while(len > 0) { 23886d36dcd4SChris Mason page = extent_buffer_page(eb, i); 23895f39d397SChris Mason WARN_ON(!PageUptodate(page)); 23905f39d397SChris Mason 23915f39d397SChris Mason cur = min(len, PAGE_CACHE_SIZE - offset); 2392ae5252bdSChris Mason kaddr = kmap_atomic(page, KM_USER0); 23935f39d397SChris Mason memset(kaddr + offset, c, cur); 2394ae5252bdSChris Mason kunmap_atomic(kaddr, KM_USER0); 23955f39d397SChris Mason 23965f39d397SChris Mason len -= cur; 23975f39d397SChris Mason offset = 0; 23985f39d397SChris Mason i++; 23995f39d397SChris Mason } 24005f39d397SChris Mason } 24015f39d397SChris Mason EXPORT_SYMBOL(memset_extent_buffer); 24025f39d397SChris Mason 24035f39d397SChris Mason void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, 24045f39d397SChris Mason unsigned long dst_offset, unsigned long src_offset, 24055f39d397SChris Mason unsigned long len) 24065f39d397SChris Mason { 24075f39d397SChris Mason u64 dst_len = dst->len; 24085f39d397SChris Mason size_t cur; 24095f39d397SChris Mason size_t offset; 24105f39d397SChris Mason struct page *page; 24115f39d397SChris Mason 
char *kaddr; 24125f39d397SChris Mason size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); 24135f39d397SChris Mason unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; 24145f39d397SChris Mason 24155f39d397SChris Mason WARN_ON(src->len != dst_len); 24165f39d397SChris Mason 24175f39d397SChris Mason offset = dst_offset & ((unsigned long)PAGE_CACHE_SIZE - 1); 24185f39d397SChris Mason if (i == 0) 24195f39d397SChris Mason offset += start_offset; 24205f39d397SChris Mason 24215f39d397SChris Mason while(len > 0) { 24226d36dcd4SChris Mason page = extent_buffer_page(dst, i); 24235f39d397SChris Mason WARN_ON(!PageUptodate(page)); 24245f39d397SChris Mason 24255f39d397SChris Mason cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset)); 24265f39d397SChris Mason 2427ae5252bdSChris Mason kaddr = kmap_atomic(page, KM_USER1); 24285f39d397SChris Mason read_extent_buffer(src, kaddr + offset, src_offset, cur); 2429ae5252bdSChris Mason kunmap_atomic(kaddr, KM_USER1); 24305f39d397SChris Mason 24315f39d397SChris Mason src_offset += cur; 24325f39d397SChris Mason len -= cur; 24335f39d397SChris Mason offset = 0; 24345f39d397SChris Mason i++; 24355f39d397SChris Mason } 24365f39d397SChris Mason } 24375f39d397SChris Mason EXPORT_SYMBOL(copy_extent_buffer); 24385f39d397SChris Mason 24395f39d397SChris Mason static void move_pages(struct page *dst_page, struct page *src_page, 24405f39d397SChris Mason unsigned long dst_off, unsigned long src_off, 24415f39d397SChris Mason unsigned long len) 24425f39d397SChris Mason { 2443ae5252bdSChris Mason char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); 24445f39d397SChris Mason if (dst_page == src_page) { 24455f39d397SChris Mason memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len); 24465f39d397SChris Mason } else { 2447ae5252bdSChris Mason char *src_kaddr = kmap_atomic(src_page, KM_USER1); 24485f39d397SChris Mason char *p = dst_kaddr + dst_off + len; 24495f39d397SChris Mason char *s = src_kaddr + src_off + len; 24505f39d397SChris Mason 24515f39d397SChris Mason while (len--) 24525f39d397SChris Mason *--p = *--s; 24535f39d397SChris Mason 2454ae5252bdSChris Mason kunmap_atomic(src_kaddr, KM_USER1); 24555f39d397SChris Mason } 2456ae5252bdSChris Mason kunmap_atomic(dst_kaddr, KM_USER0); 24575f39d397SChris Mason } 24585f39d397SChris Mason 24595f39d397SChris Mason static void copy_pages(struct page *dst_page, struct page *src_page, 24605f39d397SChris Mason unsigned long dst_off, unsigned long src_off, 24615f39d397SChris Mason unsigned long len) 24625f39d397SChris Mason { 2463ae5252bdSChris Mason char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); 24645f39d397SChris Mason char *src_kaddr; 24655f39d397SChris Mason 24665f39d397SChris Mason if (dst_page != src_page) 2467ae5252bdSChris Mason src_kaddr = kmap_atomic(src_page, KM_USER1); 24685f39d397SChris Mason else 24695f39d397SChris Mason src_kaddr = dst_kaddr; 24705f39d397SChris Mason 24715f39d397SChris Mason memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); 24725f39d397SChris Mason kunmap_atomic(dst_kaddr, KM_USER0); 24735f39d397SChris Mason if (dst_page != src_page) 24745f39d397SChris Mason kunmap_atomic(src_kaddr, KM_USER1); 24755f39d397SChris Mason } 24765f39d397SChris Mason 24775f39d397SChris Mason void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, 24785f39d397SChris Mason unsigned long src_offset, unsigned long len) 24795f39d397SChris Mason { 24805f39d397SChris Mason size_t cur; 24815f39d397SChris Mason size_t dst_off_in_page; 24825f39d397SChris Mason size_t src_off_in_page; 
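	/*
	 * the loop below advances page by page, clamping each chunk to
	 * whichever of the source or destination page boundaries comes
	 * first.
	 */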
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = dst_offset &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = src_offset &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		if (src_i == 0)
			src_off_in_page += start_offset;
		if (dst_i == 0)
			dst_off_in_page += start_offset;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE -
					       dst_off_in_page));

		/* both pages come from dst: this copies within one buffer */
		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);
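
/*
 * Worked example of the offset arithmetic above (illustrative, assuming
 * 4K pages).  The arithmetic is only consistent when an extent buffer is
 * either page aligned (start_offset == 0) or small enough to sit entirely
 * in one page, which the surrounding code appears to assume:
 *
 * - multi-page buffer at dst->start = 0x4000: start_offset = 0, so an
 *   offset of 0x1100 gives dst_i = 0x1100 >> 12 = 1 and
 *   dst_off_in_page = 0x100, allowing at most 0xf00 bytes from that page;
 *
 * - sub-page buffer at dst->start = 0x1800: start_offset = 0x800, so an
 *   offset of 0x200 gives dst_i = (0x800 + 0x200) >> 12 = 0, and the
 *   start_offset adjustment yields dst_off_in_page = 0xa00.
 */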
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = dst_end &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = src_end &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		if (src_i == 0)
			src_off_in_page += start_offset;
		if (dst_i == 0)
			dst_off_in_page += start_offset;

		cur = min(len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);

		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur - 1;
		src_end -= cur - 1;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);
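
/*
 * Usage sketch (hypothetical, not from the original file): a btree-style
 * insert that opens a gap for one new item in a leaf would shift the
 * existing item data with the overlap-safe variant, with slot_offset,
 * item_size and tail_len standing in for the caller's bookkeeping:
 *
 *	memmove_extent_buffer(leaf, slot_offset + item_size, slot_offset,
 *			      tail_len);
 *
 * Here dst_offset > src_offset and the ranges overlap, so the backwards
 * walk above is required; when dst_offset < src_offset the move simply
 * degenerates to memcpy_extent_buffer(), which is safe in that direction.
 */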