#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include "extent_map.h"

/* temporary prototype until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

/* bits for the extent state */
#define EXTENT_DIRTY 1
#define EXTENT_WRITEBACK (1 << 1)
#define EXTENT_UPTODATE (1 << 2)
#define EXTENT_LOCKED (1 << 3)
#define EXTENT_NEW (1 << 4)
#define EXTENT_DELALLOC (1 << 5)

#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)

void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map),
					      SLAB_DESTROY_BY_RCU,
					      NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state),
						SLAB_DESTROY_BY_RCU,
						NULL);
}

void __exit extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_map_tree_init);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
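/*
 * Usage sketch for tree_insert() (hypothetical caller, error handling
 * elided): nodes are keyed by an offset, and insertion returns the
 * conflicting node when the key already falls inside an existing
 * [start, end] range, so insertion doubles as an overlap check:
 *
 *	struct rb_node *clash;
 *
 *	clash = tree_insert(root, entry->end, &entry->rb_node);
 *	if (clash)
 *		return -EEXIST;	// some entry already covers entry->end
 */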
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == 0 && prev->block_start == 0) ||
		     (em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
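/*
 * A minimal sketch of how the mapping calls above fit together
 * (hypothetical caller; the offsets are made up and error handling is
 * abbreviated).  add_extent_mapping() takes its own reference, so the
 * caller can drop the allocation reference once the insert succeeds:
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *
 *	em->start = 0;
 *	em->end = 4095;
 *	em->block_start = ...;
 *	em->block_end = ...;
 *	if (add_extent_mapping(tree, em) == -EEXIST)
 *		// some mapping already intersects [0, 4095]
 *	free_extent_map(em);	// drop the allocation reference
 *
 *	em = lookup_extent_mapping(tree, 0, 4095);
 *	if (em) {
 *		// use em, then drop the lookup reference
 *		free_extent_map(em);
 *	}
 */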
/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged because the end_io handlers need to be able to
 * do operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case
 * the state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	if ((end & 4095) == 0) {
		printk("insert state %Lu %Lu strange end\n", start, end);
		WARN_ON(1);
	}
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates
 * an offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;
	if ((prealloc->end & 4095) == 0) {
		printk("insert state %Lu %Lu strange end\n", prealloc->start,
		       prealloc->end);
		WARN_ON(1);
	}
	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}
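/*
 * Worked example for split_state() (the values are hypothetical): with
 * 'orig' covering [0, 8191] and split == 4096, the tree afterwards
 * holds two records:
 *
 *	prealloc: [0, 4095]
 *	orig:     [4096, 8191]
 *
 * 'prealloc' must come from alloc_extent_state() ahead of time, so the
 * split itself never has to allocate while the tree lock is held.
 */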
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (i.e. for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start >= end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
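/*
 * Sketch of two clear_extent_bit() callers (hypothetical; the bit
 * combinations are illustrative).  A truncate-like path that wants the
 * whole range gone regardless of its bits passes delete == 1:
 *
 *	// remove [start, end] from the tree, waking any sleepers
 *	clear_extent_bit(tree, start, end, 0, 1, 1, GFP_NOFS);
 *
 * A plain bit clear leaves the records in place unless all of their
 * bits go to zero:
 *
 *	clear_extent_bit(tree, start, end, EXTENT_DIRTY, 0, 0, GFP_NOFS);
 */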
568a52d9a80SChris Mason * The tree lock is taken by this function 569a52d9a80SChris Mason */ 570a52d9a80SChris Mason int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits) 571a52d9a80SChris Mason { 572a52d9a80SChris Mason struct extent_state *state; 573a52d9a80SChris Mason struct rb_node *node; 574a52d9a80SChris Mason 575a52d9a80SChris Mason read_lock_irq(&tree->lock); 576a52d9a80SChris Mason again: 577a52d9a80SChris Mason while (1) { 578a52d9a80SChris Mason /* 579a52d9a80SChris Mason * this search will find all the extents that end after 580a52d9a80SChris Mason * our range starts 581a52d9a80SChris Mason */ 582a52d9a80SChris Mason node = tree_search(&tree->state, start); 583a52d9a80SChris Mason if (!node) 584a52d9a80SChris Mason break; 585a52d9a80SChris Mason 586a52d9a80SChris Mason state = rb_entry(node, struct extent_state, rb_node); 587a52d9a80SChris Mason 588a52d9a80SChris Mason if (state->start > end) 589a52d9a80SChris Mason goto out; 590a52d9a80SChris Mason 591a52d9a80SChris Mason if (state->state & bits) { 592a52d9a80SChris Mason start = state->start; 593a52d9a80SChris Mason atomic_inc(&state->refs); 594a52d9a80SChris Mason wait_on_state(tree, state); 595a52d9a80SChris Mason free_extent_state(state); 596a52d9a80SChris Mason goto again; 597a52d9a80SChris Mason } 598a52d9a80SChris Mason start = state->end + 1; 599a52d9a80SChris Mason 600a52d9a80SChris Mason if (start > end) 601a52d9a80SChris Mason break; 602a52d9a80SChris Mason 603a52d9a80SChris Mason if (need_resched()) { 604a52d9a80SChris Mason read_unlock_irq(&tree->lock); 605a52d9a80SChris Mason cond_resched(); 606a52d9a80SChris Mason read_lock_irq(&tree->lock); 607a52d9a80SChris Mason } 608a52d9a80SChris Mason } 609a52d9a80SChris Mason out: 610a52d9a80SChris Mason read_unlock_irq(&tree->lock); 611a52d9a80SChris Mason return 0; 612a52d9a80SChris Mason } 613a52d9a80SChris Mason EXPORT_SYMBOL(wait_extent_bit); 614a52d9a80SChris Mason 615a52d9a80SChris Mason /* 616a52d9a80SChris Mason * set some bits on a range in the tree. This may require allocations 617a52d9a80SChris Mason * or sleeping, so the gfp mask is used to indicate what is allowed. 618a52d9a80SChris Mason * 619a52d9a80SChris Mason * If 'exclusive' == 1, this will fail with -EEXIST if some part of the 620a52d9a80SChris Mason * range already has the desired bits set. The start of the existing 621a52d9a80SChris Mason * range is returned in failed_start in this case. 622a52d9a80SChris Mason * 623a52d9a80SChris Mason * [start, end] is inclusive 624a52d9a80SChris Mason * This takes the tree lock. 
625a52d9a80SChris Mason */ 626a52d9a80SChris Mason int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits, 627a52d9a80SChris Mason int exclusive, u64 *failed_start, gfp_t mask) 628a52d9a80SChris Mason { 629a52d9a80SChris Mason struct extent_state *state; 630a52d9a80SChris Mason struct extent_state *prealloc = NULL; 631a52d9a80SChris Mason struct rb_node *node; 63290f1c19aSChristoph Hellwig unsigned long flags; 633a52d9a80SChris Mason int err = 0; 634a52d9a80SChris Mason int set; 635a52d9a80SChris Mason u64 last_start; 636a52d9a80SChris Mason u64 last_end; 637a52d9a80SChris Mason again: 638a52d9a80SChris Mason if (!prealloc && (mask & __GFP_WAIT)) { 639a52d9a80SChris Mason prealloc = alloc_extent_state(mask); 640a52d9a80SChris Mason if (!prealloc) 641a52d9a80SChris Mason return -ENOMEM; 642a52d9a80SChris Mason } 643a52d9a80SChris Mason 64490f1c19aSChristoph Hellwig write_lock_irqsave(&tree->lock, flags); 645a52d9a80SChris Mason /* 646a52d9a80SChris Mason * this search will find all the extents that end after 647a52d9a80SChris Mason * our range starts. 648a52d9a80SChris Mason */ 649a52d9a80SChris Mason node = tree_search(&tree->state, start); 650a52d9a80SChris Mason if (!node) { 651a52d9a80SChris Mason err = insert_state(tree, prealloc, start, end, bits); 652a52d9a80SChris Mason prealloc = NULL; 653a52d9a80SChris Mason BUG_ON(err == -EEXIST); 654a52d9a80SChris Mason goto out; 655a52d9a80SChris Mason } 656a52d9a80SChris Mason 657a52d9a80SChris Mason state = rb_entry(node, struct extent_state, rb_node); 658a52d9a80SChris Mason last_start = state->start; 659a52d9a80SChris Mason last_end = state->end; 660a52d9a80SChris Mason 661a52d9a80SChris Mason /* 662a52d9a80SChris Mason * | ---- desired range ---- | 663a52d9a80SChris Mason * | state | 664a52d9a80SChris Mason * 665a52d9a80SChris Mason * Just lock what we found and keep going 666a52d9a80SChris Mason */ 667a52d9a80SChris Mason if (state->start == start && state->end <= end) { 668a52d9a80SChris Mason set = state->state & bits; 669a52d9a80SChris Mason if (set && exclusive) { 670a52d9a80SChris Mason *failed_start = state->start; 671a52d9a80SChris Mason err = -EEXIST; 672a52d9a80SChris Mason goto out; 673a52d9a80SChris Mason } 674a52d9a80SChris Mason state->state |= bits; 675a52d9a80SChris Mason start = state->end + 1; 676a52d9a80SChris Mason merge_state(tree, state); 677a52d9a80SChris Mason goto search_again; 678a52d9a80SChris Mason } 679a52d9a80SChris Mason 680a52d9a80SChris Mason /* 681a52d9a80SChris Mason * | ---- desired range ---- | 682a52d9a80SChris Mason * | state | 683a52d9a80SChris Mason * or 684a52d9a80SChris Mason * | ------------- state -------------- | 685a52d9a80SChris Mason * 686a52d9a80SChris Mason * We need to split the extent we found, and may flip bits on 687a52d9a80SChris Mason * second half. 688a52d9a80SChris Mason * 689a52d9a80SChris Mason * If the extent we found extends past our 690a52d9a80SChris Mason * range, we just split and search again. It'll get split 691a52d9a80SChris Mason * again the next time though. 692a52d9a80SChris Mason * 693a52d9a80SChris Mason * If the extent we found is inside our range, we set the 694a52d9a80SChris Mason * desired bit on it. 
695a52d9a80SChris Mason */ 696a52d9a80SChris Mason if (state->start < start) { 697a52d9a80SChris Mason set = state->state & bits; 698a52d9a80SChris Mason if (exclusive && set) { 699a52d9a80SChris Mason *failed_start = start; 700a52d9a80SChris Mason err = -EEXIST; 701a52d9a80SChris Mason goto out; 702a52d9a80SChris Mason } 703a52d9a80SChris Mason err = split_state(tree, state, prealloc, start); 704a52d9a80SChris Mason BUG_ON(err == -EEXIST); 705a52d9a80SChris Mason prealloc = NULL; 706a52d9a80SChris Mason if (err) 707a52d9a80SChris Mason goto out; 708a52d9a80SChris Mason if (state->end <= end) { 709a52d9a80SChris Mason state->state |= bits; 710a52d9a80SChris Mason start = state->end + 1; 711a52d9a80SChris Mason merge_state(tree, state); 712a52d9a80SChris Mason } else { 713a52d9a80SChris Mason start = state->start; 714a52d9a80SChris Mason } 715a52d9a80SChris Mason goto search_again; 716a52d9a80SChris Mason } 717a52d9a80SChris Mason /* 718a52d9a80SChris Mason * | ---- desired range ---- | 719a52d9a80SChris Mason * | state | or | state | 720a52d9a80SChris Mason * 721a52d9a80SChris Mason * There's a hole, we need to insert something in it and 722a52d9a80SChris Mason * ignore the extent we found. 723a52d9a80SChris Mason */ 724a52d9a80SChris Mason if (state->start > start) { 725a52d9a80SChris Mason u64 this_end; 726a52d9a80SChris Mason if (end < last_start) 727a52d9a80SChris Mason this_end = end; 728a52d9a80SChris Mason else 729a52d9a80SChris Mason this_end = last_start -1; 730a52d9a80SChris Mason err = insert_state(tree, prealloc, start, this_end, 731a52d9a80SChris Mason bits); 732a52d9a80SChris Mason prealloc = NULL; 733a52d9a80SChris Mason BUG_ON(err == -EEXIST); 734a52d9a80SChris Mason if (err) 735a52d9a80SChris Mason goto out; 736a52d9a80SChris Mason start = this_end + 1; 737a52d9a80SChris Mason goto search_again; 738a52d9a80SChris Mason } 739a8c450b2SChris Mason /* 740a8c450b2SChris Mason * | ---- desired range ---- | 741a8c450b2SChris Mason * | state | 742a8c450b2SChris Mason * We need to split the extent, and set the bit 743a8c450b2SChris Mason * on the first half 744a8c450b2SChris Mason */ 745a8c450b2SChris Mason if (state->start <= end && state->end > end) { 746a8c450b2SChris Mason set = state->state & bits; 747a8c450b2SChris Mason if (exclusive && set) { 748a8c450b2SChris Mason *failed_start = start; 749a8c450b2SChris Mason err = -EEXIST; 750a8c450b2SChris Mason goto out; 751a8c450b2SChris Mason } 752a8c450b2SChris Mason err = split_state(tree, state, prealloc, end + 1); 753a8c450b2SChris Mason BUG_ON(err == -EEXIST); 754a8c450b2SChris Mason 755a8c450b2SChris Mason prealloc->state |= bits; 756a8c450b2SChris Mason merge_state(tree, prealloc); 757a8c450b2SChris Mason prealloc = NULL; 758a8c450b2SChris Mason goto out; 759a8c450b2SChris Mason } 760a8c450b2SChris Mason 761a52d9a80SChris Mason goto search_again; 762a52d9a80SChris Mason 763a52d9a80SChris Mason out: 76490f1c19aSChristoph Hellwig write_unlock_irqrestore(&tree->lock, flags); 765a52d9a80SChris Mason if (prealloc) 766a52d9a80SChris Mason free_extent_state(prealloc); 767a52d9a80SChris Mason 768a52d9a80SChris Mason return err; 769a52d9a80SChris Mason 770a52d9a80SChris Mason search_again: 771a52d9a80SChris Mason if (start > end) 772a52d9a80SChris Mason goto out; 77390f1c19aSChristoph Hellwig write_unlock_irqrestore(&tree->lock, flags); 774a52d9a80SChris Mason if (mask & __GFP_WAIT) 775a52d9a80SChris Mason cond_resched(); 776a52d9a80SChris Mason goto again; 777a52d9a80SChris Mason } 778a52d9a80SChris Mason 
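/*
 * Sketch of the exclusive mode (hypothetical caller): with
 * 'exclusive' == 1 a second setter is refused and told where the
 * existing range begins, which is how lock_extent() below spins until
 * the range is free:
 *
 *	u64 failed_start;
 *	int err;
 *
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		// [failed_start, ...] is already locked; wait and retry
 */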
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);
int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start, end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
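/*
 * Typical lock/unlock pairing (hypothetical caller): lock_extent()
 * sleeps until it owns [start, end], so the pair brackets an I/O much
 * like lock_page()/unlock_page() does for a single page:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	// read or write [start, end] while it cannot change
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */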
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end,
			     u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node))
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start)
			goto out;
		if (!(state->state & EXTENT_DELALLOC))
			goto out;
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				/*
				 * queue on the state's waitqueue before
				 * dropping the lock, so a wakeup between
				 * the unlock and the schedule() is not
				 * lost
				 */
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		/* accumulate so max_bytes caps the whole run */
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}
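/*
 * Sketch of a find_lock_delalloc_range() caller (hypothetical,
 * writepage-style path; the byte cap is made up): starting from the
 * offset being written out, it walks forward while the records stay
 * contiguous and delalloc, locks the ones at or past lock_start, and
 * reports where the run ends.  The return value is the number of
 * records gathered:
 *
 *	u64 end;
 *	u64 found;
 *
 *	found = find_lock_delalloc_range(tree, delalloc_start, page_start,
 *					 &end, 64 * 1024 * 1024);
 *	if (found)
 *		// flush [delalloc_start, end], then unlock it
 *
 * The walk stops at the first gap or non-delalloc record it hits.
 */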
/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);
int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
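/*
 * The private field gives I/O completion handlers a place to stash one
 * u64 per extent_state, for example a checksum recorded at submit time
 * (hypothetical caller sketch):
 *
 *	set_state_private(tree, start, csum);
 *	...
 *	u64 csum;
 *	if (get_state_private(tree, start, &csum) == 0)
 *		// csum is valid for the state starting at 'start'
 *
 * Both calls return -ENOENT unless a state record begins exactly at
 * 'start'.
 */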
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
static int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
			  int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}
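/*
 * test_range_bit() example (hypothetical values): with EXTENT_DIRTY set
 * on [0, 4095] and a second, non-dirty record covering [4096, 8191],
 *
 *	test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 0) returns 1
 *	    (some part of the range is dirty), while
 *	test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 1) returns 0
 *	    (the range is not dirty end to end).
 *
 * The page helpers above lean on that distinction: a page is marked
 * uptodate only when fully covered, but stays locked while any piece
 * of it is still locked.
 */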
/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}
1250a52d9a80SChris Mason */ 1251a52d9a80SChris Mason static int end_bio_extent_readpage(struct bio *bio, 1252a52d9a80SChris Mason unsigned int bytes_done, int err) 1253a52d9a80SChris Mason { 125407157aacSChris Mason int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1255a52d9a80SChris Mason struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 1256a52d9a80SChris Mason struct extent_map_tree *tree = bio->bi_private; 1257a52d9a80SChris Mason u64 start; 1258a52d9a80SChris Mason u64 end; 1259a52d9a80SChris Mason int whole_page; 126007157aacSChris Mason int ret; 1261a52d9a80SChris Mason 1262a52d9a80SChris Mason if (bio->bi_size) 1263a52d9a80SChris Mason return 1; 1264a52d9a80SChris Mason 1265a52d9a80SChris Mason do { 1266a52d9a80SChris Mason struct page *page = bvec->bv_page; 1267a52d9a80SChris Mason start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; 1268a52d9a80SChris Mason end = start + bvec->bv_len - 1; 1269a52d9a80SChris Mason 1270a52d9a80SChris Mason if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) 1271a52d9a80SChris Mason whole_page = 1; 1272a52d9a80SChris Mason else 1273a52d9a80SChris Mason whole_page = 0; 1274a52d9a80SChris Mason 1275a52d9a80SChris Mason if (--bvec >= bio->bi_io_vec) 1276a52d9a80SChris Mason prefetchw(&bvec->bv_page->flags); 1277a52d9a80SChris Mason 127807157aacSChris Mason if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 127907157aacSChris Mason ret = tree->ops->readpage_end_io_hook(page, start, end); 128007157aacSChris Mason if (ret) 128107157aacSChris Mason uptodate = 0; 128207157aacSChris Mason } 1283a52d9a80SChris Mason if (uptodate) { 1284a52d9a80SChris Mason set_extent_uptodate(tree, start, end, GFP_ATOMIC); 1285a52d9a80SChris Mason if (whole_page) 1286a52d9a80SChris Mason SetPageUptodate(page); 1287a52d9a80SChris Mason else 1288a52d9a80SChris Mason check_page_uptodate(tree, page); 1289a52d9a80SChris Mason } else { 1290a52d9a80SChris Mason ClearPageUptodate(page); 1291a52d9a80SChris Mason SetPageError(page); 1292a52d9a80SChris Mason } 1293a52d9a80SChris Mason 1294a52d9a80SChris Mason unlock_extent(tree, start, end, GFP_ATOMIC); 1295a52d9a80SChris Mason 1296a52d9a80SChris Mason if (whole_page) 1297a52d9a80SChris Mason unlock_page(page); 1298a52d9a80SChris Mason else 1299a52d9a80SChris Mason check_page_locked(tree, page); 1300a52d9a80SChris Mason } while (bvec >= bio->bi_io_vec); 1301a52d9a80SChris Mason 1302a52d9a80SChris Mason bio_put(bio); 1303a52d9a80SChris Mason return 0; 1304a52d9a80SChris Mason } 1305a52d9a80SChris Mason 1306a52d9a80SChris Mason /* 1307a52d9a80SChris Mason * IO done from prepare_write is pretty simple, we just unlock 1308a52d9a80SChris Mason * the structs in the extent tree when done, and set the uptodate bits 1309a52d9a80SChris Mason * as appropriate. 
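 *
 * Note that no page flags are set on success here;
 * extent_prepare_write() waits for the EXTENT_LOCKED bits to clear
 * and then calls check_page_uptodate() itself once all of the
 * readback IO has completed.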
1310a52d9a80SChris Mason */ 1311a52d9a80SChris Mason static int end_bio_extent_preparewrite(struct bio *bio, 1312a52d9a80SChris Mason unsigned int bytes_done, int err) 1313a52d9a80SChris Mason { 1314a52d9a80SChris Mason const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1315a52d9a80SChris Mason struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 1316a52d9a80SChris Mason struct extent_map_tree *tree = bio->bi_private; 1317a52d9a80SChris Mason u64 start; 1318a52d9a80SChris Mason u64 end; 1319a52d9a80SChris Mason 1320a52d9a80SChris Mason if (bio->bi_size) 1321a52d9a80SChris Mason return 1; 1322a52d9a80SChris Mason 1323a52d9a80SChris Mason do { 1324a52d9a80SChris Mason struct page *page = bvec->bv_page; 1325a52d9a80SChris Mason start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; 1326a52d9a80SChris Mason end = start + bvec->bv_len - 1; 1327a52d9a80SChris Mason 1328a52d9a80SChris Mason if (--bvec >= bio->bi_io_vec) 1329a52d9a80SChris Mason prefetchw(&bvec->bv_page->flags); 1330a52d9a80SChris Mason 1331a52d9a80SChris Mason if (uptodate) { 1332a52d9a80SChris Mason set_extent_uptodate(tree, start, end, GFP_ATOMIC); 1333a52d9a80SChris Mason } else { 1334a52d9a80SChris Mason ClearPageUptodate(page); 1335a52d9a80SChris Mason SetPageError(page); 1336a52d9a80SChris Mason } 1337a52d9a80SChris Mason 1338a52d9a80SChris Mason unlock_extent(tree, start, end, GFP_ATOMIC); 1339a52d9a80SChris Mason 1340a52d9a80SChris Mason } while (bvec >= bio->bi_io_vec); 1341a52d9a80SChris Mason 1342a52d9a80SChris Mason bio_put(bio); 1343a52d9a80SChris Mason return 0; 1344a52d9a80SChris Mason } 1345a52d9a80SChris Mason 1346a52d9a80SChris Mason static int submit_extent_page(int rw, struct extent_map_tree *tree, 1347a52d9a80SChris Mason struct page *page, sector_t sector, 1348a52d9a80SChris Mason size_t size, unsigned long offset, 1349a52d9a80SChris Mason struct block_device *bdev, 1350a52d9a80SChris Mason bio_end_io_t end_io_func) 1351a52d9a80SChris Mason { 1352a52d9a80SChris Mason struct bio *bio; 1353a52d9a80SChris Mason int ret = 0; 1354a52d9a80SChris Mason 1355a52d9a80SChris Mason bio = bio_alloc(GFP_NOIO, 1); 1356a52d9a80SChris Mason 1357a52d9a80SChris Mason bio->bi_sector = sector; 1358a52d9a80SChris Mason bio->bi_bdev = bdev; 1359a52d9a80SChris Mason bio->bi_io_vec[0].bv_page = page; 1360a52d9a80SChris Mason bio->bi_io_vec[0].bv_len = size; 1361a52d9a80SChris Mason bio->bi_io_vec[0].bv_offset = offset; 1362a52d9a80SChris Mason 1363a52d9a80SChris Mason bio->bi_vcnt = 1; 1364a52d9a80SChris Mason bio->bi_idx = 0; 1365a52d9a80SChris Mason bio->bi_size = size; 1366a52d9a80SChris Mason 1367a52d9a80SChris Mason bio->bi_end_io = end_io_func; 1368a52d9a80SChris Mason bio->bi_private = tree; 1369a52d9a80SChris Mason 1370a52d9a80SChris Mason bio_get(bio); 1371a52d9a80SChris Mason submit_bio(rw, bio); 1372a52d9a80SChris Mason 1373a52d9a80SChris Mason if (bio_flagged(bio, BIO_EOPNOTSUPP)) 1374a52d9a80SChris Mason ret = -EOPNOTSUPP; 1375a52d9a80SChris Mason 1376a52d9a80SChris Mason bio_put(bio); 1377a52d9a80SChris Mason return ret; 1378a52d9a80SChris Mason } 1379a52d9a80SChris Mason 1380a52d9a80SChris Mason /* 1381a52d9a80SChris Mason * basic readpage implementation. 
Locked extent state structs are inserted 1382a52d9a80SChris Mason * into the tree; they are removed when the IO is done (by the end_io 1383a52d9a80SChris Mason * handlers) 1384a52d9a80SChris Mason */ 1385a52d9a80SChris Mason int extent_read_full_page(struct extent_map_tree *tree, struct page *page, 1386a52d9a80SChris Mason get_extent_t *get_extent) 1387a52d9a80SChris Mason { 1388a52d9a80SChris Mason struct inode *inode = page->mapping->host; 1389a52d9a80SChris Mason u64 start = page->index << PAGE_CACHE_SHIFT; 1390a52d9a80SChris Mason u64 page_end = start + PAGE_CACHE_SIZE - 1; 1391a52d9a80SChris Mason u64 end; 1392a52d9a80SChris Mason u64 cur = start; 1393a52d9a80SChris Mason u64 extent_offset; 1394a52d9a80SChris Mason u64 last_byte = i_size_read(inode); 1395a52d9a80SChris Mason u64 block_start; 1396a52d9a80SChris Mason u64 cur_end; 1397a52d9a80SChris Mason sector_t sector; 1398a52d9a80SChris Mason struct extent_map *em; 1399a52d9a80SChris Mason struct block_device *bdev; 1400a52d9a80SChris Mason int ret; 1401a52d9a80SChris Mason int nr = 0; 1402a52d9a80SChris Mason size_t page_offset = 0; 1403a52d9a80SChris Mason size_t iosize; 1404a52d9a80SChris Mason size_t blocksize = inode->i_sb->s_blocksize; 1405a52d9a80SChris Mason 1406a52d9a80SChris Mason if (!PagePrivate(page)) { 1407a52d9a80SChris Mason SetPagePrivate(page); 1408b888db2bSChris Mason WARN_ON(!page->mapping->a_ops->invalidatepage); 14092bf5a725SChris Mason set_page_private(page, 1); 1410a52d9a80SChris Mason page_cache_get(page); 1411a52d9a80SChris Mason } 1412a52d9a80SChris Mason 1413a52d9a80SChris Mason end = page_end; 1414a52d9a80SChris Mason lock_extent(tree, start, end, GFP_NOFS); 1415a52d9a80SChris Mason 1416a52d9a80SChris Mason while (cur <= end) { 1417a52d9a80SChris Mason if (cur >= last_byte) { 1418a52d9a80SChris Mason iosize = PAGE_CACHE_SIZE - page_offset; 1419a52d9a80SChris Mason zero_user_page(page, page_offset, iosize, KM_USER0); 1420a52d9a80SChris Mason set_extent_uptodate(tree, cur, cur + iosize - 1, 1421a52d9a80SChris Mason GFP_NOFS); 1422a52d9a80SChris Mason unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 1423a52d9a80SChris Mason break; 1424a52d9a80SChris Mason } 1425a52d9a80SChris Mason em = get_extent(inode, page, page_offset, cur, end, 0); 1426a52d9a80SChris Mason if (IS_ERR(em) || !em) { 1427a52d9a80SChris Mason SetPageError(page); 1428a52d9a80SChris Mason unlock_extent(tree, cur, end, GFP_NOFS); 1429a52d9a80SChris Mason break; 1430a52d9a80SChris Mason } 1431a52d9a80SChris Mason 1432a52d9a80SChris Mason extent_offset = cur - em->start; 1433a52d9a80SChris Mason BUG_ON(em->end < cur); 1434a52d9a80SChris Mason BUG_ON(end < cur); 1435a52d9a80SChris Mason 1436a52d9a80SChris Mason iosize = min(em->end - cur, end - cur) + 1; 1437a52d9a80SChris Mason cur_end = min(em->end, end); 1438a52d9a80SChris Mason iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1); 1439a52d9a80SChris Mason sector = (em->block_start + extent_offset) >> 9; 1440a52d9a80SChris Mason bdev = em->bdev; 1441a52d9a80SChris Mason block_start = em->block_start; 1442a52d9a80SChris Mason free_extent_map(em); 1443a52d9a80SChris Mason em = NULL; 1444a52d9a80SChris Mason 1445a52d9a80SChris Mason /* we've found a hole, just zero and go on */ 1446a52d9a80SChris Mason if (block_start == 0) { 1447a52d9a80SChris Mason zero_user_page(page, page_offset, iosize, KM_USER0); 1448a52d9a80SChris Mason set_extent_uptodate(tree, cur, cur + iosize - 1, 1449a52d9a80SChris Mason GFP_NOFS); 1450a52d9a80SChris Mason unlock_extent(tree, cur, cur + iosize - 1, 
GFP_NOFS); 1451a52d9a80SChris Mason cur = cur + iosize; 1452a52d9a80SChris Mason page_offset += iosize; 1453a52d9a80SChris Mason continue; 1454a52d9a80SChris Mason } 1455a52d9a80SChris Mason /* the get_extent function already copied into the page */ 1456a52d9a80SChris Mason if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) { 1457a52d9a80SChris Mason unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 1458a52d9a80SChris Mason cur = cur + iosize; 1459a52d9a80SChris Mason page_offset += iosize; 1460a52d9a80SChris Mason continue; 1461a52d9a80SChris Mason } 1462a52d9a80SChris Mason 146307157aacSChris Mason ret = 0; 146407157aacSChris Mason if (tree->ops && tree->ops->readpage_io_hook) { 146507157aacSChris Mason ret = tree->ops->readpage_io_hook(page, cur, 146607157aacSChris Mason cur + iosize - 1); 146707157aacSChris Mason } 146807157aacSChris Mason if (!ret) { 1469a52d9a80SChris Mason ret = submit_extent_page(READ, tree, page, 147007157aacSChris Mason sector, iosize, page_offset, 147107157aacSChris Mason bdev, end_bio_extent_readpage); 147207157aacSChris Mason } 1473a52d9a80SChris Mason if (ret) 1474a52d9a80SChris Mason SetPageError(page); 1475a52d9a80SChris Mason cur = cur + iosize; 1476a52d9a80SChris Mason page_offset += iosize; 1477a52d9a80SChris Mason nr++; 1478a52d9a80SChris Mason } 1479a52d9a80SChris Mason if (!nr) { 1480a52d9a80SChris Mason if (!PageError(page)) 1481a52d9a80SChris Mason SetPageUptodate(page); 1482a52d9a80SChris Mason unlock_page(page); 1483a52d9a80SChris Mason } 1484a52d9a80SChris Mason return 0; 1485a52d9a80SChris Mason } 1486a52d9a80SChris Mason EXPORT_SYMBOL(extent_read_full_page); 1487a52d9a80SChris Mason 1488a52d9a80SChris Mason /* 1489a52d9a80SChris Mason * the writepage semantics are similar to regular writepage. extent 1490a52d9a80SChris Mason * records are inserted to lock ranges in the tree, and as dirty areas 1491a52d9a80SChris Mason * are found, they are marked writeback. 
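 * (Before any blocks are mapped, delalloc ranges found by
 * find_lock_delalloc_range() are handed to tree->ops->fill_delalloc()
 * so that real extents exist for them.)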
Then the lock bits are removed 1492a52d9a80SChris Mason * and the end_io handler clears the writeback ranges 1493a52d9a80SChris Mason */ 1494a52d9a80SChris Mason int extent_write_full_page(struct extent_map_tree *tree, struct page *page, 1495a52d9a80SChris Mason get_extent_t *get_extent, 1496a52d9a80SChris Mason struct writeback_control *wbc) 1497a52d9a80SChris Mason { 1498a52d9a80SChris Mason struct inode *inode = page->mapping->host; 1499a52d9a80SChris Mason u64 start = page->index << PAGE_CACHE_SHIFT; 1500a52d9a80SChris Mason u64 page_end = start + PAGE_CACHE_SIZE - 1; 1501a52d9a80SChris Mason u64 end; 1502a52d9a80SChris Mason u64 cur = start; 1503a52d9a80SChris Mason u64 extent_offset; 1504a52d9a80SChris Mason u64 last_byte = i_size_read(inode); 1505a52d9a80SChris Mason u64 block_start; 1506a52d9a80SChris Mason sector_t sector; 1507a52d9a80SChris Mason struct extent_map *em; 1508a52d9a80SChris Mason struct block_device *bdev; 1509a52d9a80SChris Mason int ret; 1510a52d9a80SChris Mason int nr = 0; 1511a52d9a80SChris Mason size_t page_offset = 0; 1512a52d9a80SChris Mason size_t iosize; 1513a52d9a80SChris Mason size_t blocksize; 1514a52d9a80SChris Mason loff_t i_size = i_size_read(inode); 1515a52d9a80SChris Mason unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; 1516b888db2bSChris Mason u64 nr_delalloc; 1517b888db2bSChris Mason u64 delalloc_end; 1518a52d9a80SChris Mason 1519b888db2bSChris Mason WARN_ON(!PageLocked(page)); 1520a52d9a80SChris Mason if (page->index > end_index) { 1521a52d9a80SChris Mason clear_extent_dirty(tree, start, page_end, GFP_NOFS); 1522a52d9a80SChris Mason unlock_page(page); 1523a52d9a80SChris Mason return 0; 1524a52d9a80SChris Mason } 1525a52d9a80SChris Mason 1526a52d9a80SChris Mason if (page->index == end_index) { 1527a52d9a80SChris Mason size_t offset = i_size & (PAGE_CACHE_SIZE - 1); 1528a52d9a80SChris Mason zero_user_page(page, offset, 1529a52d9a80SChris Mason PAGE_CACHE_SIZE - offset, KM_USER0); 1530a52d9a80SChris Mason } 1531a52d9a80SChris Mason 1532a52d9a80SChris Mason if (!PagePrivate(page)) { 1533a52d9a80SChris Mason SetPagePrivate(page); 1534a52d9a80SChris Mason set_page_private(page, 1); 1535b888db2bSChris Mason WARN_ON(!page->mapping->a_ops->invalidatepage); 1536a52d9a80SChris Mason page_cache_get(page); 1537a52d9a80SChris Mason } 1538a52d9a80SChris Mason 1539a52d9a80SChris Mason lock_extent(tree, start, page_end, GFP_NOFS); 1540b888db2bSChris Mason nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1, 1541b888db2bSChris Mason &delalloc_end, 1542b888db2bSChris Mason 128 * 1024 * 1024); 1543b888db2bSChris Mason if (nr_delalloc) { 154407157aacSChris Mason tree->ops->fill_delalloc(inode, start, delalloc_end); 1545b888db2bSChris Mason if (delalloc_end >= page_end + 1) { 1546b888db2bSChris Mason clear_extent_bit(tree, page_end + 1, delalloc_end, 1547b888db2bSChris Mason EXTENT_LOCKED | EXTENT_DELALLOC, 1548b888db2bSChris Mason 1, 0, GFP_NOFS); 1549b888db2bSChris Mason } 1550b888db2bSChris Mason clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC, 1551b888db2bSChris Mason 0, 0, GFP_NOFS); 1552b888db2bSChris Mason if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { 1553b888db2bSChris Mason printk("found delalloc bits after clear extent_bit\n"); 1554b888db2bSChris Mason } 1555b888db2bSChris Mason } else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { 1556b888db2bSChris Mason printk("found delalloc bits after find_delalloc_range returns 0\n"); 1557b888db2bSChris Mason } 1558b888db2bSChris Mason 1559b888db2bSChris 
Mason end = page_end; 1560b888db2bSChris Mason if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { 1561b888db2bSChris Mason printk("found delalloc bits after lock_extent\n"); 1562b888db2bSChris Mason } 1563a52d9a80SChris Mason 1564a52d9a80SChris Mason if (last_byte <= start) { 1565a52d9a80SChris Mason clear_extent_dirty(tree, start, page_end, GFP_NOFS); 1566a52d9a80SChris Mason goto done; 1567a52d9a80SChris Mason } 1568a52d9a80SChris Mason 1569a52d9a80SChris Mason set_extent_uptodate(tree, start, page_end, GFP_NOFS); 1570a52d9a80SChris Mason blocksize = inode->i_sb->s_blocksize; 1571a52d9a80SChris Mason 1572a52d9a80SChris Mason while (cur <= end) { 1573a52d9a80SChris Mason if (cur >= last_byte) { 1574a52d9a80SChris Mason clear_extent_dirty(tree, cur, page_end, GFP_NOFS); 1575a52d9a80SChris Mason break; 1576a52d9a80SChris Mason } 1577b888db2bSChris Mason em = get_extent(inode, page, page_offset, cur, end, 0); 1578a52d9a80SChris Mason if (IS_ERR(em) || !em) { 1579a52d9a80SChris Mason SetPageError(page); 1580a52d9a80SChris Mason break; 1581a52d9a80SChris Mason } 1582a52d9a80SChris Mason 1583a52d9a80SChris Mason extent_offset = cur - em->start; 1584a52d9a80SChris Mason BUG_ON(em->end < cur); 1585a52d9a80SChris Mason BUG_ON(end < cur); 1586a52d9a80SChris Mason iosize = min(em->end - cur, end - cur) + 1; 1587a52d9a80SChris Mason iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1); 1588a52d9a80SChris Mason sector = (em->block_start + extent_offset) >> 9; 1589a52d9a80SChris Mason bdev = em->bdev; 1590a52d9a80SChris Mason block_start = em->block_start; 1591a52d9a80SChris Mason free_extent_map(em); 1592a52d9a80SChris Mason em = NULL; 1593a52d9a80SChris Mason 1594a52d9a80SChris Mason if (block_start == 0 || block_start == EXTENT_MAP_INLINE) { 1595a52d9a80SChris Mason clear_extent_dirty(tree, cur, 1596a52d9a80SChris Mason cur + iosize - 1, GFP_NOFS); 1597a52d9a80SChris Mason cur = cur + iosize; 1598a52d9a80SChris Mason page_offset += iosize; 1599a52d9a80SChris Mason continue; 1600a52d9a80SChris Mason } 1601a52d9a80SChris Mason 1602a52d9a80SChris Mason /* leave this out until we have a page_mkwrite call */ 1603a52d9a80SChris Mason if (0 && !test_range_bit(tree, cur, cur + iosize - 1, 1604a52d9a80SChris Mason EXTENT_DIRTY, 0)) { 1605a52d9a80SChris Mason cur = cur + iosize; 1606a52d9a80SChris Mason page_offset += iosize; 1607a52d9a80SChris Mason continue; 1608a52d9a80SChris Mason } 1609a52d9a80SChris Mason clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); 161007157aacSChris Mason ret = tree->ops->writepage_io_hook(page, cur, cur + iosize - 1); 161107157aacSChris Mason if (ret) 161207157aacSChris Mason SetPageError(page); 161307157aacSChris Mason else { 1614a52d9a80SChris Mason set_range_writeback(tree, cur, cur + iosize - 1); 161507157aacSChris Mason ret = submit_extent_page(WRITE, tree, page, sector, 161607157aacSChris Mason iosize, page_offset, bdev, 1617a52d9a80SChris Mason end_bio_extent_writepage); 1618a52d9a80SChris Mason if (ret) 1619a52d9a80SChris Mason SetPageError(page); 162007157aacSChris Mason } 1621a52d9a80SChris Mason cur = cur + iosize; 1622a52d9a80SChris Mason page_offset += iosize; 1623a52d9a80SChris Mason nr++; 1624a52d9a80SChris Mason } 1625a52d9a80SChris Mason done: 1626a52d9a80SChris Mason WARN_ON(test_range_bit(tree, start, page_end, EXTENT_DIRTY, 0)); 1627a52d9a80SChris Mason unlock_extent(tree, start, page_end, GFP_NOFS); 1628a52d9a80SChris Mason unlock_page(page); 1629a52d9a80SChris Mason return 0; 1630a52d9a80SChris Mason } 1631a52d9a80SChris Mason 
EXPORT_SYMBOL(extent_write_full_page); 1632a52d9a80SChris Mason 1633a52d9a80SChris Mason /* 1634a52d9a80SChris Mason * basic invalidatepage code, this waits on any locked or writeback 1635a52d9a80SChris Mason * ranges corresponding to the page, and then deletes any extent state 1636a52d9a80SChris Mason * records from the tree 1637a52d9a80SChris Mason */ 1638a52d9a80SChris Mason int extent_invalidatepage(struct extent_map_tree *tree, 1639a52d9a80SChris Mason struct page *page, unsigned long offset) 1640a52d9a80SChris Mason { 1641a52d9a80SChris Mason u64 start = (page->index << PAGE_CACHE_SHIFT); 1642a52d9a80SChris Mason u64 end = start + PAGE_CACHE_SIZE - 1; 1643a52d9a80SChris Mason size_t blocksize = page->mapping->host->i_sb->s_blocksize; 1644a52d9a80SChris Mason 1645a52d9a80SChris Mason start += (offset + blocksize -1) & ~(blocksize - 1); 1646a52d9a80SChris Mason if (start > end) 1647a52d9a80SChris Mason return 0; 1648a52d9a80SChris Mason 1649a52d9a80SChris Mason lock_extent(tree, start, end, GFP_NOFS); 1650a52d9a80SChris Mason wait_on_extent_writeback(tree, start, end); 16512bf5a725SChris Mason clear_extent_bit(tree, start, end, 16522bf5a725SChris Mason EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC, 1653a52d9a80SChris Mason 1, 1, GFP_NOFS); 1654a52d9a80SChris Mason return 0; 1655a52d9a80SChris Mason } 1656a52d9a80SChris Mason EXPORT_SYMBOL(extent_invalidatepage); 1657a52d9a80SChris Mason 1658a52d9a80SChris Mason /* 1659a52d9a80SChris Mason * simple commit_write call, set_range_dirty is used to mark both 1660a52d9a80SChris Mason * the pages and the extent records as dirty 1661a52d9a80SChris Mason */ 1662a52d9a80SChris Mason int extent_commit_write(struct extent_map_tree *tree, 1663a52d9a80SChris Mason struct inode *inode, struct page *page, 1664a52d9a80SChris Mason unsigned from, unsigned to) 1665a52d9a80SChris Mason { 1666a52d9a80SChris Mason loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; 1667a52d9a80SChris Mason 1668a52d9a80SChris Mason if (!PagePrivate(page)) { 1669a52d9a80SChris Mason SetPagePrivate(page); 1670a52d9a80SChris Mason set_page_private(page, 1); 1671b888db2bSChris Mason WARN_ON(!page->mapping->a_ops->invalidatepage); 1672a52d9a80SChris Mason page_cache_get(page); 1673a52d9a80SChris Mason } 1674a52d9a80SChris Mason 1675a52d9a80SChris Mason set_page_dirty(page); 1676a52d9a80SChris Mason 1677a52d9a80SChris Mason if (pos > inode->i_size) { 1678a52d9a80SChris Mason i_size_write(inode, pos); 1679a52d9a80SChris Mason mark_inode_dirty(inode); 1680a52d9a80SChris Mason } 1681a52d9a80SChris Mason return 0; 1682a52d9a80SChris Mason } 1683a52d9a80SChris Mason EXPORT_SYMBOL(extent_commit_write); 1684a52d9a80SChris Mason 1685a52d9a80SChris Mason int extent_prepare_write(struct extent_map_tree *tree, 1686a52d9a80SChris Mason struct inode *inode, struct page *page, 1687a52d9a80SChris Mason unsigned from, unsigned to, get_extent_t *get_extent) 1688a52d9a80SChris Mason { 1689a52d9a80SChris Mason u64 page_start = page->index << PAGE_CACHE_SHIFT; 1690a52d9a80SChris Mason u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 1691a52d9a80SChris Mason u64 block_start; 1692a52d9a80SChris Mason u64 orig_block_start; 1693a52d9a80SChris Mason u64 block_end; 1694a52d9a80SChris Mason u64 cur_end; 1695a52d9a80SChris Mason struct extent_map *em; 1696a52d9a80SChris Mason unsigned blocksize = 1 << inode->i_blkbits; 1697a52d9a80SChris Mason size_t page_offset = 0; 1698a52d9a80SChris Mason size_t block_off_start; 1699a52d9a80SChris Mason size_t block_off_end; 1700a52d9a80SChris Mason int err = 0; 
1701a52d9a80SChris Mason int iocount = 0; 1702a52d9a80SChris Mason int ret = 0; 1703a52d9a80SChris Mason int isnew; 1704a52d9a80SChris Mason 1705a52d9a80SChris Mason if (!PagePrivate(page)) { 1706a52d9a80SChris Mason SetPagePrivate(page); 1707a52d9a80SChris Mason set_page_private(page, 1); 1708b888db2bSChris Mason WARN_ON(!page->mapping->a_ops->invalidatepage); 1709a52d9a80SChris Mason page_cache_get(page); 1710a52d9a80SChris Mason } 1711a52d9a80SChris Mason block_start = (page_start + from) & ~((u64)blocksize - 1); 1712a52d9a80SChris Mason block_end = (page_start + to - 1) | (blocksize - 1); 1713a52d9a80SChris Mason orig_block_start = block_start; 1714a52d9a80SChris Mason 1715a52d9a80SChris Mason lock_extent(tree, page_start, page_end, GFP_NOFS); 1716a52d9a80SChris Mason while(block_start <= block_end) { 1717a52d9a80SChris Mason em = get_extent(inode, page, page_offset, block_start, 1718a52d9a80SChris Mason block_end, 1); 1719a52d9a80SChris Mason if (IS_ERR(em) || !em) { 1720a52d9a80SChris Mason goto err; 1721a52d9a80SChris Mason } 1722a52d9a80SChris Mason cur_end = min(block_end, em->end); 1723a52d9a80SChris Mason block_off_start = block_start & (PAGE_CACHE_SIZE - 1); 1724a52d9a80SChris Mason block_off_end = block_off_start + blocksize; 1725a52d9a80SChris Mason isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS); 1726a52d9a80SChris Mason 1727a52d9a80SChris Mason if (!PageUptodate(page) && isnew && 1728a52d9a80SChris Mason (block_off_end > to || block_off_start < from)) { 1729a52d9a80SChris Mason void *kaddr; 1730a52d9a80SChris Mason 1731a52d9a80SChris Mason kaddr = kmap_atomic(page, KM_USER0); 1732a52d9a80SChris Mason if (block_off_end > to) 1733a52d9a80SChris Mason memset(kaddr + to, 0, block_off_end - to); 1734a52d9a80SChris Mason if (block_off_start < from) 1735a52d9a80SChris Mason memset(kaddr + block_off_start, 0, 1736a52d9a80SChris Mason from - block_off_start); 1737a52d9a80SChris Mason flush_dcache_page(page); 1738a52d9a80SChris Mason kunmap_atomic(kaddr, KM_USER0); 1739a52d9a80SChris Mason } 1740a52d9a80SChris Mason if (!isnew && !PageUptodate(page) && 1741a52d9a80SChris Mason (block_off_end > to || block_off_start < from) && 1742a52d9a80SChris Mason !test_range_bit(tree, block_start, cur_end, 1743a52d9a80SChris Mason EXTENT_UPTODATE, 1)) { 1744a52d9a80SChris Mason u64 sector; 1745a52d9a80SChris Mason u64 extent_offset = block_start - em->start; 1746a52d9a80SChris Mason size_t iosize; 1747a52d9a80SChris Mason sector = (em->block_start + extent_offset) >> 9; 1748a52d9a80SChris Mason iosize = (cur_end - block_start + blocksize - 1) & 1749a52d9a80SChris Mason ~((u64)blocksize - 1); 1750a52d9a80SChris Mason /* 1751a52d9a80SChris Mason * we've already got the extent locked, but we 1752a52d9a80SChris Mason * need to split the state such that our end_bio 1753a52d9a80SChris Mason * handler can clear the lock. 
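 *
 * Re-setting EXTENT_LOCKED on just this sub-range below splits the
 * existing locked state record at the IO boundaries, so the end_io
 * handler unlocks only the blocks that were actually read.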
1754a52d9a80SChris Mason */ 1755a52d9a80SChris Mason set_extent_bit(tree, block_start, 1756a52d9a80SChris Mason block_start + iosize - 1, 1757a52d9a80SChris Mason EXTENT_LOCKED, 0, NULL, GFP_NOFS); 1758a52d9a80SChris Mason ret = submit_extent_page(READ, tree, page, 1759a52d9a80SChris Mason sector, iosize, page_offset, em->bdev, 1760a52d9a80SChris Mason end_bio_extent_preparewrite); 1761a52d9a80SChris Mason iocount++; 1762a52d9a80SChris Mason block_start = block_start + iosize; 1763a52d9a80SChris Mason } else { 1764a52d9a80SChris Mason set_extent_uptodate(tree, block_start, cur_end, 1765a52d9a80SChris Mason GFP_NOFS); 1766a52d9a80SChris Mason unlock_extent(tree, block_start, cur_end, GFP_NOFS); 1767a52d9a80SChris Mason block_start = cur_end + 1; 1768a52d9a80SChris Mason } 1769a52d9a80SChris Mason page_offset = block_start & (PAGE_CACHE_SIZE - 1); 1770a52d9a80SChris Mason free_extent_map(em); 1771a52d9a80SChris Mason } 1772a52d9a80SChris Mason if (iocount) { 1773a52d9a80SChris Mason wait_extent_bit(tree, orig_block_start, 1774a52d9a80SChris Mason block_end, EXTENT_LOCKED); 1775a52d9a80SChris Mason } 1776a52d9a80SChris Mason check_page_uptodate(tree, page); 1777a52d9a80SChris Mason err: 1778a52d9a80SChris Mason /* FIXME, zero out newly allocated blocks on error */ 1779a52d9a80SChris Mason return err; 1780a52d9a80SChris Mason } 1781a52d9a80SChris Mason EXPORT_SYMBOL(extent_prepare_write); 1782a52d9a80SChris Mason 1783a52d9a80SChris Mason /* 1784a52d9a80SChris Mason * a helper for releasepage. As long as there are no locked extents 1785a52d9a80SChris Mason * in the range corresponding to the page, both state records and extent 1786a52d9a80SChris Mason * map records are removed 1787a52d9a80SChris Mason */ 1788a52d9a80SChris Mason int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page) 1789a52d9a80SChris Mason { 1790a52d9a80SChris Mason struct extent_map *em; 1791a52d9a80SChris Mason u64 start = page->index << PAGE_CACHE_SHIFT; 1792a52d9a80SChris Mason u64 end = start + PAGE_CACHE_SIZE - 1; 1793a52d9a80SChris Mason u64 orig_start = start; 1794b888db2bSChris Mason int ret = 1; 1795a52d9a80SChris Mason 1796a52d9a80SChris Mason while (start <= end) { 1797a52d9a80SChris Mason em = lookup_extent_mapping(tree, start, end); 1798a52d9a80SChris Mason if (!em || IS_ERR(em)) 1799a52d9a80SChris Mason break; 1800b888db2bSChris Mason if (!test_range_bit(tree, em->start, em->end, 1801a52d9a80SChris Mason EXTENT_LOCKED, 0)) { 1802a52d9a80SChris Mason remove_extent_mapping(tree, em); 1803a52d9a80SChris Mason /* once for the rb tree */ 1804a52d9a80SChris Mason free_extent_map(em); 1805b888db2bSChris Mason } 1806b888db2bSChris Mason start = em->end + 1; 1807a52d9a80SChris Mason /* once for us */ 1808a52d9a80SChris Mason free_extent_map(em); 1809a52d9a80SChris Mason } 1810b888db2bSChris Mason if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0)) 1811b888db2bSChris Mason ret = 0; 1812b888db2bSChris Mason else 1813a52d9a80SChris Mason clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE, 1814a52d9a80SChris Mason 1, 1, GFP_NOFS); 1815b888db2bSChris Mason return ret; 1816a52d9a80SChris Mason } 1817a52d9a80SChris Mason EXPORT_SYMBOL(try_release_extent_mapping); 1818a52d9a80SChris Mason 1819*d396c6f5SChristoph Hellwig sector_t extent_bmap(struct address_space *mapping, sector_t iblock, 1820*d396c6f5SChristoph Hellwig get_extent_t *get_extent) 1821*d396c6f5SChristoph Hellwig { 1822*d396c6f5SChristoph Hellwig struct inode *inode = mapping->host; 1823*d396c6f5SChristoph Hellwig u64 start = 
(u64)iblock << inode->i_blkbits; 1824*d396c6f5SChristoph Hellwig u64 end = start + (1 << inode->i_blkbits) - 1; 1825*d396c6f5SChristoph Hellwig sector_t sector = 0; struct extent_map *em; 1826*d396c6f5SChristoph Hellwig 1827*d396c6f5SChristoph Hellwig em = get_extent(inode, NULL, 0, start, end, 0); 1828*d396c6f5SChristoph Hellwig if (!em || IS_ERR(em)) 1829*d396c6f5SChristoph Hellwig return 0; 1830*d396c6f5SChristoph Hellwig 1831*d396c6f5SChristoph Hellwig // XXX(hch): block 0 is valid in some cases, e.g. XFS RT device 1832*d396c6f5SChristoph Hellwig if (em->block_start != EXTENT_MAP_INLINE && 1833*d396c6f5SChristoph Hellwig em->block_start != 0) 1834*d396c6f5SChristoph Hellwig sector = (em->block_start + start - em->start) >> inode->i_blkbits; 1835*d396c6f5SChristoph Hellwig /* drop the reference taken in get_extent */ 1836*d396c6f5SChristoph Hellwig free_extent_map(em); 1837*d396c6f5SChristoph Hellwig return sector; } 1838
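
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * roughly how a filesystem could wire the exported helpers above into its
 * address_space_operations. Everything named demo_* below, including the
 * containing inode structure and the get_extent callback, is hypothetical;
 * only the extent_map entry points are real. Note that the write path
 * assumes tree->ops supplies at least fill_delalloc() and
 * writepage_io_hook(), since extent_write_full_page() calls both without
 * checking for NULL, so a real filesystem would point tree->ops at its
 * hooks right after extent_map_tree_init().
 */
struct demo_inode {
	struct extent_map_tree extent_tree;
	struct inode vfs_inode;
};

static inline struct demo_inode *DEMO_I(struct inode *inode)
{
	return container_of(inode, struct demo_inode, vfs_inode);
}

/* the filesystem's block mapping callback; declared but not defined here */
static struct extent_map *demo_get_extent(struct inode *inode,
					  struct page *page,
					  size_t page_offset, u64 start,
					  u64 end, int create);

static int demo_readpage(struct file *file, struct page *page)
{
	struct extent_map_tree *tree =
		&DEMO_I(page->mapping->host)->extent_tree;
	return extent_read_full_page(tree, page, demo_get_extent);
}

static int demo_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_map_tree *tree =
		&DEMO_I(page->mapping->host)->extent_tree;
	return extent_write_full_page(tree, page, demo_get_extent, wbc);
}

static sector_t demo_bmap(struct address_space *mapping, sector_t block)
{
	return extent_bmap(mapping, block, demo_get_extent);
}

static struct address_space_operations demo_aops = {
	.readpage	= demo_readpage,
	.writepage	= demo_writepage,
	.bmap		= demo_bmap,
};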