xref: /openbmc/linux/fs/btrfs/extent_map.c (revision a52d9a80)
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include "extent_map.h"
12 
13 static struct kmem_cache *extent_map_cache;
14 static struct kmem_cache *extent_state_cache;
15 
16 struct tree_entry {
17 	u64 start;
18 	u64 end;
19 	int in_tree;
20 	struct rb_node rb_node;
21 };
22 
23 /* bits for the extent state */
24 #define EXTENT_DIRTY 1
25 #define EXTENT_WRITEBACK (1 << 1)
26 #define EXTENT_UPTODATE (1 << 2)
27 #define EXTENT_LOCKED (1 << 3)
28 #define EXTENT_NEW (1 << 4)
29 #define EXTENT_DELALLOC (1 << 5)
30 
31 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
32 
33 static LIST_HEAD(all_states);
34 static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
35 
36 void __init extent_map_init(void)
37 {
38 	extent_map_cache = kmem_cache_create("extent_map",
39 					    sizeof(struct extent_map), 0,
40 					    SLAB_RECLAIM_ACCOUNT |
41 					    SLAB_DESTROY_BY_RCU,
42 					    NULL);
43 	extent_state_cache = kmem_cache_create("extent_state",
44 					    sizeof(struct extent_state), 0,
45 					    SLAB_RECLAIM_ACCOUNT |
46 					    SLAB_DESTROY_BY_RCU,
47 					    NULL);
48 }
49 
50 void __exit extent_map_exit(void)
51 {
52 	while(!list_empty(&all_states)) {
53 		struct extent_state *state;
54 		struct list_head *cur = all_states.next;
55 		state = list_entry(cur, struct extent_state, list);
56 		printk("found leaked state %Lu %Lu state %d in_tree %d\n",
57 		       state->start, state->end, state->state, state->in_tree);
58 		list_del(&state->list);
59 		kfree(state);
60 	}
61 	if (extent_map_cache)
62 		kmem_cache_destroy(extent_map_cache);
63 	if (extent_state_cache)
64 		kmem_cache_destroy(extent_state_cache);
65 }
66 
67 void extent_map_tree_init(struct extent_map_tree *tree,
68 			  struct address_space *mapping, gfp_t mask)
69 {
70 	tree->map.rb_node = NULL;
71 	tree->state.rb_node = NULL;
72 	rwlock_init(&tree->lock);
73 	tree->mapping = mapping;
74 }
75 EXPORT_SYMBOL(extent_map_tree_init);
76 
77 struct extent_map *alloc_extent_map(gfp_t mask)
78 {
79 	struct extent_map *em;
80 	em = kmem_cache_alloc(extent_map_cache, mask);
81 	if (!em || IS_ERR(em))
82 		return em;
83 	em->in_tree = 0;
84 	atomic_set(&em->refs, 1);
85 	return em;
86 }
87 EXPORT_SYMBOL(alloc_extent_map);
88 
89 void free_extent_map(struct extent_map *em)
90 {
91 	if (atomic_dec_and_test(&em->refs)) {
92 		WARN_ON(em->in_tree);
93 		kmem_cache_free(extent_map_cache, em);
94 	}
95 }
96 EXPORT_SYMBOL(free_extent_map);
97 
98 
99 struct extent_state *alloc_extent_state(gfp_t mask)
100 {
101 	struct extent_state *state;
102 	state = kmem_cache_alloc(extent_state_cache, mask);
103 	if (!state || IS_ERR(state))
104 		return state;
105 	state->state = 0;
106 	state->in_tree = 0;
107 	atomic_set(&state->refs, 1);
108 	init_waitqueue_head(&state->wq);
109 	spin_lock_irq(&state_lock);
110 	list_add(&state->list, &all_states);
111 	spin_unlock_irq(&state_lock);
112 	return state;
113 }
114 EXPORT_SYMBOL(alloc_extent_state);
115 
116 void free_extent_state(struct extent_state *state)
117 {
118 	if (atomic_dec_and_test(&state->refs)) {
119 		WARN_ON(state->in_tree);
120 		spin_lock_irq(&state_lock);
121 		list_del_init(&state->list);
122 		spin_unlock_irq(&state_lock);
123 		kmem_cache_free(extent_state_cache, state);
124 	}
125 }
126 EXPORT_SYMBOL(free_extent_state);
127 
128 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
129 				   struct rb_node *node)
130 {
131 	struct rb_node ** p = &root->rb_node;
132 	struct rb_node * parent = NULL;
133 	struct tree_entry *entry;
134 
135 	while(*p) {
136 		parent = *p;
137 		entry = rb_entry(parent, struct tree_entry, rb_node);
138 
139 		if (offset < entry->start)
140 			p = &(*p)->rb_left;
141 		else if (offset > entry->end)
142 			p = &(*p)->rb_right;
143 		else
144 			return parent;
145 	}
146 
147 	entry = rb_entry(node, struct tree_entry, rb_node);
148 	entry->in_tree = 1;
149 	rb_link_node(node, parent, p);
150 	rb_insert_color(node, root);
151 	return NULL;
152 }
153 
154 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
155 				   struct rb_node **prev_ret)
156 {
157 	struct rb_node * n = root->rb_node;
158 	struct rb_node *prev = NULL;
159 	struct tree_entry *entry;
160 	struct tree_entry *prev_entry = NULL;
161 
162 	while(n) {
163 		entry = rb_entry(n, struct tree_entry, rb_node);
164 		prev = n;
165 		prev_entry = entry;
166 
167 		if (offset < entry->start)
168 			n = n->rb_left;
169 		else if (offset > entry->end)
170 			n = n->rb_right;
171 		else
172 			return n;
173 	}
174 	if (!prev_ret)
175 		return NULL;
176 	while(prev && offset > prev_entry->end) {
177 		prev = rb_next(prev);
178 		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
179 	}
180 	*prev_ret = prev;
181 	return NULL;
182 }
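
/*
 * A small worked example of the search helpers above (illustrative only):
 * with tree entries covering [0, 4095] and [8192, 12287],
 *
 *   __tree_search(root, 100)  returns the [0, 4095] node (containment hit)
 *   __tree_search(root, 5000) finds nothing, and sets *prev_ret to the
 *                             first entry ending after 5000: [8192, 12287]
 *
 * tree_search() below folds the two results together, so callers always get
 * the first entry that ends at or after the offset, or NULL if none exists.
 */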
183 
184 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
185 {
186 	struct rb_node *prev;
187 	struct rb_node *ret;
188 	ret = __tree_search(root, offset, &prev);
189 	if (!ret)
190 		return prev;
191 	return ret;
192 }
193 
194 static int tree_delete(struct rb_root *root, u64 offset)
195 {
196 	struct rb_node *node;
197 	struct tree_entry *entry;
198 
199 	node = __tree_search(root, offset, NULL);
200 	if (!node)
201 		return -ENOENT;
202 	entry = rb_entry(node, struct tree_entry, rb_node);
203 	entry->in_tree = 0;
204 	rb_erase(node, root);
205 	return 0;
206 }
207 
208 /*
209  * add_extent_mapping tries a simple backward merge with existing
210  * mappings.  The extent_map struct passed in will be inserted into
211  * the tree directly (no copies made, just a reference taken).
212  */
213 int add_extent_mapping(struct extent_map_tree *tree,
214 		       struct extent_map *em)
215 {
216 	int ret = 0;
217 	struct extent_map *prev = NULL;
218 	struct rb_node *rb;
219 
220 	write_lock_irq(&tree->lock);
221 	rb = tree_insert(&tree->map, em->end, &em->rb_node);
222 	if (rb) {
223 		prev = rb_entry(rb, struct extent_map, rb_node);
224 		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
225 		ret = -EEXIST;
226 		goto out;
227 	}
228 	atomic_inc(&em->refs);
229 	if (em->start != 0) {
230 		rb = rb_prev(&em->rb_node);
231 		if (rb)
232 			prev = rb_entry(rb, struct extent_map, rb_node);
233 		if (prev && prev->end + 1 == em->start &&
234 		    ((em->block_start == 0 && prev->block_start == 0) ||
235 			     (em->block_start == prev->block_end + 1))) {
236 			em->start = prev->start;
237 			em->block_start = prev->block_start;
238 			rb_erase(&prev->rb_node, &tree->map);
239 			prev->in_tree = 0;
240 			free_extent_map(prev);
241 		}
242 	}
243 out:
244 	write_unlock_irq(&tree->lock);
245 	return ret;
246 }
247 EXPORT_SYMBOL(add_extent_mapping);
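
/*
 * A minimal, hypothetical usage sketch for add_extent_mapping(); it is not
 * called anywhere in this file.  The extent_map fields are filled in the
 * way the merge logic above expects (block_end tracking block_start), and
 * error handling is reduced to the bare minimum.
 */
static int __attribute__((unused))
example_insert_mapping(struct extent_map_tree *tree, u64 start, u64 end,
		       u64 block_start, struct block_device *bdev)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->start = start;
	em->end = end;
	em->block_start = block_start;
	em->block_end = block_start + (end - start);
	em->bdev = bdev;

	/* the tree takes its own reference when the insert succeeds */
	ret = add_extent_mapping(tree, em);

	/* drop the allocation reference; -EEXIST means the range was taken */
	free_extent_map(em);
	return ret;
}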
248 
249 /*
250  * lookup_extent_mapping returns the first extent_map struct in the
251  * tree that intersects the [start, end] (inclusive) range.  There may
252  * be additional objects in the tree that intersect, so check the object
253  * returned carefully to make sure you don't need additional lookups.
254  */
255 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
256 					 u64 start, u64 end)
257 {
258 	struct extent_map *em;
259 	struct rb_node *rb_node;
260 
261 	read_lock_irq(&tree->lock);
262 	rb_node = tree_search(&tree->map, start);
263 	if (!rb_node) {
264 		em = NULL;
265 		goto out;
266 	}
267 	if (IS_ERR(rb_node)) {
268 		em = ERR_PTR(PTR_ERR(rb_node));
269 		goto out;
270 	}
271 	em = rb_entry(rb_node, struct extent_map, rb_node);
272 	if (em->end < start || em->start > end) {
273 		em = NULL;
274 		goto out;
275 	}
276 	atomic_inc(&em->refs);
277 out:
278 	read_unlock_irq(&tree->lock);
279 	return em;
280 }
281 EXPORT_SYMBOL(lookup_extent_mapping);
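
/*
 * A minimal sketch (not called from this file) of walking every mapping
 * that intersects [start, end], following the advice in the comment above:
 * one lookup only returns the first intersecting extent_map, so continue
 * searching just past the end of each result.
 */
static void __attribute__((unused))
example_walk_mappings(struct extent_map_tree *tree, u64 start, u64 end)
{
	struct extent_map *em;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		printk("mapping %Lu %Lu\n", em->start, em->end);
		start = em->end + 1;
		/* drop the reference taken by lookup_extent_mapping */
		free_extent_map(em);
	}
}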
282 
283 /*
284  * removes an extent_map struct from the tree.  No reference counts are
285  * dropped, and no checks are done to see if the range is in use
286  */
287 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
288 {
289 	int ret;
290 
291 	write_lock_irq(&tree->lock);
292 	ret = tree_delete(&tree->map, em->end);
293 	write_unlock_irq(&tree->lock);
294 	return ret;
295 }
296 EXPORT_SYMBOL(remove_extent_mapping);
297 
298 /*
299  * utility function to look for merge candidates inside a given range.
300  * Any extents with matching state are merged together into a single
301  * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
302  * are not merged because the end_io handlers need to be able to do
303  * operations on them without sleeping (or doing allocations/splits).
304  *
305  * This should be called with the tree lock held.
306  */
307 static int merge_state(struct extent_map_tree *tree,
308 		       struct extent_state *state)
309 {
310 	struct extent_state *other;
311 	struct rb_node *other_node;
312 
313 	if (state->state & EXTENT_IOBITS)
314 		return 0;
315 
316 	other_node = rb_prev(&state->rb_node);
317 	if (other_node) {
318 		other = rb_entry(other_node, struct extent_state, rb_node);
319 		if (other->end == state->start - 1 &&
320 		    other->state == state->state) {
321 			state->start = other->start;
322 			other->in_tree = 0;
323 			rb_erase(&other->rb_node, &tree->state);
324 			free_extent_state(other);
325 		}
326 	}
327 	other_node = rb_next(&state->rb_node);
328 	if (other_node) {
329 		other = rb_entry(other_node, struct extent_state, rb_node);
330 		if (other->start == state->end + 1 &&
331 		    other->state == state->state) {
332 			other->start = state->start;
333 			state->in_tree = 0;
334 			rb_erase(&state->rb_node, &tree->state);
335 			free_extent_state(state);
336 		}
337 	}
338 	return 0;
339 }
340 
341 /*
342  * insert an extent_state struct into the tree.  'bits' are set on the
343  * struct before it is inserted.
344  *
345  * This may return -EEXIST if the extent is already there, in which case the
346  * state struct is freed.
347  *
348  * The tree lock is not taken internally.  This is a utility function and
349  * probably isn't what you want to call (see set/clear_extent_bit).
350  */
351 static int insert_state(struct extent_map_tree *tree,
352 			struct extent_state *state, u64 start, u64 end,
353 			int bits)
354 {
355 	struct rb_node *node;
356 
357 	if (end < start) {
358 		printk("end < start %Lu %Lu\n", end, start);
359 		WARN_ON(1);
360 	}
361 	state->state |= bits;
362 	state->start = start;
363 	state->end = end;
364 	if ((end & 4095) == 0) {
365 		printk("insert state %Lu %Lu strange end\n", start, end);
366 		WARN_ON(1);
367 	}
368 	node = tree_insert(&tree->state, end, &state->rb_node);
369 	if (node) {
370 		struct extent_state *found;
371 		found = rb_entry(node, struct extent_state, rb_node);
372 		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
373 		free_extent_state(state);
374 		return -EEXIST;
375 	}
376 	merge_state(tree, state);
377 	return 0;
378 }
379 
380 /*
381  * split a given extent state struct in two, inserting the preallocated
382  * struct 'prealloc' as the newly created first half.  'split' indicates an
383  * offset inside 'orig' where it should be split.
384  *
385  * Before calling,
386  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
387  * are two extent state structs in the tree:
388  * prealloc: [orig->start, split - 1]
389  * orig: [ split, orig->end ]
390  *
391  * The tree locks are not taken by this function. They need to be held
392  * by the caller.
393  */
394 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
395 		       struct extent_state *prealloc, u64 split)
396 {
397 	struct rb_node *node;
398 	prealloc->start = orig->start;
399 	prealloc->end = split - 1;
400 	prealloc->state = orig->state;
401 	orig->start = split;
402 	if ((prealloc->end & 4095) == 0) {
403 		printk("insert state %Lu %Lu strange end\n", prealloc->start,
404 		       prealloc->end);
405 		WARN_ON(1);
406 	}
407 	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
408 	if (node) {
409 		struct extent_state *found;
410 		found = rb_entry(node, struct extent_state, rb_node);
411 		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
412 		free_extent_state(prealloc);
413 		return -EEXIST;
414 	}
415 	return 0;
416 }
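
/*
 * Concretely (illustrative numbers): splitting a state covering [0, 8191]
 * at split == 4096 leaves two entries in the tree, 'prealloc' covering
 * [0, 4095] and 'orig' covering [4096, 8191], both carrying the original
 * state bits.
 */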
417 
418 /*
419  * utility function to clear some bits in an extent state struct.
420  * it will optionally wake up anyone waiting on this state (wake == 1), or
421  * forcibly remove the state from the tree (delete == 1).
422  *
423  * If no bits are set on the state struct after clearing things, the
424  * struct is freed and removed from the tree
425  */
426 static int clear_state_bit(struct extent_map_tree *tree,
427 			    struct extent_state *state, int bits, int wake,
428 			    int delete)
429 {
430 	int ret = state->state & bits;
431 	state->state &= ~bits;
432 	if (wake)
433 		wake_up(&state->wq);
434 	if (delete || state->state == 0) {
435 		if (state->in_tree) {
436 			rb_erase(&state->rb_node, &tree->state);
437 			state->in_tree = 0;
438 			free_extent_state(state);
439 		} else {
440 			WARN_ON(1);
441 		}
442 	} else {
443 		merge_state(tree, state);
444 	}
445 	return ret;
446 }
447 
448 /*
449  * clear some bits on a range in the tree.  This may require splitting
450  * or inserting elements in the tree, so the gfp mask is used to
451  * indicate which allocations or sleeping are allowed.
452  *
453  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
454  * the given range from the tree regardless of state (i.e. for truncate).
455  *
456  * the range [start, end] is inclusive.
457  *
458  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
459  * bits were already set, or zero if none of the bits were already set.
460  */
461 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
462 		     int bits, int wake, int delete, gfp_t mask)
463 {
464 	struct extent_state *state;
465 	struct extent_state *prealloc = NULL;
466 	struct rb_node *node;
467 	int err;
468 	int set = 0;
469 
470 again:
471 	if (!prealloc && (mask & __GFP_WAIT)) {
472 		prealloc = alloc_extent_state(mask);
473 		if (!prealloc)
474 			return -ENOMEM;
475 	}
476 
477 	write_lock_irq(&tree->lock);
478 	/*
479 	 * this search will find the extents that end after
480 	 * our range starts
481 	 */
482 	node = tree_search(&tree->state, start);
483 	if (!node)
484 		goto out;
485 	state = rb_entry(node, struct extent_state, rb_node);
486 	if (state->start > end)
487 		goto out;
488 	WARN_ON(state->end < start);
489 
490 	/*
491 	 *     | ---- desired range ---- |
492 	 *  | state | or
493 	 *  | ------------- state -------------- |
494 	 *
495 	 * We need to split the extent we found, and may flip
496 	 * bits on the second half.
497 	 *
498 	 * If the extent we found extends past our range, we
499 	 * just split and search again.  It'll get split again
500 	 * the next time though.
501 	 *
502 	 * If the extent we found is inside our range, we clear
503 	 * the desired bit on it.
504 	 */
505 
506 	if (state->start < start) {
507 		err = split_state(tree, state, prealloc, start);
508 		BUG_ON(err == -EEXIST);
509 		prealloc = NULL;
510 		if (err)
511 			goto out;
512 		if (state->end <= end) {
513 			start = state->end + 1;
514 			set |= clear_state_bit(tree, state, bits,
515 					wake, delete);
516 		} else {
517 			start = state->start;
518 		}
519 		goto search_again;
520 	}
521 	/*
522 	 * | ---- desired range ---- |
523 	 *                        | state |
524 	 * We need to split the extent, and clear the bit
525 	 * on the first half
526 	 */
527 	if (state->start <= end && state->end > end) {
528 		err = split_state(tree, state, prealloc, end + 1);
529 		BUG_ON(err == -EEXIST);
530 
531 		if (wake)
532 			wake_up(&state->wq);
533 		set |= clear_state_bit(tree, prealloc, bits,
534 				       wake, delete);
535 		prealloc = NULL;
536 		goto out;
537 	}
538 
539 	start = state->end + 1;
540 	set |= clear_state_bit(tree, state, bits, wake, delete);
541 	goto search_again;
542 
543 out:
544 	write_unlock_irq(&tree->lock);
545 	if (prealloc)
546 		free_extent_state(prealloc);
547 
548 	return set;
549 
550 search_again:
551 	if (start >= end)
552 		goto out;
553 	write_unlock_irq(&tree->lock);
554 	if (mask & __GFP_WAIT)
555 		cond_resched();
556 	goto again;
557 }
558 EXPORT_SYMBOL(clear_extent_bit);
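
/*
 * Splitting example for the code above (illustrative numbers): clearing
 * bits over [4096, 8191] while the tree holds one state covering
 * [0, 12287] first splits off [0, 4095], then splits off [8192, 12287],
 * and only the middle piece [4096, 8191] has its bits cleared (it is also
 * freed if no bits remain, or if 'delete' was passed as 1).
 */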
559 
560 static int wait_on_state(struct extent_map_tree *tree,
561 			 struct extent_state *state)
562 {
563 	DEFINE_WAIT(wait);
564 	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
565 	read_unlock_irq(&tree->lock);
566 	schedule();
567 	read_lock_irq(&tree->lock);
568 	finish_wait(&state->wq, &wait);
569 	return 0;
570 }
571 
572 /*
573  * waits for one or more bits to clear on a range in the state tree.
574  * The range [start, end] is inclusive.
575  * The tree lock is taken by this function
576  */
577 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
578 {
579 	struct extent_state *state;
580 	struct rb_node *node;
581 
582 	read_lock_irq(&tree->lock);
583 again:
584 	while (1) {
585 		/*
586 		 * this search will find all the extents that end after
587 		 * our range starts
588 		 */
589 		node = tree_search(&tree->state, start);
590 		if (!node)
591 			break;
592 
593 		state = rb_entry(node, struct extent_state, rb_node);
594 
595 		if (state->start > end)
596 			goto out;
597 
598 		if (state->state & bits) {
599 			start = state->start;
600 			atomic_inc(&state->refs);
601 			wait_on_state(tree, state);
602 			free_extent_state(state);
603 			goto again;
604 		}
605 		start = state->end + 1;
606 
607 		if (start > end)
608 			break;
609 
610 		if (need_resched()) {
611 			read_unlock_irq(&tree->lock);
612 			cond_resched();
613 			read_lock_irq(&tree->lock);
614 		}
615 	}
616 out:
617 	read_unlock_irq(&tree->lock);
618 	return 0;
619 }
620 EXPORT_SYMBOL(wait_extent_bit);
621 
622 /*
623  * set some bits on a range in the tree.  This may require allocations
624  * or sleeping, so the gfp mask is used to indicate what is allowed.
625  *
626  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
627  * range already has the desired bits set.  The start of the existing
628  * range is returned in failed_start in this case.
629  *
630  * [start, end] is inclusive
631  * This takes the tree lock.
632  */
633 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
634 		   int exclusive, u64 *failed_start, gfp_t mask)
635 {
636 	struct extent_state *state;
637 	struct extent_state *prealloc = NULL;
638 	struct rb_node *node;
639 	int err = 0;
640 	int set;
641 	u64 last_start;
642 	u64 last_end;
643 again:
644 	if (!prealloc && (mask & __GFP_WAIT)) {
645 		prealloc = alloc_extent_state(mask);
646 		if (!prealloc)
647 			return -ENOMEM;
648 	}
649 
650 	write_lock_irq(&tree->lock);
651 	/*
652 	 * this search will find all the extents that end after
653 	 * our range starts.
654 	 */
655 	node = tree_search(&tree->state, start);
656 	if (!node) {
657 		err = insert_state(tree, prealloc, start, end, bits);
658 		prealloc = NULL;
659 		BUG_ON(err == -EEXIST);
660 		goto out;
661 	}
662 
663 	state = rb_entry(node, struct extent_state, rb_node);
664 	last_start = state->start;
665 	last_end = state->end;
666 
667 	/*
668 	 * | ---- desired range ---- |
669 	 * | state |
670 	 *
671 	 * Just lock what we found and keep going
672 	 */
673 	if (state->start == start && state->end <= end) {
674 		set = state->state & bits;
675 		if (set && exclusive) {
676 			*failed_start = state->start;
677 			err = -EEXIST;
678 			goto out;
679 		}
680 		state->state |= bits;
681 		start = state->end + 1;
682 		merge_state(tree, state);
683 		goto search_again;
684 	}
685 
686 	/*
687 	 *     | ---- desired range ---- |
688 	 * | state |
689 	 *   or
690 	 * | ------------- state -------------- |
691 	 *
692 	 * We need to split the extent we found, and may flip bits on
693 	 * the second half.
694 	 *
695 	 * If the extent we found extends past our
696 	 * range, we just split and search again.  It'll get split
697 	 * again the next time though.
698 	 *
699 	 * If the extent we found is inside our range, we set the
700 	 * desired bit on it.
701 	 */
702 	if (state->start < start) {
703 		set = state->state & bits;
704 		if (exclusive && set) {
705 			*failed_start = start;
706 			err = -EEXIST;
707 			goto out;
708 		}
709 		err = split_state(tree, state, prealloc, start);
710 		BUG_ON(err == -EEXIST);
711 		prealloc = NULL;
712 		if (err)
713 			goto out;
714 		if (state->end <= end) {
715 			state->state |= bits;
716 			start = state->end + 1;
717 			merge_state(tree, state);
718 		} else {
719 			start = state->start;
720 		}
721 		goto search_again;
722 	}
723 	/*
724 	 * | ---- desired range ---- |
725 	 *                        | state |
726 	 * We need to split the extent, and set the bit
727 	 * on the first half
728 	 */
729 	if (state->start <= end && state->end > end) {
730 		set = state->state & bits;
731 		if (exclusive && set) {
732 			*failed_start = start;
733 			err = -EEXIST;
734 			goto out;
735 		}
736 		err = split_state(tree, state, prealloc, end + 1);
737 		BUG_ON(err == -EEXIST);
738 
739 		prealloc->state |= bits;
740 		merge_state(tree, prealloc);
741 		prealloc = NULL;
742 		goto out;
743 	}
744 
745 	/*
746 	 * | ---- desired range ---- |
747 	 *     | state | or               | state |
748 	 *
749 	 * There's a hole, we need to insert something in it and
750 	 * ignore the extent we found.
751 	 */
752 	if (state->start > start) {
753 		u64 this_end;
754 		if (end < last_start)
755 			this_end = end;
756 		else
757 			this_end = last_start - 1;
758 		err = insert_state(tree, prealloc, start, this_end,
759 				   bits);
760 		prealloc = NULL;
761 		BUG_ON(err == -EEXIST);
762 		if (err)
763 			goto out;
764 		start = this_end + 1;
765 		goto search_again;
766 	}
767 	goto search_again;
768 
769 out:
770 	write_unlock_irq(&tree->lock);
771 	if (prealloc)
772 		free_extent_state(prealloc);
773 
774 	return err;
775 
776 search_again:
777 	if (start > end)
778 		goto out;
779 	write_unlock_irq(&tree->lock);
780 	if (mask & __GFP_WAIT)
781 		cond_resched();
782 	goto again;
783 }
784 EXPORT_SYMBOL(set_extent_bit);
785 
786 /* wrappers around set/clear extent bit */
787 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
788 		     gfp_t mask)
789 {
790 	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
791 			      mask);
792 }
793 EXPORT_SYMBOL(set_extent_dirty);
794 
795 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
796 		       gfp_t mask)
797 {
798 	return clear_extent_bit(tree, start, end, EXTENT_DIRTY, 0, 0, mask);
799 }
800 EXPORT_SYMBOL(clear_extent_dirty);
801 
802 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
803 		     gfp_t mask)
804 {
805 	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
806 			      mask);
807 }
808 EXPORT_SYMBOL(set_extent_new);
809 
810 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
811 		       gfp_t mask)
812 {
813 	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
814 }
815 EXPORT_SYMBOL(clear_extent_new);
816 
817 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
818 			gfp_t mask)
819 {
820 	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
821 			      mask);
822 }
823 EXPORT_SYMBOL(set_extent_uptodate);
824 
825 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
826 			  gfp_t mask)
827 {
828 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
829 }
830 EXPORT_SYMBOL(clear_extent_uptodate);
831 
832 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
833 			 gfp_t mask)
834 {
835 	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
836 			      0, NULL, mask);
837 }
838 EXPORT_SYMBOL(set_extent_writeback);
839 
840 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
841 			   gfp_t mask)
842 {
843 	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
844 }
845 EXPORT_SYMBOL(clear_extent_writeback);
846 
847 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
848 {
849 	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
850 }
851 EXPORT_SYMBOL(wait_on_extent_writeback);
852 
853 /*
854  * locks a range in ascending order, waiting for any locked regions
855  * it hits on the way.  [start, end] is inclusive, and this will sleep.
856  */
857 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
858 {
859 	int err;
860 	u64 failed_start;
861 	while (1) {
862 		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
863 				     &failed_start, mask);
864 		if (err == -EEXIST && (mask & __GFP_WAIT)) {
865 			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
866 			start = failed_start;
867 		} else {
868 			break;
869 		}
870 		WARN_ON(start > end);
871 	}
872 	return err;
873 }
874 EXPORT_SYMBOL(lock_extent);
875 
876 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
877 		  gfp_t mask)
878 {
879 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
880 }
881 EXPORT_SYMBOL(unlock_extent);
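
/*
 * The usual calling pattern, as used by the readpage/invalidatepage code
 * later in this file (hypothetical helper, not called from here): take the
 * range lock, operate on the range, then drop the lock directly or leave
 * it for an end_io handler to clear.
 */
static void __attribute__((unused))
example_locked_region(struct extent_map_tree *tree, u64 start, u64 end)
{
	lock_extent(tree, start, end, GFP_NOFS);
	/* ... operate on [start, end] while the range is locked ... */
	unlock_extent(tree, start, end, GFP_NOFS);
}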
882 
883 /*
884  * helper function to set pages and extents in the tree dirty
885  */
886 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
887 {
888 	unsigned long index = start >> PAGE_CACHE_SHIFT;
889 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
890 	struct page *page;
891 
892 	while (index <= end_index) {
893 		page = find_get_page(tree->mapping, index);
894 		BUG_ON(!page);
895 		__set_page_dirty_nobuffers(page);
896 		page_cache_release(page);
897 		index++;
898 	}
899 	set_extent_dirty(tree, start, end, GFP_NOFS);
900 	return 0;
901 }
902 EXPORT_SYMBOL(set_range_dirty);
903 
904 /*
905  * helper function to set both pages and extents in the tree writeback
906  */
907 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
908 {
909 	unsigned long index = start >> PAGE_CACHE_SHIFT;
910 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
911 	struct page *page;
912 
913 	while (index <= end_index) {
914 		page = find_get_page(tree->mapping, index);
915 		BUG_ON(!page);
916 		set_page_writeback(page);
917 		page_cache_release(page);
918 		index++;
919 	}
920 	set_extent_writeback(tree, start, end, GFP_NOFS);
921 	return 0;
922 }
923 EXPORT_SYMBOL(set_range_writeback);
924 
925 /*
926  * helper function to lock both pages and extents in the tree.
927  * pages must be locked first.
928  */
929 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
930 {
931 	unsigned long index = start >> PAGE_CACHE_SHIFT;
932 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
933 	struct page *page;
934 	int err;
935 
936 	while (index <= end_index) {
937 		page = grab_cache_page(tree->mapping, index);
938 		if (!page) {
939 			err = -ENOMEM;
940 			goto failed;
941 		}
942 		if (IS_ERR(page)) {
943 			err = PTR_ERR(page);
944 			goto failed;
945 		}
946 		index++;
947 	}
948 	lock_extent(tree, start, end, GFP_NOFS);
949 	return 0;
950 
951 failed:
952 	/*
953 	 * we failed above in getting the page at 'index', so we undo here
954 	 * up to but not including the page at 'index'
955 	 */
956 	end_index = index;
957 	index = start >> PAGE_CACHE_SHIFT;
958 	while (index < end_index) {
959 		page = find_get_page(tree->mapping, index);
960 		unlock_page(page);
961 		page_cache_release(page);
962 		index++;
963 	}
964 	return err;
965 }
966 EXPORT_SYMBOL(lock_range);
967 
968 /*
969  * helper function to unlock both pages and extents in the tree.
970  */
971 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
972 {
973 	unsigned long index = start >> PAGE_CACHE_SHIFT;
974 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
975 	struct page *page;
976 
977 	while (index <= end_index) {
978 		page = find_get_page(tree->mapping, index);
979 		unlock_page(page);
980 		page_cache_release(page);
981 		index++;
982 	}
983 	unlock_extent(tree, start, end, GFP_NOFS);
984 	return 0;
985 }
986 EXPORT_SYMBOL(unlock_range);
987 
988 /*
989  * searches a range in the state tree for a given mask.
990  * If 'filled' == 1, this returns 1 only if every extent in the range
991  * has the bits set.  Otherwise, 1 is returned if any bit in the
992  * range is found set.
993  */
994 static int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
995 			  int bits, int filled)
996 {
997 	struct extent_state *state = NULL;
998 	struct rb_node *node;
999 	int bitset = 0;
1000 
1001 	read_lock_irq(&tree->lock);
1002 	node = tree_search(&tree->state, start);
1003 	while (node && start <= end) {
1004 		state = rb_entry(node, struct extent_state, rb_node);
1005 		if (state->start > end)
1006 			break;
1007 
1008 		if (filled && state->start > start) {
1009 			bitset = 0;
1010 			break;
1011 		}
1012 		if (state->state & bits) {
1013 			bitset = 1;
1014 			if (!filled)
1015 				break;
1016 		} else if (filled) {
1017 			bitset = 0;
1018 			break;
1019 		}
1020 		start = state->end + 1;
1021 		if (start > end)
1022 			break;
1023 		node = rb_next(node);
1024 	}
1025 	read_unlock_irq(&tree->lock);
1026 	return bitset;
1027 }
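
/*
 * Example of the 'filled' distinction (illustrative): with state records
 * [0, 4095] dirty and [4096, 8191] clean, testing [0, 8191] for
 * EXTENT_DIRTY returns 1 with filled == 0 (some bit is set somewhere in
 * the range) but 0 with filled == 1 (dirty state does not cover the whole
 * range).
 */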
1028 
1029 /*
1030  * helper function to set a given page up to date if all the
1031  * extents in the tree for that page are up to date
1032  */
1033 static int check_page_uptodate(struct extent_map_tree *tree,
1034 			       struct page *page)
1035 {
1036 	u64 start = page->index << PAGE_CACHE_SHIFT;
1037 	u64 end = start + PAGE_CACHE_SIZE - 1;
1038 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1039 		SetPageUptodate(page);
1040 	return 0;
1041 }
1042 
1043 /*
1044  * helper function to unlock a page if all the extents in the tree
1045  * for that page are unlocked
1046  */
1047 static int check_page_locked(struct extent_map_tree *tree,
1048 			     struct page *page)
1049 {
1050 	u64 start = page->index << PAGE_CACHE_SHIFT;
1051 	u64 end = start + PAGE_CACHE_SIZE - 1;
1052 	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1053 		unlock_page(page);
1054 	return 0;
1055 }
1056 
1057 /*
1058  * helper function to end page writeback if all the extents
1059  * in the tree for that page are done with writeback
1060  */
1061 static int check_page_writeback(struct extent_map_tree *tree,
1062 			     struct page *page)
1063 {
1064 	u64 start = page->index << PAGE_CACHE_SHIFT;
1065 	u64 end = start + PAGE_CACHE_SIZE - 1;
1066 	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1067 		end_page_writeback(page);
1068 	return 0;
1069 }
1070 
1071 /* lots and lots of room for performance fixes in the end_bio funcs */
1072 
1073 /*
1074  * after a writepage IO is done, we need to:
1075  * clear the uptodate bits on error
1076  * clear the writeback bits in the extent tree for this IO
1077  * end_page_writeback if the page has no more pending IO
1078  *
1079  * Scheduling is not allowed, so the extent state tree is expected
1080  * to have one and only one object corresponding to this IO.
1081  */
1082 static int end_bio_extent_writepage(struct bio *bio,
1083 				   unsigned int bytes_done, int err)
1084 {
1085 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1086 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1087 	struct extent_map_tree *tree = bio->bi_private;
1088 	u64 start;
1089 	u64 end;
1090 	int whole_page;
1091 
1092 	if (bio->bi_size)
1093 		return 1;
1094 
1095 	do {
1096 		struct page *page = bvec->bv_page;
1097 		start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1098 		end = start + bvec->bv_len - 1;
1099 
1100 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1101 			whole_page = 1;
1102 		else
1103 			whole_page = 0;
1104 
1105 		if (--bvec >= bio->bi_io_vec)
1106 			prefetchw(&bvec->bv_page->flags);
1107 
1108 		if (!uptodate) {
1109 			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1110 			ClearPageUptodate(page);
1111 			SetPageError(page);
1112 		}
1113 		clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1114 
1115 		if (whole_page)
1116 			end_page_writeback(page);
1117 		else
1118 			check_page_writeback(tree, page);
1119 	} while (bvec >= bio->bi_io_vec);
1120 
1121 	bio_put(bio);
1122 	return 0;
1123 }
1124 
1125 /*
1126  * after a readpage IO is done, we need to:
1127  * clear the uptodate bits on error
1128  * set the uptodate bits if things worked
1129  * set the page up to date if all extents in the tree are uptodate
1130  * clear the lock bit in the extent tree
1131  * unlock the page if there are no other extents locked for it
1132  *
1133  * Scheduling is not allowed, so the extent state tree is expected
1134  * to have one and only one object corresponding to this IO.
1135  */
1136 static int end_bio_extent_readpage(struct bio *bio,
1137 				   unsigned int bytes_done, int err)
1138 {
1139 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1140 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1141 	struct extent_map_tree *tree = bio->bi_private;
1142 	u64 start;
1143 	u64 end;
1144 	int whole_page;
1145 
1146 	if (bio->bi_size)
1147 		return 1;
1148 
1149 	do {
1150 		struct page *page = bvec->bv_page;
1151 		start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1152 		end = start + bvec->bv_len - 1;
1153 
1154 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1155 			whole_page = 1;
1156 		else
1157 			whole_page = 0;
1158 
1159 		if (--bvec >= bio->bi_io_vec)
1160 			prefetchw(&bvec->bv_page->flags);
1161 
1162 		if (uptodate) {
1163 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1164 			if (whole_page)
1165 				SetPageUptodate(page);
1166 			else
1167 				check_page_uptodate(tree, page);
1168 		} else {
1169 			ClearPageUptodate(page);
1170 			SetPageError(page);
1171 		}
1172 
1173 		unlock_extent(tree, start, end, GFP_ATOMIC);
1174 
1175 		if (whole_page)
1176 			unlock_page(page);
1177 		else
1178 			check_page_locked(tree, page);
1179 	} while (bvec >= bio->bi_io_vec);
1180 
1181 	bio_put(bio);
1182 	return 0;
1183 }
1184 
1185 /*
1186  * IO done from prepare_write is pretty simple: we just unlock
1187  * the structs in the extent tree when done, and set the uptodate bits
1188  * as appropriate.
1189  */
1190 static int end_bio_extent_preparewrite(struct bio *bio,
1191 				       unsigned int bytes_done, int err)
1192 {
1193 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1194 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1195 	struct extent_map_tree *tree = bio->bi_private;
1196 	u64 start;
1197 	u64 end;
1198 
1199 	if (bio->bi_size)
1200 		return 1;
1201 
1202 	do {
1203 		struct page *page = bvec->bv_page;
1204 		start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1205 		end = start + bvec->bv_len - 1;
1206 
1207 		if (--bvec >= bio->bi_io_vec)
1208 			prefetchw(&bvec->bv_page->flags);
1209 
1210 		if (uptodate) {
1211 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1212 		} else {
1213 			ClearPageUptodate(page);
1214 			SetPageError(page);
1215 		}
1216 
1217 		unlock_extent(tree, start, end, GFP_ATOMIC);
1218 
1219 	} while (bvec >= bio->bi_io_vec);
1220 
1221 	bio_put(bio);
1222 	return 0;
1223 }
1224 
1225 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1226 			      struct page *page, sector_t sector,
1227 			      size_t size, unsigned long offset,
1228 			      struct block_device *bdev,
1229 			      bio_end_io_t end_io_func)
1230 {
1231 	struct bio *bio;
1232 	int ret = 0;
1233 
1234 	bio = bio_alloc(GFP_NOIO, 1);
1235 
1236 	bio->bi_sector = sector;
1237 	bio->bi_bdev = bdev;
1238 	bio->bi_io_vec[0].bv_page = page;
1239 	bio->bi_io_vec[0].bv_len = size;
1240 	bio->bi_io_vec[0].bv_offset = offset;
1241 
1242 	bio->bi_vcnt = 1;
1243 	bio->bi_idx = 0;
1244 	bio->bi_size = size;
1245 
1246 	bio->bi_end_io = end_io_func;
1247 	bio->bi_private = tree;
1248 
1249 	bio_get(bio);
1250 	submit_bio(rw, bio);
1251 
1252 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
1253 		ret = -EOPNOTSUPP;
1254 
1255 	bio_put(bio);
1256 	return ret;
1257 }
1258 
1259 /*
1260  * basic readpage implementation.  Locked extent state structs are inserted
1261  * into the tree and are removed again when the IO is done (by the end_io
1262  * handlers)
1263  */
1264 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1265 			  get_extent_t *get_extent)
1266 {
1267 	struct inode *inode = page->mapping->host;
1268 	u64 start = page->index << PAGE_CACHE_SHIFT;
1269 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1270 	u64 end;
1271 	u64 cur = start;
1272 	u64 extent_offset;
1273 	u64 last_byte = i_size_read(inode);
1274 	u64 block_start;
1275 	u64 cur_end;
1276 	sector_t sector;
1277 	struct extent_map *em;
1278 	struct block_device *bdev;
1279 	int ret;
1280 	int nr = 0;
1281 	size_t page_offset = 0;
1282 	size_t iosize;
1283 	size_t blocksize = inode->i_sb->s_blocksize;
1284 
1285 	if (!PagePrivate(page)) {
1286 		SetPagePrivate(page);
1287 		set_page_private(page, 1);
1288 		page_cache_get(page);
1289 	}
1290 
1291 	end = page_end;
1292 	lock_extent(tree, start, end, GFP_NOFS);
1293 
1294 	while (cur <= end) {
1295 		if (cur >= last_byte) {
1296 			iosize = PAGE_CACHE_SIZE - page_offset;
1297 			zero_user_page(page, page_offset, iosize, KM_USER0);
1298 			set_extent_uptodate(tree, cur, cur + iosize - 1,
1299 					    GFP_NOFS);
1300 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1301 			break;
1302 		}
1303 		em = get_extent(inode, page, page_offset, cur, end, 0);
1304 		if (IS_ERR(em) || !em) {
1305 			SetPageError(page);
1306 			unlock_extent(tree, cur, end, GFP_NOFS);
1307 			break;
1308 		}
1309 
1310 		extent_offset = cur - em->start;
1311 		BUG_ON(em->end < cur);
1312 		BUG_ON(end < cur);
1313 
1314 		iosize = min(em->end - cur, end - cur) + 1;
1315 		cur_end = min(em->end, end);
1316 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1317 		sector = (em->block_start + extent_offset) >> 9;
1318 		bdev = em->bdev;
1319 		block_start = em->block_start;
1320 		free_extent_map(em);
1321 		em = NULL;
1322 
1323 		/* we've found a hole, just zero and go on */
1324 		if (block_start == 0) {
1325 			zero_user_page(page, page_offset, iosize, KM_USER0);
1326 			set_extent_uptodate(tree, cur, cur + iosize - 1,
1327 					    GFP_NOFS);
1328 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1329 			cur = cur + iosize;
1330 			page_offset += iosize;
1331 			continue;
1332 		}
1333 		/* the get_extent function already copied into the page */
1334 		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1335 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1336 			cur = cur + iosize;
1337 			page_offset += iosize;
1338 			continue;
1339 		}
1340 
1341 		ret = submit_extent_page(READ, tree, page,
1342 					 sector, iosize, page_offset, bdev,
1343 					 end_bio_extent_readpage);
1344 		if (ret)
1345 			SetPageError(page);
1346 		cur = cur + iosize;
1347 		page_offset += iosize;
1348 		nr++;
1349 	}
1350 	if (!nr) {
1351 		if (!PageError(page))
1352 			SetPageUptodate(page);
1353 		unlock_page(page);
1354 	}
1355 	return 0;
1356 }
1357 EXPORT_SYMBOL(extent_read_full_page);
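
/*
 * A sketch of how a filesystem might hook this into its readpage
 * address_space operation.  MYFS_I() and myfs_get_extent() are
 * hypothetical stand-ins for the caller's inode-private tree lookup and
 * get_extent_t callback; neither is defined in this file.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = &MYFS_I(page->mapping->host)->extent_tree;
 *		return extent_read_full_page(tree, page, myfs_get_extent);
 *	}
 */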
1358 
1359 /*
1360  * the writepage semantics are similar to regular writepage.  extent
1361  * records are inserted to lock ranges in the tree, and as dirty areas
1362  * are found, they are marked writeback.  Then the lock bits are removed
1363  * and the end_io handler clears the writeback ranges
1364  */
1365 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1366 			  get_extent_t *get_extent,
1367 			  struct writeback_control *wbc)
1368 {
1369 	struct inode *inode = page->mapping->host;
1370 	u64 start = page->index << PAGE_CACHE_SHIFT;
1371 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1372 	u64 end;
1373 	u64 cur = start;
1374 	u64 extent_offset;
1375 	u64 last_byte = i_size_read(inode);
1376 	u64 block_start;
1377 	sector_t sector;
1378 	struct extent_map *em;
1379 	struct block_device *bdev;
1380 	int ret;
1381 	int nr = 0;
1382 	size_t page_offset = 0;
1383 	size_t iosize;
1384 	size_t blocksize;
1385 	loff_t i_size = i_size_read(inode);
1386 	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1387 
1388 	if (page->index > end_index) {
1389 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1390 		unlock_page(page);
1391 		return 0;
1392 	}
1393 
1394 	if (page->index == end_index) {
1395 		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1396 		zero_user_page(page, offset,
1397 			       PAGE_CACHE_SIZE - offset, KM_USER0);
1398 	}
1399 
1400 	if (!PagePrivate(page)) {
1401 		SetPagePrivate(page);
1402 		set_page_private(page, 1);
1403 		page_cache_get(page);
1404 	}
1405 
1406 	end = page_end;
1407 	lock_extent(tree, start, page_end, GFP_NOFS);
1408 
1409 	if (last_byte <= start) {
1410 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1411 		goto done;
1412 	}
1413 
1414 	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1415 	blocksize = inode->i_sb->s_blocksize;
1416 
1417 	while (cur <= end) {
1418 		if (cur >= last_byte) {
1419 			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1420 			break;
1421 		}
1422 		em = get_extent(inode, page, page_offset, cur, end, 1);
1423 		if (IS_ERR(em) || !em) {
1424 			SetPageError(page);
1425 			break;
1426 		}
1427 
1428 		extent_offset = cur - em->start;
1429 		BUG_ON(em->end < cur);
1430 		BUG_ON(end < cur);
1431 		iosize = min(em->end - cur, end - cur) + 1;
1432 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1433 		sector = (em->block_start + extent_offset) >> 9;
1434 		bdev = em->bdev;
1435 		block_start = em->block_start;
1436 		free_extent_map(em);
1437 		em = NULL;
1438 
1439 		if (block_start == 0 || block_start == EXTENT_MAP_INLINE) {
1440 			clear_extent_dirty(tree, cur,
1441 					   cur + iosize - 1, GFP_NOFS);
1442 			cur = cur + iosize;
1443 			page_offset += iosize;
1444 			continue;
1445 		}
1446 
1447 		/* leave this out until we have a page_mkwrite call */
1448 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1449 				   EXTENT_DIRTY, 0)) {
1450 			cur = cur + iosize;
1451 			page_offset += iosize;
1452 			continue;
1453 		}
1454 		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1455 		set_range_writeback(tree, cur, cur + iosize - 1);
1456 		ret = submit_extent_page(WRITE, tree, page,
1457 					 sector, iosize, page_offset, bdev,
1458 					 end_bio_extent_writepage);
1459 		if (ret)
1460 			SetPageError(page);
1461 		cur = cur + iosize;
1462 		page_offset += iosize;
1463 		nr++;
1464 	}
1465 done:
1466 	WARN_ON(test_range_bit(tree, start, page_end, EXTENT_DIRTY, 0));
1467 	unlock_extent(tree, start, page_end, GFP_NOFS);
1468 	unlock_page(page);
1469 	return 0;
1470 }
1471 EXPORT_SYMBOL(extent_write_full_page);
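
/*
 * As with readpage above, a filesystem would typically call this from its
 * writepage address_space operation (sketch; MYFS_I() and myfs_get_extent()
 * are hypothetical):
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = &MYFS_I(page->mapping->host)->extent_tree;
 *		return extent_write_full_page(tree, page, myfs_get_extent, wbc);
 *	}
 */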
1472 
1473 /*
1474  * basic invalidatepage code: this waits on any locked or writeback
1475  * ranges corresponding to the page, and then deletes any extent state
1476  * records from the tree
1477  */
1478 int extent_invalidatepage(struct extent_map_tree *tree,
1479 			  struct page *page, unsigned long offset)
1480 {
1481 	u64 start = (page->index << PAGE_CACHE_SHIFT);
1482 	u64 end = start + PAGE_CACHE_SIZE - 1;
1483 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1484 
1485 	start += (offset + blocksize - 1) & ~(blocksize - 1);
1486 	if (start > end)
1487 		return 0;
1488 
1489 	lock_extent(tree, start, end, GFP_NOFS);
1490 	wait_on_extent_writeback(tree, start, end);
1491 	clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DIRTY,
1492 			 1, 1, GFP_NOFS);
1493 	return 0;
1494 }
1495 EXPORT_SYMBOL(extent_invalidatepage);
1496 
1497 /*
1498  * simple commit_write call: the page is marked dirty, and i_size is
1499  * updated on the inode if this write extends the file
1500  */
1501 int extent_commit_write(struct extent_map_tree *tree,
1502 			struct inode *inode, struct page *page,
1503 			unsigned from, unsigned to)
1504 {
1505 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1506 
1507 	if (!PagePrivate(page)) {
1508 		SetPagePrivate(page);
1509 		set_page_private(page, 1);
1510 		page_cache_get(page);
1511 	}
1512 
1513 	set_page_dirty(page);
1514 
1515 	if (pos > inode->i_size) {
1516 		i_size_write(inode, pos);
1517 		mark_inode_dirty(inode);
1518 	}
1519 	return 0;
1520 }
1521 EXPORT_SYMBOL(extent_commit_write);
1522 
1523 int extent_prepare_write(struct extent_map_tree *tree,
1524 			 struct inode *inode, struct page *page,
1525 			 unsigned from, unsigned to, get_extent_t *get_extent)
1526 {
1527 	u64 page_start = page->index << PAGE_CACHE_SHIFT;
1528 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1529 	u64 block_start;
1530 	u64 orig_block_start;
1531 	u64 block_end;
1532 	u64 cur_end;
1533 	struct extent_map *em;
1534 	unsigned blocksize = 1 << inode->i_blkbits;
1535 	size_t page_offset = 0;
1536 	size_t block_off_start;
1537 	size_t block_off_end;
1538 	int err = 0;
1539 	int iocount = 0;
1540 	int ret = 0;
1541 	int isnew;
1542 
1543 	if (!PagePrivate(page)) {
1544 		SetPagePrivate(page);
1545 		set_page_private(page, 1);
1546 		page_cache_get(page);
1547 	}
1548 	block_start = (page_start + from) & ~((u64)blocksize - 1);
1549 	block_end = (page_start + to - 1) | (blocksize - 1);
1550 	orig_block_start = block_start;
1551 
1552 	lock_extent(tree, page_start, page_end, GFP_NOFS);
1553 	while(block_start <= block_end) {
1554 		em = get_extent(inode, page, page_offset, block_start,
1555 				block_end, 1);
1556 		if (IS_ERR(em) || !em) {
1557 			goto err;
1558 		}
1559 		cur_end = min(block_end, em->end);
1560 		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
1561 		block_off_end = block_off_start + blocksize;
1562 		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
1563 
1564 		if (!PageUptodate(page) && isnew &&
1565 		    (block_off_end > to || block_off_start < from)) {
1566 			void *kaddr;
1567 
1568 			kaddr = kmap_atomic(page, KM_USER0);
1569 			if (block_off_end > to)
1570 				memset(kaddr + to, 0, block_off_end - to);
1571 			if (block_off_start < from)
1572 				memset(kaddr + block_off_start, 0,
1573 				       from - block_off_start);
1574 			flush_dcache_page(page);
1575 			kunmap_atomic(kaddr, KM_USER0);
1576 		}
1577 		if (!isnew && !PageUptodate(page) &&
1578 		    (block_off_end > to || block_off_start < from) &&
1579 		    !test_range_bit(tree, block_start, cur_end,
1580 				    EXTENT_UPTODATE, 1)) {
1581 			u64 sector;
1582 			u64 extent_offset = block_start - em->start;
1583 			size_t iosize;
1584 			sector = (em->block_start + extent_offset) >> 9;
1585 			iosize = (cur_end - block_start + blocksize - 1) &
1586 				~((u64)blocksize - 1);
1587 			/*
1588 			 * we've already got the extent locked, but we
1589 			 * need to split the state such that our end_bio
1590 			 * handler can clear the lock.
1591 			 */
1592 			set_extent_bit(tree, block_start,
1593 				       block_start + iosize - 1,
1594 				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
1595 			ret = submit_extent_page(READ, tree, page,
1596 					 sector, iosize, page_offset, em->bdev,
1597 					 end_bio_extent_preparewrite);
1598 			iocount++;
1599 			block_start = block_start + iosize;
1600 		} else {
1601 			set_extent_uptodate(tree, block_start, cur_end,
1602 					    GFP_NOFS);
1603 			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
1604 			block_start = cur_end + 1;
1605 		}
1606 		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
1607 		free_extent_map(em);
1608 	}
1609 	if (iocount) {
1610 		wait_extent_bit(tree, orig_block_start,
1611 				block_end, EXTENT_LOCKED);
1612 	}
1613 	check_page_uptodate(tree, page);
1614 err:
1615 	/* FIXME, zero out newly allocated blocks on error */
1616 	return err;
1617 }
1618 EXPORT_SYMBOL(extent_prepare_write);
1619 
1620 /*
1621  * a helper for releasepage.  As long as there are no locked extents
1622  * in the range corresponding to the page, both state records and extent
1623  * map records are removed
1624  */
1625 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
1626 {
1627 	struct extent_map *em;
1628 	u64 start = page->index << PAGE_CACHE_SHIFT;
1629 	u64 end = start + PAGE_CACHE_SIZE - 1;
1630 	u64 orig_start = start;
1631 
1632 	while (start <= end) {
1633 		em = lookup_extent_mapping(tree, start, end);
1634 		if (!em || IS_ERR(em))
1635 			break;
1636 		if (test_range_bit(tree, em->start, em->end,
1637 				   EXTENT_LOCKED, 0)) {
1638 			start = em->end + 1;
1639 			printk("range still locked %Lu %Lu\n", em->start, em->end);
1640 			free_extent_map(em);
1641 			break;
1642 		}
1643 		remove_extent_mapping(tree, em);
1644 		start = em->end + 1;
1645 		/* once for the rb tree */
1646 		free_extent_map(em);
1647 		/* once for us */
1648 		free_extent_map(em);
1649 	}
1650 	WARN_ON(test_range_bit(tree, orig_start, end, EXTENT_WRITEBACK, 0));
1651 	clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
1652 			 1, 1, GFP_NOFS);
1653 	return 1;
1654 }
1655 EXPORT_SYMBOL(try_release_extent_mapping);
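
/*
 * Sketch of the intended releasepage hookup (MYFS_I() is hypothetical):
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = &MYFS_I(page->mapping->host)->extent_tree;
 *		return try_release_extent_mapping(tree, page);
 *	}
 */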
1656 
1657