xref: /openbmc/linux/fs/btrfs/extent_map.c (revision 35ebb934)
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include "extent_map.h"
14 
15 /* temporary prototype until extent_map moves out of btrfs */
16 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
17 				       unsigned long extra_flags,
18 				       void (*ctor)(void *, struct kmem_cache *,
19 						    unsigned long));
20 
21 static struct kmem_cache *extent_map_cache;
22 static struct kmem_cache *extent_state_cache;
23 static struct kmem_cache *extent_buffer_cache;
24 
25 static LIST_HEAD(buffers);
26 static LIST_HEAD(states);
27 
28 static DEFINE_SPINLOCK(state_lock);
29 #define BUFFER_LRU_MAX 64
30 
31 struct tree_entry {
32 	u64 start;
33 	u64 end;
34 	int in_tree;
35 	struct rb_node rb_node;
36 };
37 
38 void __init extent_map_init(void)
39 {
40 	extent_map_cache = btrfs_cache_create("extent_map",
41 					    sizeof(struct extent_map), 0,
42 					    NULL);
43 	extent_state_cache = btrfs_cache_create("extent_state",
44 					    sizeof(struct extent_state), 0,
45 					    NULL);
46 	extent_buffer_cache = btrfs_cache_create("extent_buffers",
47 					    sizeof(struct extent_buffer), 0,
48 					    NULL);
49 }
50 
51 void __exit extent_map_exit(void)
52 {
53 	struct extent_state *state;
54 
55 	while (!list_empty(&states)) {
56 		state = list_entry(states.next, struct extent_state, list);
57 		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
58 		list_del(&state->list);
59 		kmem_cache_free(extent_state_cache, state);
60 
61 	}
62 
63 	if (extent_map_cache)
64 		kmem_cache_destroy(extent_map_cache);
65 	if (extent_state_cache)
66 		kmem_cache_destroy(extent_state_cache);
67 	if (extent_buffer_cache)
68 		kmem_cache_destroy(extent_buffer_cache);
69 }
70 
71 void extent_map_tree_init(struct extent_map_tree *tree,
72 			  struct address_space *mapping, gfp_t mask)
73 {
74 	tree->map.rb_node = NULL;
75 	tree->state.rb_node = NULL;
76 	tree->ops = NULL;
77 	rwlock_init(&tree->lock);
78 	spin_lock_init(&tree->lru_lock);
79 	tree->mapping = mapping;
80 	INIT_LIST_HEAD(&tree->buffer_lru);
81 	tree->lru_size = 0;
82 }
83 EXPORT_SYMBOL(extent_map_tree_init);
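
/*
 * A minimal usage sketch: a filesystem would typically embed one of these
 * trees in its in-memory inode and initialize it when the inode is set
 * up, roughly (my_inode and its extent_tree member are placeholders):
 *
 *	extent_map_tree_init(&my_inode->extent_tree, inode->i_mapping,
 *			     GFP_NOFS);
 */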
84 
85 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
86 {
87 	struct extent_buffer *eb;
88 	while(!list_empty(&tree->buffer_lru)) {
89 		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
90 				lru);
91 		list_del(&eb->lru);
92 		free_extent_buffer(eb);
93 	}
94 }
95 EXPORT_SYMBOL(extent_map_tree_empty_lru);
96 
97 struct extent_map *alloc_extent_map(gfp_t mask)
98 {
99 	struct extent_map *em;
100 	em = kmem_cache_alloc(extent_map_cache, mask);
101 	if (!em || IS_ERR(em))
102 		return em;
103 	em->in_tree = 0;
104 	atomic_set(&em->refs, 1);
105 	return em;
106 }
107 EXPORT_SYMBOL(alloc_extent_map);
108 
109 void free_extent_map(struct extent_map *em)
110 {
111 	if (!em)
112 		return;
113 	if (atomic_dec_and_test(&em->refs)) {
114 		WARN_ON(em->in_tree);
115 		kmem_cache_free(extent_map_cache, em);
116 	}
117 }
118 EXPORT_SYMBOL(free_extent_map);
119 
120 
121 struct extent_state *alloc_extent_state(gfp_t mask)
122 {
123 	struct extent_state *state;
124 	unsigned long flags;
125 
126 	state = kmem_cache_alloc(extent_state_cache, mask);
127 	if (!state || IS_ERR(state))
128 		return state;
129 	state->state = 0;
130 	state->in_tree = 0;
131 	state->private = 0;
132 
133 	spin_lock_irqsave(&state_lock, flags);
134 	list_add(&state->list, &states);
135 	spin_unlock_irqrestore(&state_lock, flags);
136 
137 	atomic_set(&state->refs, 1);
138 	init_waitqueue_head(&state->wq);
139 	return state;
140 }
141 EXPORT_SYMBOL(alloc_extent_state);
142 
143 void free_extent_state(struct extent_state *state)
144 {
145 	unsigned long flags;
146 	if (!state)
147 		return;
148 	if (atomic_dec_and_test(&state->refs)) {
149 		WARN_ON(state->in_tree);
150 		spin_lock_irqsave(&state_lock, flags);
151 		list_del(&state->list);
152 		spin_unlock_irqrestore(&state_lock, flags);
153 		kmem_cache_free(extent_state_cache, state);
154 	}
155 }
156 EXPORT_SYMBOL(free_extent_state);
157 
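/*
 * insert 'node' into 'root', keyed by 'offset' (callers pass the new
 * entry's end).  If an existing entry already covers 'offset', that
 * entry's node is returned and nothing is inserted; otherwise the new
 * node is linked in, marked in_tree and NULL is returned.
 */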
158 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
159 				   struct rb_node *node)
160 {
161 	struct rb_node ** p = &root->rb_node;
162 	struct rb_node * parent = NULL;
163 	struct tree_entry *entry;
164 
165 	while(*p) {
166 		parent = *p;
167 		entry = rb_entry(parent, struct tree_entry, rb_node);
168 
169 		if (offset < entry->start)
170 			p = &(*p)->rb_left;
171 		else if (offset > entry->end)
172 			p = &(*p)->rb_right;
173 		else
174 			return parent;
175 	}
176 
177 	entry = rb_entry(node, struct tree_entry, rb_node);
178 	entry->in_tree = 1;
179 	rb_link_node(node, parent, p);
180 	rb_insert_color(node, root);
181 	return NULL;
182 }
183 
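/*
 * search 'root' for an entry containing 'offset' and return its node.
 * If no entry contains 'offset', NULL is returned and, when prev_ret is
 * non-NULL, *prev_ret is set to the first entry that ends at or after
 * 'offset' (or NULL if there is no such entry).
 */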
184 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
185 				   struct rb_node **prev_ret)
186 {
187 	struct rb_node * n = root->rb_node;
188 	struct rb_node *prev = NULL;
189 	struct tree_entry *entry;
190 	struct tree_entry *prev_entry = NULL;
191 
192 	while(n) {
193 		entry = rb_entry(n, struct tree_entry, rb_node);
194 		prev = n;
195 		prev_entry = entry;
196 
197 		if (offset < entry->start)
198 			n = n->rb_left;
199 		else if (offset > entry->end)
200 			n = n->rb_right;
201 		else
202 			return n;
203 	}
204 	if (!prev_ret)
205 		return NULL;
206 	while(prev && offset > prev_entry->end) {
207 		prev = rb_next(prev);
208 		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
209 	}
210 	*prev_ret = prev;
211 	return NULL;
212 }
213 
214 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
215 {
216 	struct rb_node *prev;
217 	struct rb_node *ret;
218 	ret = __tree_search(root, offset, &prev);
219 	if (!ret)
220 		return prev;
221 	return ret;
222 }
223 
224 static int tree_delete(struct rb_root *root, u64 offset)
225 {
226 	struct rb_node *node;
227 	struct tree_entry *entry;
228 
229 	node = __tree_search(root, offset, NULL);
230 	if (!node)
231 		return -ENOENT;
232 	entry = rb_entry(node, struct tree_entry, rb_node);
233 	entry->in_tree = 0;
234 	rb_erase(node, root);
235 	return 0;
236 }
237 
238 /*
239  * add_extent_mapping tries a simple backward merge with existing
240  * mappings.  The extent_map struct passed in will be inserted into
241  * the tree directly (no copies made, just a reference taken).
242  */
243 int add_extent_mapping(struct extent_map_tree *tree,
244 		       struct extent_map *em)
245 {
246 	int ret = 0;
247 	struct extent_map *prev = NULL;
248 	struct rb_node *rb;
249 
250 	write_lock_irq(&tree->lock);
251 	rb = tree_insert(&tree->map, em->end, &em->rb_node);
252 	if (rb) {
253 		prev = rb_entry(rb, struct extent_map, rb_node);
254 		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
255 		ret = -EEXIST;
256 		goto out;
257 	}
258 	atomic_inc(&em->refs);
259 	if (em->start != 0) {
260 		rb = rb_prev(&em->rb_node);
261 		if (rb)
262 			prev = rb_entry(rb, struct extent_map, rb_node);
263 		if (prev && prev->end + 1 == em->start &&
264 		    ((em->block_start == EXTENT_MAP_HOLE &&
265 		      prev->block_start == EXTENT_MAP_HOLE) ||
266 			     (em->block_start == prev->block_end + 1))) {
267 			em->start = prev->start;
268 			em->block_start = prev->block_start;
269 			rb_erase(&prev->rb_node, &tree->map);
270 			prev->in_tree = 0;
271 			free_extent_map(prev);
272 		}
273 	 }
274 out:
275 	write_unlock_irq(&tree->lock);
276 	return ret;
277 }
278 EXPORT_SYMBOL(add_extent_mapping);
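
/*
 * A minimal usage sketch: a hypothetical caller builds a mapping and
 * inserts it.  add_extent_mapping takes its own reference on success, so
 * the caller still drops the reference it got from alloc_extent_map.
 * start, len, disk_start, bdev and tree below are placeholders:
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *	int ret;
 *
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->end = start + len - 1;
 *	em->block_start = disk_start;	(or EXTENT_MAP_HOLE for a hole)
 *	em->block_end = disk_start + len - 1;
 *	em->bdev = bdev;
 *	ret = add_extent_mapping(tree, em);	(may return -EEXIST)
 *	free_extent_map(em);
 */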
279 
280 /*
281  * lookup_extent_mapping returns the first extent_map struct in the
282  * tree that intersects the [start, end] (inclusive) range.  There may
283  * be additional objects in the tree that intersect, so check the object
284  * returned carefully to make sure you don't need additional lookups.
285  */
286 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
287 					 u64 start, u64 end)
288 {
289 	struct extent_map *em;
290 	struct rb_node *rb_node;
291 
292 	read_lock_irq(&tree->lock);
293 	rb_node = tree_search(&tree->map, start);
294 	if (!rb_node) {
295 		em = NULL;
296 		goto out;
297 	}
298 	if (IS_ERR(rb_node)) {
299 		em = ERR_PTR(PTR_ERR(rb_node));
300 		goto out;
301 	}
302 	em = rb_entry(rb_node, struct extent_map, rb_node);
303 	if (em->end < start || em->start > end) {
304 		em = NULL;
305 		goto out;
306 	}
307 	atomic_inc(&em->refs);
308 out:
309 	read_unlock_irq(&tree->lock);
310 	return em;
311 }
312 EXPORT_SYMBOL(lookup_extent_mapping);
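
/*
 * A minimal usage sketch for checking the result of a lookup.  The
 * returned mapping only has to intersect [start, end], so a hypothetical
 * caller clamps to the overlap and drops the reference that
 * lookup_extent_mapping took:
 *
 *	struct extent_map *em;
 *
 *	em = lookup_extent_mapping(tree, start, end);
 *	if (em && !IS_ERR(em)) {
 *		u64 first = max(start, em->start);
 *		u64 last = min(end, em->end);
 *		... use the [first, last] piece of this mapping ...
 *		free_extent_map(em);
 *	}
 */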
313 
314 /*
315  * removes an extent_map struct from the tree.  No reference counts are
316  * dropped, and no checks are done to  see if the range is in use
317  */
318 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
319 {
320 	int ret;
321 
322 	write_lock_irq(&tree->lock);
323 	ret = tree_delete(&tree->map, em->end);
324 	write_unlock_irq(&tree->lock);
325 	return ret;
326 }
327 EXPORT_SYMBOL(remove_extent_mapping);
328 
329 /*
330  * utility function to look for merge candidates inside a given range.
331  * Any extents with matching state are merged together into a single
332  * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
333  * are not merged because the end_io handlers need to be able to do
334  * operations on them without sleeping (or doing allocations/splits).
335  *
336  * This should be called with the tree lock held.
337  */
338 static int merge_state(struct extent_map_tree *tree,
339 		       struct extent_state *state)
340 {
341 	struct extent_state *other;
342 	struct rb_node *other_node;
343 
344 	if (state->state & EXTENT_IOBITS)
345 		return 0;
346 
347 	other_node = rb_prev(&state->rb_node);
348 	if (other_node) {
349 		other = rb_entry(other_node, struct extent_state, rb_node);
350 		if (other->end == state->start - 1 &&
351 		    other->state == state->state) {
352 			state->start = other->start;
353 			other->in_tree = 0;
354 			rb_erase(&other->rb_node, &tree->state);
355 			free_extent_state(other);
356 		}
357 	}
358 	other_node = rb_next(&state->rb_node);
359 	if (other_node) {
360 		other = rb_entry(other_node, struct extent_state, rb_node);
361 		if (other->start == state->end + 1 &&
362 		    other->state == state->state) {
363 			other->start = state->start;
364 			state->in_tree = 0;
365 			rb_erase(&state->rb_node, &tree->state);
366 			free_extent_state(state);
367 		}
368 	}
369 	return 0;
370 }
371 
372 /*
373  * insert an extent_state struct into the tree.  'bits' are set on the
374  * struct before it is inserted.
375  *
376  * This may return -EEXIST if the extent is already there, in which case the
377  * state struct is freed.
378  *
379  * The tree lock is not taken internally.  This is a utility function and
380  * probably isn't what you want to call (see set/clear_extent_bit).
381  */
382 static int insert_state(struct extent_map_tree *tree,
383 			struct extent_state *state, u64 start, u64 end,
384 			int bits)
385 {
386 	struct rb_node *node;
387 
388 	if (end < start) {
389 		printk("end < start %Lu %Lu\n", end, start);
390 		WARN_ON(1);
391 	}
392 	state->state |= bits;
393 	state->start = start;
394 	state->end = end;
395 	node = tree_insert(&tree->state, end, &state->rb_node);
396 	if (node) {
397 		struct extent_state *found;
398 		found = rb_entry(node, struct extent_state, rb_node);
399 		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
400 		free_extent_state(state);
401 		return -EEXIST;
402 	}
403 	merge_state(tree, state);
404 	return 0;
405 }
406 
407 /*
408  * split a given extent state struct in two, inserting the preallocated
409  * struct 'prealloc' as the newly created second half.  'split' indicates an
410  * offset inside 'orig' where it should be split.
411  *
412  * Before calling,
413  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
414  * are two extent state structs in the tree:
415  * prealloc: [orig->start, split - 1]
416  * orig: [ split, orig->end ]
417  *
418  * The tree locks are not taken by this function. They need to be held
419  * by the caller.
420  */
421 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
422 		       struct extent_state *prealloc, u64 split)
423 {
424 	struct rb_node *node;
425 	prealloc->start = orig->start;
426 	prealloc->end = split - 1;
427 	prealloc->state = orig->state;
428 	orig->start = split;
429 
430 	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
431 	if (node) {
432 		struct extent_state *found;
433 		found = rb_entry(node, struct extent_state, rb_node);
434 		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
435 		free_extent_state(prealloc);
436 		return -EEXIST;
437 	}
438 	return 0;
439 }
440 
441 /*
442  * utility function to clear some bits in an extent state struct.
443  * it will optionally wake up anyone waiting on this state (wake == 1), or
444  * forcibly remove the state from the tree (delete == 1).
445  *
446  * If no bits are set on the state struct after clearing things, the
447  * struct is freed and removed from the tree
448  */
449 static int clear_state_bit(struct extent_map_tree *tree,
450 			    struct extent_state *state, int bits, int wake,
451 			    int delete)
452 {
453 	int ret = state->state & bits;
454 	state->state &= ~bits;
455 	if (wake)
456 		wake_up(&state->wq);
457 	if (delete || state->state == 0) {
458 		if (state->in_tree) {
459 			rb_erase(&state->rb_node, &tree->state);
460 			state->in_tree = 0;
461 			free_extent_state(state);
462 		} else {
463 			WARN_ON(1);
464 		}
465 	} else {
466 		merge_state(tree, state);
467 	}
468 	return ret;
469 }
470 
471 /*
472  * clear some bits on a range in the tree.  This may require splitting
473  * or inserting elements in the tree, so the gfp mask is used to
474  * indicate which allocations or sleeping are allowed.
475  *
476  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
477  * the given range from the tree regardless of state (ie for truncate).
478  *
479  * the range [start, end] is inclusive.
480  *
481  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
482  * bits were already set, or zero if none of the bits were already set.
483  */
484 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
485 		     int bits, int wake, int delete, gfp_t mask)
486 {
487 	struct extent_state *state;
488 	struct extent_state *prealloc = NULL;
489 	struct rb_node *node;
490 	unsigned long flags;
491 	int err;
492 	int set = 0;
493 
494 again:
495 	if (!prealloc && (mask & __GFP_WAIT)) {
496 		prealloc = alloc_extent_state(mask);
497 		if (!prealloc)
498 			return -ENOMEM;
499 	}
500 
501 	write_lock_irqsave(&tree->lock, flags);
502 	/*
503 	 * this search will find the extents that end after
504 	 * our range starts
505 	 */
506 	node = tree_search(&tree->state, start);
507 	if (!node)
508 		goto out;
509 	state = rb_entry(node, struct extent_state, rb_node);
510 	if (state->start > end)
511 		goto out;
512 	WARN_ON(state->end < start);
513 
514 	/*
515 	 *     | ---- desired range ---- |
516 	 *  | state | or
517 	 *  | ------------- state -------------- |
518 	 *
519 	 * We need to split the extent we found, and may flip
520 	 * bits on second half.
521 	 *
522 	 * If the extent we found extends past our range, we
523 	 * just split and search again.  It'll get split again
524 	 * the next time though.
525 	 *
526 	 * If the extent we found is inside our range, we clear
527 	 * the desired bit on it.
528 	 */
529 
530 	if (state->start < start) {
531 		err = split_state(tree, state, prealloc, start);
532 		BUG_ON(err == -EEXIST);
533 		prealloc = NULL;
534 		if (err)
535 			goto out;
536 		if (state->end <= end) {
537 			start = state->end + 1;
538 			set |= clear_state_bit(tree, state, bits,
539 					wake, delete);
540 		} else {
541 			start = state->start;
542 		}
543 		goto search_again;
544 	}
545 	/*
546 	 * | ---- desired range ---- |
547 	 *                        | state |
548 	 * We need to split the extent, and clear the bit
549 	 * on the first half
550 	 */
551 	if (state->start <= end && state->end > end) {
552 		err = split_state(tree, state, prealloc, end + 1);
553 		BUG_ON(err == -EEXIST);
554 
555 		if (wake)
556 			wake_up(&state->wq);
557 		set |= clear_state_bit(tree, prealloc, bits,
558 				       wake, delete);
559 		prealloc = NULL;
560 		goto out;
561 	}
562 
563 	start = state->end + 1;
564 	set |= clear_state_bit(tree, state, bits, wake, delete);
565 	goto search_again;
566 
567 out:
568 	write_unlock_irqrestore(&tree->lock, flags);
569 	if (prealloc)
570 		free_extent_state(prealloc);
571 
572 	return set;
573 
574 search_again:
575 	if (start > end)
576 		goto out;
577 	write_unlock_irqrestore(&tree->lock, flags);
578 	if (mask & __GFP_WAIT)
579 		cond_resched();
580 	goto again;
581 }
582 EXPORT_SYMBOL(clear_extent_bit);
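
/*
 * A minimal usage sketch: a truncate-style caller that wants a range
 * dropped regardless of which bits are set passes delete == 1 (and
 * wake == 1 to kick any waiters), roughly:
 *
 *	clear_extent_bit(tree, start, end,
 *			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 1, GFP_NOFS);
 *
 * extent_invalidatepage() later in this file uses this pattern.
 */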
583 
584 static int wait_on_state(struct extent_map_tree *tree,
585 			 struct extent_state *state)
586 {
587 	DEFINE_WAIT(wait);
588 	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
589 	read_unlock_irq(&tree->lock);
590 	schedule();
591 	read_lock_irq(&tree->lock);
592 	finish_wait(&state->wq, &wait);
593 	return 0;
594 }
595 
596 /*
597  * waits for one or more bits to clear on a range in the state tree.
598  * The range [start, end] is inclusive.
599  * The tree lock is taken by this function
600  */
601 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
602 {
603 	struct extent_state *state;
604 	struct rb_node *node;
605 
606 	read_lock_irq(&tree->lock);
607 again:
608 	while (1) {
609 		/*
610 		 * this search will find all the extents that end after
611 		 * our range starts
612 		 */
613 		node = tree_search(&tree->state, start);
614 		if (!node)
615 			break;
616 
617 		state = rb_entry(node, struct extent_state, rb_node);
618 
619 		if (state->start > end)
620 			goto out;
621 
622 		if (state->state & bits) {
623 			start = state->start;
624 			atomic_inc(&state->refs);
625 			wait_on_state(tree, state);
626 			free_extent_state(state);
627 			goto again;
628 		}
629 		start = state->end + 1;
630 
631 		if (start > end)
632 			break;
633 
634 		if (need_resched()) {
635 			read_unlock_irq(&tree->lock);
636 			cond_resched();
637 			read_lock_irq(&tree->lock);
638 		}
639 	}
640 out:
641 	read_unlock_irq(&tree->lock);
642 	return 0;
643 }
644 EXPORT_SYMBOL(wait_extent_bit);
645 
646 /*
647  * set some bits on a range in the tree.  This may require allocations
648  * or sleeping, so the gfp mask is used to indicate what is allowed.
649  *
650  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
651  * range already has the desired bits set.  The start of the existing
652  * range is returned in failed_start in this case.
653  *
654  * [start, end] is inclusive
655  * This takes the tree lock.
656  */
657 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
658 		   int exclusive, u64 *failed_start, gfp_t mask)
659 {
660 	struct extent_state *state;
661 	struct extent_state *prealloc = NULL;
662 	struct rb_node *node;
663 	unsigned long flags;
664 	int err = 0;
665 	int set;
666 	u64 last_start;
667 	u64 last_end;
668 again:
669 	if (!prealloc && (mask & __GFP_WAIT)) {
670 		prealloc = alloc_extent_state(mask);
671 		if (!prealloc)
672 			return -ENOMEM;
673 	}
674 
675 	write_lock_irqsave(&tree->lock, flags);
676 	/*
677 	 * this search will find all the extents that end after
678 	 * our range starts.
679 	 */
680 	node = tree_search(&tree->state, start);
681 	if (!node) {
682 		err = insert_state(tree, prealloc, start, end, bits);
683 		prealloc = NULL;
684 		BUG_ON(err == -EEXIST);
685 		goto out;
686 	}
687 
688 	state = rb_entry(node, struct extent_state, rb_node);
689 	last_start = state->start;
690 	last_end = state->end;
691 
692 	/*
693 	 * | ---- desired range ---- |
694 	 * | state |
695 	 *
696 	 * Just lock what we found and keep going
697 	 */
698 	if (state->start == start && state->end <= end) {
699 		set = state->state & bits;
700 		if (set && exclusive) {
701 			*failed_start = state->start;
702 			err = -EEXIST;
703 			goto out;
704 		}
705 		state->state |= bits;
706 		start = state->end + 1;
707 		merge_state(tree, state);
708 		goto search_again;
709 	}
710 
711 	/*
712 	 *     | ---- desired range ---- |
713 	 * | state |
714 	 *   or
715 	 * | ------------- state -------------- |
716 	 *
717 	 * We need to split the extent we found, and may flip bits on
718 	 * second half.
719 	 *
720 	 * If the extent we found extends past our
721 	 * range, we just split and search again.  It'll get split
722 	 * again the next time though.
723 	 *
724 	 * If the extent we found is inside our range, we set the
725 	 * desired bit on it.
726 	 */
727 	if (state->start < start) {
728 		set = state->state & bits;
729 		if (exclusive && set) {
730 			*failed_start = start;
731 			err = -EEXIST;
732 			goto out;
733 		}
734 		err = split_state(tree, state, prealloc, start);
735 		BUG_ON(err == -EEXIST);
736 		prealloc = NULL;
737 		if (err)
738 			goto out;
739 		if (state->end <= end) {
740 			state->state |= bits;
741 			start = state->end + 1;
742 			merge_state(tree, state);
743 		} else {
744 			start = state->start;
745 		}
746 		goto search_again;
747 	}
748 	/*
749 	 * | ---- desired range ---- |
750 	 *     | state | or               | state |
751 	 *
752 	 * There's a hole, we need to insert something in it and
753 	 * ignore the extent we found.
754 	 */
755 	if (state->start > start) {
756 		u64 this_end;
757 		if (end < last_start)
758 			this_end = end;
759 		else
760 			this_end = last_start -1;
761 		err = insert_state(tree, prealloc, start, this_end,
762 				   bits);
763 		prealloc = NULL;
764 		BUG_ON(err == -EEXIST);
765 		if (err)
766 			goto out;
767 		start = this_end + 1;
768 		goto search_again;
769 	}
770 	/*
771 	 * | ---- desired range ---- |
772 	 *                        | state |
773 	 * We need to split the extent, and set the bit
774 	 * on the first half
775 	 */
776 	if (state->start <= end && state->end > end) {
777 		set = state->state & bits;
778 		if (exclusive && set) {
779 			*failed_start = start;
780 			err = -EEXIST;
781 			goto out;
782 		}
783 		err = split_state(tree, state, prealloc, end + 1);
784 		BUG_ON(err == -EEXIST);
785 
786 		prealloc->state |= bits;
787 		merge_state(tree, prealloc);
788 		prealloc = NULL;
789 		goto out;
790 	}
791 
792 	goto search_again;
793 
794 out:
795 	write_unlock_irqrestore(&tree->lock, flags);
796 	if (prealloc)
797 		free_extent_state(prealloc);
798 
799 	return err;
800 
801 search_again:
802 	if (start > end)
803 		goto out;
804 	write_unlock_irqrestore(&tree->lock, flags);
805 	if (mask & __GFP_WAIT)
806 		cond_resched();
807 	goto again;
808 }
809 EXPORT_SYMBOL(set_extent_bit);
810 
811 /* wrappers around set/clear extent bit */
812 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
813 		     gfp_t mask)
814 {
815 	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
816 			      mask);
817 }
818 EXPORT_SYMBOL(set_extent_dirty);
819 
820 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
821 		    int bits, gfp_t mask)
822 {
823 	return set_extent_bit(tree, start, end, bits, 0, NULL,
824 			      mask);
825 }
826 EXPORT_SYMBOL(set_extent_bits);
827 
828 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
829 		      int bits, gfp_t mask)
830 {
831 	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
832 }
833 EXPORT_SYMBOL(clear_extent_bits);
834 
835 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
836 		     gfp_t mask)
837 {
838 	return set_extent_bit(tree, start, end,
839 			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
840 			      mask);
841 }
842 EXPORT_SYMBOL(set_extent_delalloc);
843 
844 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
845 		       gfp_t mask)
846 {
847 	return clear_extent_bit(tree, start, end,
848 				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
849 }
850 EXPORT_SYMBOL(clear_extent_dirty);
851 
852 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
853 		     gfp_t mask)
854 {
855 	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
856 			      mask);
857 }
858 EXPORT_SYMBOL(set_extent_new);
859 
860 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
861 		       gfp_t mask)
862 {
863 	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
864 }
865 EXPORT_SYMBOL(clear_extent_new);
866 
867 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
868 			gfp_t mask)
869 {
870 	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
871 			      mask);
872 }
873 EXPORT_SYMBOL(set_extent_uptodate);
874 
875 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
876 			  gfp_t mask)
877 {
878 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
879 }
880 EXPORT_SYMBOL(clear_extent_uptodate);
881 
882 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
883 			 gfp_t mask)
884 {
885 	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
886 			      0, NULL, mask);
887 }
888 EXPORT_SYMBOL(set_extent_writeback);
889 
890 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
891 			   gfp_t mask)
892 {
893 	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
894 }
895 EXPORT_SYMBOL(clear_extent_writeback);
896 
897 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
898 {
899 	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
900 }
901 EXPORT_SYMBOL(wait_on_extent_writeback);
902 
903 /*
904  * locks a range in ascending order, waiting for any locked regions
905  * it hits on the way.  [start,end] are inclusive, and this will sleep.
906  */
907 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
908 {
909 	int err;
910 	u64 failed_start;
911 	while (1) {
912 		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
913 				     &failed_start, mask);
914 		if (err == -EEXIST && (mask & __GFP_WAIT)) {
915 			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
916 			start = failed_start;
917 		} else {
918 			break;
919 		}
920 		WARN_ON(start > end);
921 	}
922 	return err;
923 }
924 EXPORT_SYMBOL(lock_extent);
925 
926 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
927 		  gfp_t mask)
928 {
929 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
930 }
931 EXPORT_SYMBOL(unlock_extent);
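
/*
 * A minimal usage sketch: the usual calling pattern is to lock a byte
 * range, update extent state and/or pages, then unlock the same
 * [start, end].  A hypothetical caller marking a range dirty:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	set_extent_dirty(tree, start, end, GFP_NOFS);
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */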
932 
933 /*
934  * helper function to set pages and extents in the tree dirty
935  */
936 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
937 {
938 	unsigned long index = start >> PAGE_CACHE_SHIFT;
939 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
940 	struct page *page;
941 
942 	while (index <= end_index) {
943 		page = find_get_page(tree->mapping, index);
944 		BUG_ON(!page);
945 		__set_page_dirty_nobuffers(page);
946 		page_cache_release(page);
947 		index++;
948 	}
949 	set_extent_dirty(tree, start, end, GFP_NOFS);
950 	return 0;
951 }
952 EXPORT_SYMBOL(set_range_dirty);
953 
954 /*
955  * helper function to set both pages and extents in the tree writeback
956  */
957 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
958 {
959 	unsigned long index = start >> PAGE_CACHE_SHIFT;
960 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
961 	struct page *page;
962 
963 	while (index <= end_index) {
964 		page = find_get_page(tree->mapping, index);
965 		BUG_ON(!page);
966 		set_page_writeback(page);
967 		page_cache_release(page);
968 		index++;
969 	}
970 	set_extent_writeback(tree, start, end, GFP_NOFS);
971 	return 0;
972 }
973 EXPORT_SYMBOL(set_range_writeback);
974 
975 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
976 			  u64 *start_ret, u64 *end_ret, int bits)
977 {
978 	struct rb_node *node;
979 	struct extent_state *state;
980 	int ret = 1;
981 
982 	read_lock_irq(&tree->lock);
983 	/*
984 	 * this search will find all the extents that end after
985 	 * our range starts.
986 	 */
987 	node = tree_search(&tree->state, start);
988 	if (!node || IS_ERR(node)) {
989 		goto out;
990 	}
991 
992 	while(1) {
993 		state = rb_entry(node, struct extent_state, rb_node);
994 		if (state->end >= start && (state->state & bits)) {
995 			*start_ret = state->start;
996 			*end_ret = state->end;
997 			ret = 0;
998 			break;
999 		}
1000 		node = rb_next(node);
1001 		if (!node)
1002 			break;
1003 	}
1004 out:
1005 	read_unlock_irq(&tree->lock);
1006 	return ret;
1007 }
1008 EXPORT_SYMBOL(find_first_extent_bit);
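
/*
 * A minimal usage sketch: find_first_extent_bit returns 0 and fills in
 * *start_ret/*end_ret when it finds a matching range, so a hypothetical
 * scan over all dirty ranges (cur is the caller's scan position) is:
 *
 *	u64 found_start;
 *	u64 found_end;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */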
1009 
1010 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1011 			     u64 start, u64 lock_start, u64 *end, u64 max_bytes)
1012 {
1013 	struct rb_node *node;
1014 	struct extent_state *state;
1015 	u64 cur_start = start;
1016 	u64 found = 0;
1017 	u64 total_bytes = 0;
1018 
1019 	write_lock_irq(&tree->lock);
1020 	/*
1021 	 * this search will find all the extents that end after
1022 	 * our range starts.
1023 	 */
1024 search_again:
1025 	node = tree_search(&tree->state, cur_start);
1026 	if (!node || IS_ERR(node)) {
1027 		goto out;
1028 	}
1029 
1030 	while(1) {
1031 		state = rb_entry(node, struct extent_state, rb_node);
1032 		if (state->start != cur_start) {
1033 			goto out;
1034 		}
1035 		if (!(state->state & EXTENT_DELALLOC)) {
1036 			goto out;
1037 		}
1038 		if (state->start >= lock_start) {
1039 			if (state->state & EXTENT_LOCKED) {
1040 				DEFINE_WAIT(wait);
1041 				atomic_inc(&state->refs);
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
1042 				write_unlock_irq(&tree->lock);
1043 				schedule();
1044 				write_lock_irq(&tree->lock);
1045 				finish_wait(&state->wq, &wait);
1046 				free_extent_state(state);
1047 				goto search_again;
1048 			}
1049 			state->state |= EXTENT_LOCKED;
1050 		}
1051 		found++;
1052 		*end = state->end;
1053 		cur_start = state->end + 1;
1054 		node = rb_next(node);
1055 		if (!node)
1056 			break;
1057 		total_bytes += state->end - state->start + 1;
1058 		if (total_bytes >= max_bytes)
1059 			break;
1060 	}
1061 out:
1062 	write_unlock_irq(&tree->lock);
1063 	return found;
1064 }
1065 
1066 /*
1067  * helper function to lock both pages and extents in the tree.
1068  * pages must be locked first.
1069  */
1070 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1071 {
1072 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1073 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1074 	struct page *page;
1075 	int err;
1076 
1077 	while (index <= end_index) {
1078 		page = grab_cache_page(tree->mapping, index);
1079 		if (!page) {
1080 			err = -ENOMEM;
1081 			goto failed;
1082 		}
1083 		if (IS_ERR(page)) {
1084 			err = PTR_ERR(page);
1085 			goto failed;
1086 		}
1087 		index++;
1088 	}
1089 	lock_extent(tree, start, end, GFP_NOFS);
1090 	return 0;
1091 
1092 failed:
1093 	/*
1094 	 * we failed above in getting the page at 'index', so we undo here
1095 	 * up to but not including the page at 'index'
1096 	 */
1097 	end_index = index;
1098 	index = start >> PAGE_CACHE_SHIFT;
1099 	while (index < end_index) {
1100 		page = find_get_page(tree->mapping, index);
1101 		unlock_page(page);
1102 		page_cache_release(page);
1103 		index++;
1104 	}
1105 	return err;
1106 }
1107 EXPORT_SYMBOL(lock_range);
1108 
1109 /*
1110  * helper function to unlock both pages and extents in the tree.
1111  */
1112 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1113 {
1114 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1115 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1116 	struct page *page;
1117 
1118 	while (index <= end_index) {
1119 		page = find_get_page(tree->mapping, index);
1120 		unlock_page(page);
1121 		page_cache_release(page);
1122 		index++;
1123 	}
1124 	unlock_extent(tree, start, end, GFP_NOFS);
1125 	return 0;
1126 }
1127 EXPORT_SYMBOL(unlock_range);
1128 
1129 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1130 {
1131 	struct rb_node *node;
1132 	struct extent_state *state;
1133 	int ret = 0;
1134 
1135 	write_lock_irq(&tree->lock);
1136 	/*
1137 	 * this search will find all the extents that end after
1138 	 * our range starts.
1139 	 */
1140 	node = tree_search(&tree->state, start);
1141 	if (!node || IS_ERR(node)) {
1142 		ret = -ENOENT;
1143 		goto out;
1144 	}
1145 	state = rb_entry(node, struct extent_state, rb_node);
1146 	if (state->start != start) {
1147 		ret = -ENOENT;
1148 		goto out;
1149 	}
1150 	state->private = private;
1151 out:
1152 	write_unlock_irq(&tree->lock);
1153 	return ret;
1154 }
1155 
1156 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1157 {
1158 	struct rb_node *node;
1159 	struct extent_state *state;
1160 	int ret = 0;
1161 
1162 	read_lock_irq(&tree->lock);
1163 	/*
1164 	 * this search will find all the extents that end after
1165 	 * our range starts.
1166 	 */
1167 	node = tree_search(&tree->state, start);
1168 	if (!node || IS_ERR(node)) {
1169 		ret = -ENOENT;
1170 		goto out;
1171 	}
1172 	state = rb_entry(node, struct extent_state, rb_node);
1173 	if (state->start != start) {
1174 		ret = -ENOENT;
1175 		goto out;
1176 	}
1177 	*private = state->private;
1178 out:
1179 	read_unlock_irq(&tree->lock);
1180 	return ret;
1181 }
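
/*
 * A minimal usage sketch: set_state_private and get_state_private stash a
 * single u64 in the extent_state whose start matches exactly, for example
 * to hand a value from IO submission to the end_io path.  my_cookie below
 * is a placeholder:
 *
 *	u64 cookie;
 *
 *	set_state_private(tree, start, my_cookie);
 *	...
 *	if (!get_state_private(tree, start, &cookie))
 *		... cookie now equals my_cookie ...
 */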
1182 
1183 /*
1184  * searches a range in the state tree for a given mask.
1185  * If 'filled' == 1, this returns 1 only if every extent in the range
1186  * has the bits set.  Otherwise, 1 is returned if any bit in the
1187  * range is found set.
1188  */
1189 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1190 		   int bits, int filled)
1191 {
1192 	struct extent_state *state = NULL;
1193 	struct rb_node *node;
1194 	int bitset = 0;
1195 
1196 	read_lock_irq(&tree->lock);
1197 	node = tree_search(&tree->state, start);
1198 	while (node && start <= end) {
1199 		state = rb_entry(node, struct extent_state, rb_node);
1200 		if (state->start > end)
1201 			break;
1202 
1203 		if (filled && state->start > start) {
1204 			bitset = 0;
1205 			break;
1206 		}
1207 		if (state->state & bits) {
1208 			bitset = 1;
1209 			if (!filled)
1210 				break;
1211 		} else if (filled) {
1212 			bitset = 0;
1213 			break;
1214 		}
1215 		start = state->end + 1;
1216 		if (start > end)
1217 			break;
1218 		node = rb_next(node);
1219 	}
1220 	read_unlock_irq(&tree->lock);
1221 	return bitset;
1222 }
1223 EXPORT_SYMBOL(test_range_bit);
1224 
1225 /*
1226  * helper function to set a given page up to date if all the
1227  * extents in the tree for that page are up to date
1228  */
1229 static int check_page_uptodate(struct extent_map_tree *tree,
1230 			       struct page *page)
1231 {
1232 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1233 	u64 end = start + PAGE_CACHE_SIZE - 1;
1234 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1235 		SetPageUptodate(page);
1236 	return 0;
1237 }
1238 
1239 /*
1240  * helper function to unlock a page if all the extents in the tree
1241  * for that page are unlocked
1242  */
1243 static int check_page_locked(struct extent_map_tree *tree,
1244 			     struct page *page)
1245 {
1246 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1247 	u64 end = start + PAGE_CACHE_SIZE - 1;
1248 	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1249 		unlock_page(page);
1250 	return 0;
1251 }
1252 
1253 /*
1254  * helper function to end page writeback if all the extents
1255  * in the tree for that page are done with writeback
1256  */
1257 static int check_page_writeback(struct extent_map_tree *tree,
1258 			     struct page *page)
1259 {
1260 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1261 	u64 end = start + PAGE_CACHE_SIZE - 1;
1262 	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1263 		end_page_writeback(page);
1264 	return 0;
1265 }
1266 
1267 /* lots and lots of room for performance fixes in the end_bio funcs */
1268 
1269 /*
1270  * after a writepage IO is done, we need to:
1271  * clear the uptodate bits on error
1272  * clear the writeback bits in the extent tree for this IO
1273  * end_page_writeback if the page has no more pending IO
1274  *
1275  * Scheduling is not allowed, so the extent state tree is expected
1276  * to have one and only one object corresponding to this IO.
1277  */
1278 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1279 static void end_bio_extent_writepage(struct bio *bio, int err)
1280 #else
1281 static int end_bio_extent_writepage(struct bio *bio,
1282 				   unsigned int bytes_done, int err)
1283 #endif
1284 {
1285 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1286 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1287 	struct extent_map_tree *tree = bio->bi_private;
1288 	u64 start;
1289 	u64 end;
1290 	int whole_page;
1291 
1292 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1293 	if (bio->bi_size)
1294 		return 1;
1295 #endif
1296 
1297 	do {
1298 		struct page *page = bvec->bv_page;
1299 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1300 			 bvec->bv_offset;
1301 		end = start + bvec->bv_len - 1;
1302 
1303 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1304 			whole_page = 1;
1305 		else
1306 			whole_page = 0;
1307 
1308 		if (--bvec >= bio->bi_io_vec)
1309 			prefetchw(&bvec->bv_page->flags);
1310 
1311 		if (!uptodate) {
1312 			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1313 			ClearPageUptodate(page);
1314 			SetPageError(page);
1315 		}
1316 		clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1317 
1318 		if (whole_page)
1319 			end_page_writeback(page);
1320 		else
1321 			check_page_writeback(tree, page);
1322 		if (tree->ops && tree->ops->writepage_end_io_hook)
1323 			tree->ops->writepage_end_io_hook(page, start, end);
1324 	} while (bvec >= bio->bi_io_vec);
1325 
1326 	bio_put(bio);
1327 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1328 	return 0;
1329 #endif
1330 }
1331 
1332 /*
1333  * after a readpage IO is done, we need to:
1334  * clear the uptodate bits on error
1335  * set the uptodate bits if things worked
1336  * set the page up to date if all extents in the tree are uptodate
1337  * clear the lock bit in the extent tree
1338  * unlock the page if there are no other extents locked for it
1339  *
1340  * Scheduling is not allowed, so the extent state tree is expected
1341  * to have one and only one object corresponding to this IO.
1342  */
1343 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1344 static void end_bio_extent_readpage(struct bio *bio, int err)
1345 #else
1346 static int end_bio_extent_readpage(struct bio *bio,
1347 				   unsigned int bytes_done, int err)
1348 #endif
1349 {
1350 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1351 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1352 	struct extent_map_tree *tree = bio->bi_private;
1353 	u64 start;
1354 	u64 end;
1355 	int whole_page;
1356 	int ret;
1357 
1358 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1359 	if (bio->bi_size)
1360 		return 1;
1361 #endif
1362 
1363 	do {
1364 		struct page *page = bvec->bv_page;
1365 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1366 			bvec->bv_offset;
1367 		end = start + bvec->bv_len - 1;
1368 
1369 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1370 			whole_page = 1;
1371 		else
1372 			whole_page = 0;
1373 
1374 		if (--bvec >= bio->bi_io_vec)
1375 			prefetchw(&bvec->bv_page->flags);
1376 
1377 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1378 			ret = tree->ops->readpage_end_io_hook(page, start, end);
1379 			if (ret)
1380 				uptodate = 0;
1381 		}
1382 		if (uptodate) {
1383 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1384 			if (whole_page)
1385 				SetPageUptodate(page);
1386 			else
1387 				check_page_uptodate(tree, page);
1388 		} else {
1389 			ClearPageUptodate(page);
1390 			SetPageError(page);
1391 		}
1392 
1393 		unlock_extent(tree, start, end, GFP_ATOMIC);
1394 
1395 		if (whole_page)
1396 			unlock_page(page);
1397 		else
1398 			check_page_locked(tree, page);
1399 	} while (bvec >= bio->bi_io_vec);
1400 
1401 	bio_put(bio);
1402 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1403 	return 0;
1404 #endif
1405 }
1406 
1407 /*
1408  * IO done from prepare_write is pretty simple, we just unlock
1409  * the structs in the extent tree when done, and set the uptodate bits
1410  * as appropriate.
1411  */
1412 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1413 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1414 #else
1415 static int end_bio_extent_preparewrite(struct bio *bio,
1416 				       unsigned int bytes_done, int err)
1417 #endif
1418 {
1419 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1420 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1421 	struct extent_map_tree *tree = bio->bi_private;
1422 	u64 start;
1423 	u64 end;
1424 
1425 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1426 	if (bio->bi_size)
1427 		return 1;
1428 #endif
1429 
1430 	do {
1431 		struct page *page = bvec->bv_page;
1432 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1433 			bvec->bv_offset;
1434 		end = start + bvec->bv_len - 1;
1435 
1436 		if (--bvec >= bio->bi_io_vec)
1437 			prefetchw(&bvec->bv_page->flags);
1438 
1439 		if (uptodate) {
1440 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1441 		} else {
1442 			ClearPageUptodate(page);
1443 			SetPageError(page);
1444 		}
1445 
1446 		unlock_extent(tree, start, end, GFP_ATOMIC);
1447 
1448 	} while (bvec >= bio->bi_io_vec);
1449 
1450 	bio_put(bio);
1451 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1452 	return 0;
1453 #endif
1454 }
1455 
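/*
 * allocate a single-vec bio for the given sector/size/offset, set
 * bi_end_io to the supplied completion handler and bi_private to the
 * tree, then submit it.  Returns -EOPNOTSUPP if the lower layers
 * rejected the bio, 0 otherwise.
 */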
1456 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1457 			      struct page *page, sector_t sector,
1458 			      size_t size, unsigned long offset,
1459 			      struct block_device *bdev,
1460 			      bio_end_io_t end_io_func)
1461 {
1462 	struct bio *bio;
1463 	int ret = 0;
1464 
1465 	bio = bio_alloc(GFP_NOIO, 1);
1466 
1467 	bio->bi_sector = sector;
1468 	bio->bi_bdev = bdev;
1469 	bio->bi_io_vec[0].bv_page = page;
1470 	bio->bi_io_vec[0].bv_len = size;
1471 	bio->bi_io_vec[0].bv_offset = offset;
1472 
1473 	bio->bi_vcnt = 1;
1474 	bio->bi_idx = 0;
1475 	bio->bi_size = size;
1476 
1477 	bio->bi_end_io = end_io_func;
1478 	bio->bi_private = tree;
1479 
1480 	bio_get(bio);
1481 	submit_bio(rw, bio);
1482 
1483 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
1484 		ret = -EOPNOTSUPP;
1485 
1486 	bio_put(bio);
1487 	return ret;
1488 }
1489 
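/*
 * mark a page as belonging to the extent code: set PagePrivate, store the
 * EXTENT_PAGE_PRIVATE marker in page->private and take an extra page
 * reference.  This is only done once per page.
 */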
1490 void set_page_extent_mapped(struct page *page)
1491 {
1492 	if (!PagePrivate(page)) {
1493 		SetPagePrivate(page);
1494 		WARN_ON(!page->mapping->a_ops->invalidatepage);
1495 		set_page_private(page, EXTENT_PAGE_PRIVATE);
1496 		page_cache_get(page);
1497 	}
1498 }
1499 
1500 /*
1501  * basic readpage implementation.  Locked extent state structs are inserted
1502  * into the tree and are removed when the IO is done (by the end_io
1503  * handlers)
1504  */
1505 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1506 			  get_extent_t *get_extent)
1507 {
1508 	struct inode *inode = page->mapping->host;
1509 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1510 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1511 	u64 end;
1512 	u64 cur = start;
1513 	u64 extent_offset;
1514 	u64 last_byte = i_size_read(inode);
1515 	u64 block_start;
1516 	u64 cur_end;
1517 	sector_t sector;
1518 	struct extent_map *em;
1519 	struct block_device *bdev;
1520 	int ret;
1521 	int nr = 0;
1522 	size_t page_offset = 0;
1523 	size_t iosize;
1524 	size_t blocksize = inode->i_sb->s_blocksize;
1525 
1526 	set_page_extent_mapped(page);
1527 
1528 	end = page_end;
1529 	lock_extent(tree, start, end, GFP_NOFS);
1530 
1531 	while (cur <= end) {
1532 		if (cur >= last_byte) {
1533 			iosize = PAGE_CACHE_SIZE - page_offset;
1534 			zero_user_page(page, page_offset, iosize, KM_USER0);
1535 			set_extent_uptodate(tree, cur, cur + iosize - 1,
1536 					    GFP_NOFS);
1537 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1538 			break;
1539 		}
1540 		em = get_extent(inode, page, page_offset, cur, end, 0);
1541 		if (IS_ERR(em) || !em) {
1542 			SetPageError(page);
1543 			unlock_extent(tree, cur, end, GFP_NOFS);
1544 			break;
1545 		}
1546 
1547 		extent_offset = cur - em->start;
1548 		BUG_ON(em->end < cur);
1549 		BUG_ON(end < cur);
1550 
1551 		iosize = min(em->end - cur, end - cur) + 1;
1552 		cur_end = min(em->end, end);
1553 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1554 		sector = (em->block_start + extent_offset) >> 9;
1555 		bdev = em->bdev;
1556 		block_start = em->block_start;
1557 		free_extent_map(em);
1558 		em = NULL;
1559 
1560 		/* we've found a hole, just zero and go on */
1561 		if (block_start == EXTENT_MAP_HOLE) {
1562 			zero_user_page(page, page_offset, iosize, KM_USER0);
1563 			set_extent_uptodate(tree, cur, cur + iosize - 1,
1564 					    GFP_NOFS);
1565 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1566 			cur = cur + iosize;
1567 			page_offset += iosize;
1568 			continue;
1569 		}
1570 		/* the get_extent function already copied into the page */
1571 		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1572 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1573 			cur = cur + iosize;
1574 			page_offset += iosize;
1575 			continue;
1576 		}
1577 
1578 		ret = 0;
1579 		if (tree->ops && tree->ops->readpage_io_hook) {
1580 			ret = tree->ops->readpage_io_hook(page, cur,
1581 							  cur + iosize - 1);
1582 		}
1583 		if (!ret) {
1584 			ret = submit_extent_page(READ, tree, page,
1585 						 sector, iosize, page_offset,
1586 						 bdev, end_bio_extent_readpage);
1587 		}
1588 		if (ret)
1589 			SetPageError(page);
1590 		cur = cur + iosize;
1591 		page_offset += iosize;
1592 		nr++;
1593 	}
1594 	if (!nr) {
1595 		if (!PageError(page))
1596 			SetPageUptodate(page);
1597 		unlock_page(page);
1598 	}
1599 	return 0;
1600 }
1601 EXPORT_SYMBOL(extent_read_full_page);
1602 
1603 /*
1604  * the writepage semantics are similar to regular writepage.  extent
1605  * records are inserted to lock ranges in the tree, and as dirty areas
1606  * are found, they are marked writeback.  Then the lock bits are removed
1607  * and the end_io handler clears the writeback ranges
1608  */
1609 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1610 			  get_extent_t *get_extent,
1611 			  struct writeback_control *wbc)
1612 {
1613 	struct inode *inode = page->mapping->host;
1614 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1615 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1616 	u64 end;
1617 	u64 cur = start;
1618 	u64 extent_offset;
1619 	u64 last_byte = i_size_read(inode);
1620 	u64 block_start;
1621 	sector_t sector;
1622 	struct extent_map *em;
1623 	struct block_device *bdev;
1624 	int ret;
1625 	int nr = 0;
1626 	size_t page_offset = 0;
1627 	size_t iosize;
1628 	size_t blocksize;
1629 	loff_t i_size = i_size_read(inode);
1630 	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1631 	u64 nr_delalloc;
1632 	u64 delalloc_end;
1633 
1634 	WARN_ON(!PageLocked(page));
1635 	if (page->index > end_index) {
1636 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1637 		unlock_page(page);
1638 		return 0;
1639 	}
1640 
1641 	if (page->index == end_index) {
1642 		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1643 		zero_user_page(page, offset,
1644 			       PAGE_CACHE_SIZE - offset, KM_USER0);
1645 	}
1646 
1647 	set_page_extent_mapped(page);
1648 
1649 	lock_extent(tree, start, page_end, GFP_NOFS);
1650 	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
1651 					       &delalloc_end,
1652 					       128 * 1024 * 1024);
1653 	if (nr_delalloc) {
1654 		tree->ops->fill_delalloc(inode, start, delalloc_end);
1655 		if (delalloc_end >= page_end + 1) {
1656 			clear_extent_bit(tree, page_end + 1, delalloc_end,
1657 					 EXTENT_LOCKED | EXTENT_DELALLOC,
1658 					 1, 0, GFP_NOFS);
1659 		}
1660 		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
1661 				 0, 0, GFP_NOFS);
1662 		if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1663 			printk("found delalloc bits after clear extent_bit\n");
1664 		}
1665 	} else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1666 		printk("found delalloc bits after find_delalloc_range returns 0\n");
1667 	}
1668 
1669 	end = page_end;
1670 	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1671 		printk("found delalloc bits after lock_extent\n");
1672 	}
1673 
1674 	if (last_byte <= start) {
1675 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1676 		goto done;
1677 	}
1678 
1679 	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1680 	blocksize = inode->i_sb->s_blocksize;
1681 
1682 	while (cur <= end) {
1683 		if (cur >= last_byte) {
1684 			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1685 			break;
1686 		}
1687 		em = get_extent(inode, page, page_offset, cur, end, 0);
1688 		if (IS_ERR(em) || !em) {
1689 			SetPageError(page);
1690 			break;
1691 		}
1692 
1693 		extent_offset = cur - em->start;
1694 		BUG_ON(em->end < cur);
1695 		BUG_ON(end < cur);
1696 		iosize = min(em->end - cur, end - cur) + 1;
1697 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1698 		sector = (em->block_start + extent_offset) >> 9;
1699 		bdev = em->bdev;
1700 		block_start = em->block_start;
1701 		free_extent_map(em);
1702 		em = NULL;
1703 
1704 		if (block_start == EXTENT_MAP_HOLE ||
1705 		    block_start == EXTENT_MAP_INLINE) {
1706 			clear_extent_dirty(tree, cur,
1707 					   cur + iosize - 1, GFP_NOFS);
1708 			cur = cur + iosize;
1709 			page_offset += iosize;
1710 			continue;
1711 		}
1712 
1713 		/* leave this out until we have a page_mkwrite call */
1714 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1715 				   EXTENT_DIRTY, 0)) {
1716 			cur = cur + iosize;
1717 			page_offset += iosize;
1718 			continue;
1719 		}
1720 		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1721 		if (tree->ops && tree->ops->writepage_io_hook) {
1722 			ret = tree->ops->writepage_io_hook(page, cur,
1723 						cur + iosize - 1);
1724 		} else {
1725 			ret = 0;
1726 		}
1727 		if (ret)
1728 			SetPageError(page);
1729 		else {
1730 			set_range_writeback(tree, cur, cur + iosize - 1);
1731 			ret = submit_extent_page(WRITE, tree, page, sector,
1732 						 iosize, page_offset, bdev,
1733 						 end_bio_extent_writepage);
1734 			if (ret)
1735 				SetPageError(page);
1736 		}
1737 		cur = cur + iosize;
1738 		page_offset += iosize;
1739 		nr++;
1740 	}
1741 done:
1742 	unlock_extent(tree, start, page_end, GFP_NOFS);
1743 	unlock_page(page);
1744 	return 0;
1745 }
1746 EXPORT_SYMBOL(extent_write_full_page);
1747 
1748 /*
1749  * basic invalidatepage code, this waits on any locked or writeback
1750  * ranges corresponding to the page, and then deletes any extent state
1751  * records from the tree
1752  */
1753 int extent_invalidatepage(struct extent_map_tree *tree,
1754 			  struct page *page, unsigned long offset)
1755 {
1756 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1757 	u64 end = start + PAGE_CACHE_SIZE - 1;
1758 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1759 
1760 	start += (offset + blocksize -1) & ~(blocksize - 1);
1761 	if (start > end)
1762 		return 0;
1763 
1764 	lock_extent(tree, start, end, GFP_NOFS);
1765 	wait_on_extent_writeback(tree, start, end);
1766 	clear_extent_bit(tree, start, end,
1767 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
1768 			 1, 1, GFP_NOFS);
1769 	return 0;
1770 }
1771 EXPORT_SYMBOL(extent_invalidatepage);
1772 
1773 /*
1774  * simple commit_write call, the page is marked dirty and i_size is
1775  * extended if the write went past the old end of file
1776  */
1777 int extent_commit_write(struct extent_map_tree *tree,
1778 			struct inode *inode, struct page *page,
1779 			unsigned from, unsigned to)
1780 {
1781 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1782 
1783 	set_page_extent_mapped(page);
1784 	set_page_dirty(page);
1785 
1786 	if (pos > inode->i_size) {
1787 		i_size_write(inode, pos);
1788 		mark_inode_dirty(inode);
1789 	}
1790 	return 0;
1791 }
1792 EXPORT_SYMBOL(extent_commit_write);
1793 
1794 int extent_prepare_write(struct extent_map_tree *tree,
1795 			 struct inode *inode, struct page *page,
1796 			 unsigned from, unsigned to, get_extent_t *get_extent)
1797 {
1798 	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1799 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1800 	u64 block_start;
1801 	u64 orig_block_start;
1802 	u64 block_end;
1803 	u64 cur_end;
1804 	struct extent_map *em;
1805 	unsigned blocksize = 1 << inode->i_blkbits;
1806 	size_t page_offset = 0;
1807 	size_t block_off_start;
1808 	size_t block_off_end;
1809 	int err = 0;
1810 	int iocount = 0;
1811 	int ret = 0;
1812 	int isnew;
1813 
1814 	set_page_extent_mapped(page);
1815 
1816 	block_start = (page_start + from) & ~((u64)blocksize - 1);
1817 	block_end = (page_start + to - 1) | (blocksize - 1);
1818 	orig_block_start = block_start;
1819 
1820 	lock_extent(tree, page_start, page_end, GFP_NOFS);
1821 	while(block_start <= block_end) {
1822 		em = get_extent(inode, page, page_offset, block_start,
1823 				block_end, 1);
1824 		if (IS_ERR(em) || !em) {
1825 			goto err;
1826 		}
1827 		cur_end = min(block_end, em->end);
1828 		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
1829 		block_off_end = block_off_start + blocksize;
1830 		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
1831 
1832 		if (!PageUptodate(page) && isnew &&
1833 		    (block_off_end > to || block_off_start < from)) {
1834 			void *kaddr;
1835 
1836 			kaddr = kmap_atomic(page, KM_USER0);
1837 			if (block_off_end > to)
1838 				memset(kaddr + to, 0, block_off_end - to);
1839 			if (block_off_start < from)
1840 				memset(kaddr + block_off_start, 0,
1841 				       from - block_off_start);
1842 			flush_dcache_page(page);
1843 			kunmap_atomic(kaddr, KM_USER0);
1844 		}
1845 		if (!isnew && !PageUptodate(page) &&
1846 		    (block_off_end > to || block_off_start < from) &&
1847 		    !test_range_bit(tree, block_start, cur_end,
1848 				    EXTENT_UPTODATE, 1)) {
1849 			u64 sector;
1850 			u64 extent_offset = block_start - em->start;
1851 			size_t iosize;
1852 			sector = (em->block_start + extent_offset) >> 9;
1853 			iosize = (cur_end - block_start + blocksize - 1) &
1854 				~((u64)blocksize - 1);
1855 			/*
1856 			 * we've already got the extent locked, but we
1857 			 * need to split the state such that our end_bio
1858 			 * handler can clear the lock.
1859 			 */
1860 			set_extent_bit(tree, block_start,
1861 				       block_start + iosize - 1,
1862 				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
1863 			ret = submit_extent_page(READ, tree, page,
1864 					 sector, iosize, page_offset, em->bdev,
1865 					 end_bio_extent_preparewrite);
1866 			iocount++;
1867 			block_start = block_start + iosize;
1868 		} else {
1869 			set_extent_uptodate(tree, block_start, cur_end,
1870 					    GFP_NOFS);
1871 			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
1872 			block_start = cur_end + 1;
1873 		}
1874 		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
1875 		free_extent_map(em);
1876 	}
1877 	if (iocount) {
1878 		wait_extent_bit(tree, orig_block_start,
1879 				block_end, EXTENT_LOCKED);
1880 	}
1881 	check_page_uptodate(tree, page);
1882 err:
1883 	/* FIXME, zero out newly allocated blocks on error */
1884 	return err;
1885 }
1886 EXPORT_SYMBOL(extent_prepare_write);
1887 
1888 /*
1889  * a helper for releasepage.  As long as there are no locked extents
1890  * in the range corresponding to the page, both state records and extent
1891  * map records are removed
1892  */
1893 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
1894 {
1895 	struct extent_map *em;
1896 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1897 	u64 end = start + PAGE_CACHE_SIZE - 1;
1898 	u64 orig_start = start;
1899 	int ret = 1;
1900 
1901 	while (start <= end) {
1902 		em = lookup_extent_mapping(tree, start, end);
1903 		if (!em || IS_ERR(em))
1904 			break;
1905 		if (!test_range_bit(tree, em->start, em->end,
1906 				    EXTENT_LOCKED, 0)) {
1907 			remove_extent_mapping(tree, em);
1908 			/* once for the rb tree */
1909 			free_extent_map(em);
1910 		}
1911 		start = em->end + 1;
1912 		/* once for us */
1913 		free_extent_map(em);
1914 	}
1915 	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
1916 		ret = 0;
1917 	else
1918 		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
1919 				 1, 1, GFP_NOFS);
1920 	return ret;
1921 }
1922 EXPORT_SYMBOL(try_release_extent_mapping);
1923 
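/*
 * extent_bmap: back end for the bmap() address space operation.  Look up
 * the extent covering the requested file block and translate it into a
 * device sector.  Holes and inline extents have no meaningful sector, so
 * 0 is returned for them (and for any failed lookup).
 *
 * Illustrative only: a filesystem's ->bmap callback would typically just
 * forward to this helper with its own get_extent routine, for example
 *
 *	static sector_t example_bmap(struct address_space *mapping,
 *				     sector_t block)
 *	{
 *		return extent_bmap(mapping, block, example_get_extent);
 *	}
 *
 * where example_get_extent stands in for the caller's get_extent_t
 * implementation.
 */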
1924 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
1925 		get_extent_t *get_extent)
1926 {
1927 	struct inode *inode = mapping->host;
1928 	u64 start = (u64)iblock << inode->i_blkbits;
1929 	u64 end = start + (1 << inode->i_blkbits) - 1;
1930 	sector_t sector = 0;
1931 	struct extent_map *em;
1932 
1933 	em = get_extent(inode, NULL, 0, start, end, 0);
1934 	if (!em || IS_ERR(em))
1935 		return 0;
1936 
1937 	if (em->block_start == EXTENT_MAP_INLINE ||
1938 	    em->block_start == EXTENT_MAP_HOLE)
1939 		goto out;
1940 
1941 	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
1942 out:
1943 	free_extent_map(em);
1944 	return sector;
1945 }
1946 
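/*
 * Each extent_map_tree keeps a small LRU of recently used extent_buffers
 * (at most BUFFER_LRU_MAX entries, protected by tree->lru_lock).  add_lru
 * moves @eb to the head of that list, taking a reference on behalf of the
 * list, and drops the oldest entry once the list grows past the limit.
 */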
1947 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
1948 {
1949 	if (list_empty(&eb->lru)) {
1950 		extent_buffer_get(eb);
1951 		list_add(&eb->lru, &tree->buffer_lru);
1952 		tree->lru_size++;
1953 		if (tree->lru_size >= BUFFER_LRU_MAX) {
1954 			struct extent_buffer *rm;
1955 			rm = list_entry(tree->buffer_lru.prev,
1956 					struct extent_buffer, lru);
1957 			tree->lru_size--;
1958 			list_del(&rm->lru);
1959 			free_extent_buffer(rm);
1960 		}
1961 	} else
1962 		list_move(&eb->lru, &tree->buffer_lru);
1963 	return 0;
1964 }
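
/*
 * Scan the LRU for a buffer that exactly matches @start and @len.  On a
 * hit an extra reference is taken and the buffer is returned; the caller
 * is responsible for dropping it with free_extent_buffer().
 */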
1965 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
1966 				      u64 start, unsigned long len)
1967 {
1968 	struct list_head *lru = &tree->buffer_lru;
1969 	struct list_head *cur = lru->next;
1970 	struct extent_buffer *eb;
1971 
1972 	if (list_empty(lru))
1973 		return NULL;
1974 
1975 	do {
1976 		eb = list_entry(cur, struct extent_buffer, lru);
1977 		if (eb->start == start && eb->len == len) {
1978 			extent_buffer_get(eb);
1979 			return eb;
1980 		}
1981 		cur = cur->next;
1982 	} while (cur != lru);
1983 	return NULL;
1984 }
1985 
1986 static inline unsigned long num_extent_pages(u64 start, u64 len)
1987 {
1988 	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
1989 		(start >> PAGE_CACHE_SHIFT);
1990 }
1991 
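/*
 * Return page @i of the extent buffer.  Page 0 is cached in
 * eb->first_page; the remaining pages are looked up in the page cache
 * radix tree of the owning mapping.
 */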
1992 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
1993 					      unsigned long i)
1994 {
1995 	struct page *p;
1996 	struct address_space *mapping;
1997 
1998 	if (i == 0)
1999 		return eb->first_page;
2000 	i += eb->start >> PAGE_CACHE_SHIFT;
2001 	mapping = eb->first_page->mapping;
2002 	read_lock_irq(&mapping->tree_lock);
2003 	p = radix_tree_lookup(&mapping->page_tree, i);
2004 	read_unlock_irq(&mapping->tree_lock);
2005 	return p;
2006 }
2007 
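/*
 * Common allocation path: reuse a matching buffer from the LRU when one
 * is present, otherwise allocate a zeroed extent_buffer from the slab
 * cache.  Either way the buffer ends up at the head of the LRU before it
 * is returned.
 */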
2008 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2009 						   u64 start,
2010 						   unsigned long len,
2011 						   gfp_t mask)
2012 {
2013 	struct extent_buffer *eb = NULL;
2014 
2015 	spin_lock(&tree->lru_lock);
2016 	eb = find_lru(tree, start, len);
2017 	if (eb) {
2018 		goto lru_add;
2019 	}
2020 	spin_unlock(&tree->lru_lock);
2021 
2022 	/* not found in the lru, so allocate a new zeroed buffer */
2023 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2024 	if (!eb)
2025 		return NULL;
2026 
2027 	INIT_LIST_HEAD(&eb->lru);
2028 	eb->start = start;
2029 	eb->len = len;
2030 	atomic_set(&eb->refs, 1);
2031 
2032 	spin_lock(&tree->lru_lock);
2033 lru_add:
2034 	add_lru(tree, eb);
2035 	spin_unlock(&tree->lru_lock);
2036 	return eb;
2037 }
2038 
2039 static void __free_extent_buffer(struct extent_buffer *eb)
2040 {
2041 	kmem_cache_free(extent_buffer_cache, eb);
2042 }
2043 
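/*
 * Create (or revive from the LRU) an extent_buffer covering
 * [start, start + len).  The backing pages come from the tree's mapping
 * via find_or_create_page; @page0 may be passed in by a caller that
 * already holds the first page.  EXTENT_BUFFER_FILLED is set once the
 * pages are attached, so a buffer found in the LRU is returned as is,
 * and EXTENT_UPTODATE is set if every page was already uptodate.
 */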
2044 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2045 					  u64 start, unsigned long len,
2046 					  struct page *page0,
2047 					  gfp_t mask)
2048 {
2049 	unsigned long num_pages = num_extent_pages(start, len);
2050 	unsigned long i;
2051 	unsigned long index = start >> PAGE_CACHE_SHIFT;
2052 	struct extent_buffer *eb;
2053 	struct page *p;
2054 	struct address_space *mapping = tree->mapping;
2055 	int uptodate = 1;
2056 
2057 	eb = __alloc_extent_buffer(tree, start, len, mask);
2058 	if (!eb || IS_ERR(eb))
2059 		return NULL;
2060 
2061 	if (eb->flags & EXTENT_BUFFER_FILLED)
2062 		return eb;
2063 
2064 	if (page0) {
2065 		eb->first_page = page0;
2066 		i = 1;
2067 		index++;
2068 		page_cache_get(page0);
2069 		mark_page_accessed(page0);
2070 		set_page_extent_mapped(page0);
2071 		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2072 				 len << 2);
2073 	} else {
2074 		i = 0;
2075 	}
2076 	for (; i < num_pages; i++, index++) {
2077 		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2078 		if (!p) {
2079 			WARN_ON(1);
2080 			/* make sure the free only frees the pages we've
2081 			 * grabbed a reference on
2082 			 */
2083 			eb->len = i << PAGE_CACHE_SHIFT;
2084 			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
2085 			goto fail;
2086 		}
2087 		set_page_extent_mapped(p);
2088 		mark_page_accessed(p);
2089 		if (i == 0) {
2090 			eb->first_page = p;
2091 			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2092 					 len << 2);
2093 		} else {
2094 			set_page_private(p, EXTENT_PAGE_PRIVATE);
2095 		}
2096 		if (!PageUptodate(p))
2097 			uptodate = 0;
2098 		unlock_page(p);
2099 	}
2100 	if (uptodate)
2101 		eb->flags |= EXTENT_UPTODATE;
2102 	eb->flags |= EXTENT_BUFFER_FILLED;
2103 	return eb;
2104 fail:
2105 	free_extent_buffer(eb);
2106 	return NULL;
2107 }
2108 EXPORT_SYMBOL(alloc_extent_buffer);
2109 
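/*
 * Like alloc_extent_buffer, but only attach pages that already exist in
 * the page cache (find_lock_page).  If any page in the range is missing,
 * the partially filled buffer is freed and NULL is returned.
 */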
2110 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2111 					 u64 start, unsigned long len,
2112 					  gfp_t mask)
2113 {
2114 	unsigned long num_pages = num_extent_pages(start, len);
2115 	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
2116 	struct extent_buffer *eb;
2117 	struct page *p;
2118 	struct address_space *mapping = tree->mapping;
2119 	int uptodate = 1;
2120 
2121 	eb = __alloc_extent_buffer(tree, start, len, mask);
2122 	if (!eb || IS_ERR(eb))
2123 		return NULL;
2124 
2125 	if (eb->flags & EXTENT_BUFFER_FILLED)
2126 		return eb;
2127 
2128 	for (i = 0; i < num_pages; i++, index++) {
2129 		p = find_lock_page(mapping, index);
2130 		if (!p) {
2131 			/* make sure the free only frees the pages we've
2132 			 * grabbed a reference on
2133 			 */
2134 			eb->len = i << PAGE_CACHE_SHIFT;
2135 			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
2136 			goto fail;
2137 		}
2138 		set_page_extent_mapped(p);
2139 		mark_page_accessed(p);
2140 
2141 		if (i == 0) {
2142 			eb->first_page = p;
2143 			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2144 					 len << 2);
2145 		} else {
2146 			set_page_private(p, EXTENT_PAGE_PRIVATE);
2147 		}
2148 
2149 		if (!PageUptodate(p))
2150 			uptodate = 0;
2151 		unlock_page(p);
2152 	}
2153 	if (uptodate)
2154 		eb->flags |= EXTENT_UPTODATE;
2155 	eb->flags |= EXTENT_BUFFER_FILLED;
2156 	return eb;
2157 fail:
2158 	free_extent_buffer(eb);
2159 	return NULL;
2160 }
2161 EXPORT_SYMBOL(find_extent_buffer);
2162 
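/*
 * Drop a reference on @eb.  When the last reference goes away, the page
 * references taken at allocation time are released and the struct itself
 * goes back to the slab cache.
 */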
2163 void free_extent_buffer(struct extent_buffer *eb)
2164 {
2165 	unsigned long i;
2166 	unsigned long num_pages;
2167 
2168 	if (!eb)
2169 		return;
2170 
2171 	if (!atomic_dec_and_test(&eb->refs))
2172 		return;
2173 
2174 	num_pages = num_extent_pages(eb->start, eb->len);
2175 
2176 	for (i = 0; i < num_pages; i++) {
2177 		page_cache_release(extent_buffer_page(eb, i));
2178 	}
2179 	__free_extent_buffer(eb);
2180 }
2181 EXPORT_SYMBOL(free_extent_buffer);
2182 
2183 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2184 			      struct extent_buffer *eb)
2185 {
2186 	int set;
2187 	unsigned long i;
2188 	unsigned long num_pages;
2189 	struct page *page;
2190 
2191 	u64 start = eb->start;
2192 	u64 end = start + eb->len - 1;
2193 
2194 	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2195 	num_pages = num_extent_pages(eb->start, eb->len);
2196 
2197 	for (i = 0; i < num_pages; i++) {
2198 		page = extent_buffer_page(eb, i);
2199 		lock_page(page);
2200 		/*
2201 		 * if we're on the last page or the first page and the
2202 		 * block isn't aligned on a page boundary, do extra checks
2203 		 * to make sure we don't clean a page that is partially dirty
2204 		 */
2205 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2206 		    ((i == num_pages - 1) &&
2207 		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2208 			start = (u64)page->index << PAGE_CACHE_SHIFT;
2209 			end  = start + PAGE_CACHE_SIZE - 1;
2210 			if (test_range_bit(tree, start, end,
2211 					   EXTENT_DIRTY, 0)) {
2212 				unlock_page(page);
2213 				continue;
2214 			}
2215 		}
2216 		clear_page_dirty_for_io(page);
2217 		unlock_page(page);
2218 	}
2219 	return 0;
2220 }
2221 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2222 
2223 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2224 				    struct extent_buffer *eb)
2225 {
2226 	return wait_on_extent_writeback(tree, eb->start,
2227 					eb->start + eb->len - 1);
2228 }
2229 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2230 
2231 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2232 			     struct extent_buffer *eb)
2233 {
2234 	unsigned long i;
2235 	unsigned long num_pages;
2236 
2237 	num_pages = num_extent_pages(eb->start, eb->len);
2238 	for (i = 0; i < num_pages; i++) {
2239 		struct page *page = extent_buffer_page(eb, i);
2240 		/* writepage may need to do something special for the
2241 		 * first page, so we have to make sure page->private is
2242 		 * properly set.  releasepage may drop page->private
2243 		 * on us if the page isn't already dirty.
2244 		 */
2245 		if (i == 0) {
2246 			lock_page(page);
2247 			set_page_private(page,
2248 					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2249 					 eb->len << 2);
2250 		}
2251 		__set_page_dirty_nobuffers(page);
2252 		if (i == 0)
2253 			unlock_page(page);
2254 	}
2255 	return set_extent_dirty(tree, eb->start,
2256 				eb->start + eb->len - 1, GFP_NOFS);
2257 }
2258 EXPORT_SYMBOL(set_extent_buffer_dirty);
2259 
2260 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2261 				struct extent_buffer *eb)
2262 {
2263 	unsigned long i;
2264 	struct page *page;
2265 	unsigned long num_pages;
2266 
2267 	num_pages = num_extent_pages(eb->start, eb->len);
2268 
2269 	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2270 			    GFP_NOFS);
2271 	for (i = 0; i < num_pages; i++) {
2272 		page = extent_buffer_page(eb, i);
2273 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2274 		    ((i == num_pages - 1) &&
2275 		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2276 			check_page_uptodate(tree, page);
2277 			continue;
2278 		}
2279 		SetPageUptodate(page);
2280 	}
2281 	return 0;
2282 }
2283 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2284 
2285 int extent_buffer_uptodate(struct extent_map_tree *tree,
2286 			     struct extent_buffer *eb)
2287 {
2288 	if (eb->flags & EXTENT_UPTODATE)
2289 		return 1;
2290 	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2291 			   EXTENT_UPTODATE, 1);
2292 }
2293 EXPORT_SYMBOL(extent_buffer_uptodate);
2294 
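/*
 * Start reads for any pages of @eb that are not yet uptodate.  With
 * @wait set, the function waits for the reads and returns -EIO if any
 * page still is not uptodate; without @wait it only submits reads for
 * pages it could trylock.  A non-zero @start is a byte offset inside the
 * buffer used to skip the leading pages.
 *
 * Illustrative sketch of a typical caller (not taken from this file;
 * "tree", "start", "blocksize" and the destination "buf" are assumed to
 * be provided by the caller):
 *
 *	struct extent_buffer *eb;
 *	int ret;
 *
 *	eb = alloc_extent_buffer(tree, start, blocksize, NULL, GFP_NOFS);
 *	if (!eb)
 *		return -ENOMEM;
 *	ret = read_extent_buffer_pages(tree, eb, 0, 1);
 *	if (!ret)
 *		read_extent_buffer(eb, buf, 0, blocksize);
 *	free_extent_buffer(eb);
 *	return ret;
 */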
2295 int read_extent_buffer_pages(struct extent_map_tree *tree,
2296 			     struct extent_buffer *eb,
2297 			     u64 start,
2298 			     int wait)
2299 {
2300 	unsigned long i;
2301 	unsigned long start_i;
2302 	struct page *page;
2303 	int err;
2304 	int ret = 0;
2305 	unsigned long num_pages;
2306 
2307 	if (eb->flags & EXTENT_UPTODATE)
2308 		return 0;
2309 
2310 	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2311 			   EXTENT_UPTODATE, 1)) {
2312 		return 0;
2313 	}
2314 	if (start) {
2315 		WARN_ON(start < eb->start);
2316 		start_i = (start >> PAGE_CACHE_SHIFT) -
2317 			(eb->start >> PAGE_CACHE_SHIFT);
2318 	} else {
2319 		start_i = 0;
2320 	}
2321 
2322 	num_pages = num_extent_pages(eb->start, eb->len);
2323 	for (i = start_i; i < num_pages; i++) {
2324 		page = extent_buffer_page(eb, i);
2325 		if (PageUptodate(page)) {
2326 			continue;
2327 		}
2328 		if (!wait) {
2329 			if (TestSetPageLocked(page)) {
2330 				continue;
2331 			}
2332 		} else {
2333 			lock_page(page);
2334 		}
2335 		if (!PageUptodate(page)) {
2336 			err = page->mapping->a_ops->readpage(NULL, page);
2337 			if (err) {
2338 				ret = err;
2339 			}
2340 		} else {
2341 			unlock_page(page);
2342 		}
2343 	}
2344 
2345 	if (ret || !wait) {
2346 		return ret;
2347 	}
2348 
2349 	for (i = start_i; i < num_pages; i++) {
2350 		page = extent_buffer_page(eb, i);
2351 		wait_on_page_locked(page);
2352 		if (!PageUptodate(page)) {
2353 			ret = -EIO;
2354 		}
2355 	}
2356 	if (!ret)
2357 		eb->flags |= EXTENT_UPTODATE;
2358 	return ret;
2359 }
2360 EXPORT_SYMBOL(read_extent_buffer_pages);
2361 
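/*
 * Copy @len bytes starting at byte offset @start inside the extent
 * buffer into @dstv, walking the backing pages with kmap_atomic.  The
 * pages are expected to already be uptodate (see
 * read_extent_buffer_pages).
 */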
2362 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2363 			unsigned long start,
2364 			unsigned long len)
2365 {
2366 	size_t cur;
2367 	size_t offset;
2368 	struct page *page;
2369 	char *kaddr;
2370 	char *dst = (char *)dstv;
2371 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2372 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2373 	unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2374 
2375 	WARN_ON(start > eb->len);
2376 	WARN_ON(start + len > eb->start + eb->len);
2377 
2378 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2379 
2380 	while(len > 0) {
2381 		page = extent_buffer_page(eb, i);
2382 		if (!PageUptodate(page)) {
2383 			printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2384 			WARN_ON(1);
2385 		}
2386 		WARN_ON(!PageUptodate(page));
2387 
2388 		cur = min(len, (PAGE_CACHE_SIZE - offset));
2389 		kaddr = kmap_atomic(page, KM_USER1);
2390 		memcpy(dst, kaddr + offset, cur);
2391 		kunmap_atomic(kaddr, KM_USER1);
2392 
2393 		dst += cur;
2394 		len -= cur;
2395 		offset = 0;
2396 		i++;
2397 	}
2398 }
2399 EXPORT_SYMBOL(read_extent_buffer);
2400 
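/*
 * Map part of the extent buffer directly.  The requested range must not
 * cross a page boundary, otherwise -EINVAL is returned.  On success,
 * *token holds the kmap_atomic cookie for unmap_extent_buffer, *map
 * points at buffer offset *map_start, and *map_len bytes are addressable
 * from there; the byte the caller asked for lives at
 * *map + (start - *map_start).
 *
 * Illustrative sketch (assumes @eb is uptodate and "offset" is a caller
 * supplied byte offset inside the buffer):
 *
 *	char *token, *kaddr;
 *	unsigned long map_start, map_len;
 *	u64 val;
 *
 *	if (!map_private_extent_buffer(eb, offset, sizeof(val), &token,
 *				       &kaddr, &map_start, &map_len,
 *				       KM_USER0)) {
 *		memcpy(&val, kaddr + offset - map_start, sizeof(val));
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 */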
2401 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2402 			       unsigned long min_len, char **token, char **map,
2403 			       unsigned long *map_start,
2404 			       unsigned long *map_len, int km)
2405 {
2406 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
2407 	char *kaddr;
2408 	struct page *p;
2409 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2410 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2411 	unsigned long end_i = (start_offset + start + min_len - 1) >>
2412 		PAGE_CACHE_SHIFT;
2413 
2414 	if (i != end_i)
2415 		return -EINVAL;
2416 
2417 	if (i == 0) {
2418 		offset = start_offset;
2419 		*map_start = 0;
2420 	} else {
2421 		offset = 0;
2422 		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
2423 	}
2424 	if (start + min_len > eb->len) {
2425 		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2426 		WARN_ON(1);
2427 	}
2428 
2429 	p = extent_buffer_page(eb, i);
2430 	WARN_ON(!PageUptodate(p));
2431 	kaddr = kmap_atomic(p, km);
2432 	*token = kaddr;
2433 	*map = kaddr + offset;
2434 	*map_len = PAGE_CACHE_SIZE - offset;
2435 	return 0;
2436 }
2437 EXPORT_SYMBOL(map_private_extent_buffer);
2438 
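/*
 * Same as map_private_extent_buffer, but cooperates with the long lived
 * mapping cached in the buffer itself: an existing eb->map_token is
 * unmapped first and, if one was cached, the new mapping is saved back
 * into eb->map_token, eb->kaddr, eb->map_start and eb->map_len.
 */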
2439 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2440 		      unsigned long min_len,
2441 		      char **token, char **map,
2442 		      unsigned long *map_start,
2443 		      unsigned long *map_len, int km)
2444 {
2445 	int err;
2446 	int save = 0;
2447 	if (eb->map_token) {
2448 		unmap_extent_buffer(eb, eb->map_token, km);
2449 		eb->map_token = NULL;
2450 		save = 1;
2451 	}
2452 	err = map_private_extent_buffer(eb, start, min_len, token, map,
2453 				       map_start, map_len, km);
2454 	if (!err && save) {
2455 		eb->map_token = *token;
2456 		eb->kaddr = *map;
2457 		eb->map_start = *map_start;
2458 		eb->map_len = *map_len;
2459 	}
2460 	return err;
2461 }
2462 EXPORT_SYMBOL(map_extent_buffer);
2463 
2464 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2465 {
2466 	kunmap_atomic(token, km);
2467 }
2468 EXPORT_SYMBOL(unmap_extent_buffer);
2469 
2470 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2471 			  unsigned long start,
2472 			  unsigned long len)
2473 {
2474 	size_t cur;
2475 	size_t offset;
2476 	struct page *page;
2477 	char *kaddr;
2478 	char *ptr = (char *)ptrv;
2479 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2480 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2481 	int ret = 0;
2482 
2483 	WARN_ON(start > eb->len);
2484 	WARN_ON(start + len > eb->start + eb->len);
2485 
2486 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2487 
2488 	while(len > 0) {
2489 		page = extent_buffer_page(eb, i);
2490 		WARN_ON(!PageUptodate(page));
2491 
2492 		cur = min(len, (PAGE_CACHE_SIZE - offset));
2493 
2494 		kaddr = kmap_atomic(page, KM_USER0);
2495 		ret = memcmp(ptr, kaddr + offset, cur);
2496 		kunmap_atomic(kaddr, KM_USER0);
2497 		if (ret)
2498 			break;
2499 
2500 		ptr += cur;
2501 		len -= cur;
2502 		offset = 0;
2503 		i++;
2504 	}
2505 	return ret;
2506 }
2507 EXPORT_SYMBOL(memcmp_extent_buffer);
2508 
2509 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2510 			 unsigned long start, unsigned long len)
2511 {
2512 	size_t cur;
2513 	size_t offset;
2514 	struct page *page;
2515 	char *kaddr;
2516 	char *src = (char *)srcv;
2517 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2518 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2519 
2520 	WARN_ON(start > eb->len);
2521 	WARN_ON(start + len > eb->start + eb->len);
2522 
2523 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2524 
2525 	while(len > 0) {
2526 		page = extent_buffer_page(eb, i);
2527 		WARN_ON(!PageUptodate(page));
2528 
2529 		cur = min(len, PAGE_CACHE_SIZE - offset);
2530 		kaddr = kmap_atomic(page, KM_USER1);
2531 		memcpy(kaddr + offset, src, cur);
2532 		kunmap_atomic(kaddr, KM_USER1);
2533 
2534 		src += cur;
2535 		len -= cur;
2536 		offset = 0;
2537 		i++;
2538 	}
2539 }
2540 EXPORT_SYMBOL(write_extent_buffer);
2541 
2542 void memset_extent_buffer(struct extent_buffer *eb, char c,
2543 			  unsigned long start, unsigned long len)
2544 {
2545 	size_t cur;
2546 	size_t offset;
2547 	struct page *page;
2548 	char *kaddr;
2549 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2550 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2551 
2552 	WARN_ON(start > eb->len);
2553 	WARN_ON(start + len > eb->start + eb->len);
2554 
2555 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2556 
2557 	while(len > 0) {
2558 		page = extent_buffer_page(eb, i);
2559 		WARN_ON(!PageUptodate(page));
2560 
2561 		cur = min(len, PAGE_CACHE_SIZE - offset);
2562 		kaddr = kmap_atomic(page, KM_USER0);
2563 		memset(kaddr + offset, c, cur);
2564 		kunmap_atomic(kaddr, KM_USER0);
2565 
2566 		len -= cur;
2567 		offset = 0;
2568 		i++;
2569 	}
2570 }
2571 EXPORT_SYMBOL(memset_extent_buffer);
2572 
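/*
 * Copy @len bytes from @src into @dst, starting at the given offsets in
 * each buffer.  The destination pages are mapped directly while the
 * source bytes are pulled through read_extent_buffer, so the two buffers
 * do not need the same page alignment.
 */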
2573 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2574 			unsigned long dst_offset, unsigned long src_offset,
2575 			unsigned long len)
2576 {
2577 	u64 dst_len = dst->len;
2578 	size_t cur;
2579 	size_t offset;
2580 	struct page *page;
2581 	char *kaddr;
2582 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2583 	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2584 
2585 	WARN_ON(src->len != dst_len);
2586 
2587 	offset = (start_offset + dst_offset) &
2588 		((unsigned long)PAGE_CACHE_SIZE - 1);
2589 
2590 	while(len > 0) {
2591 		page = extent_buffer_page(dst, i);
2592 		WARN_ON(!PageUptodate(page));
2593 
2594 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2595 
2596 		kaddr = kmap_atomic(page, KM_USER0);
2597 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
2598 		kunmap_atomic(kaddr, KM_USER0);
2599 
2600 		src_offset += cur;
2601 		len -= cur;
2602 		offset = 0;
2603 		i++;
2604 	}
2605 }
2606 EXPORT_SYMBOL(copy_extent_buffer);
2607 
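/*
 * Low level helpers for memcpy_extent_buffer and memmove_extent_buffer.
 * move_pages tolerates overlapping ranges: within a single page it falls
 * back to memmove, and across two pages it copies backwards byte by
 * byte.  copy_pages does a plain forward memcpy and is only used when a
 * forward copy is safe.
 */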
2608 static void move_pages(struct page *dst_page, struct page *src_page,
2609 		       unsigned long dst_off, unsigned long src_off,
2610 		       unsigned long len)
2611 {
2612 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2613 	if (dst_page == src_page) {
2614 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2615 	} else {
2616 		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2617 		char *p = dst_kaddr + dst_off + len;
2618 		char *s = src_kaddr + src_off + len;
2619 
2620 		while (len--)
2621 			*--p = *--s;
2622 
2623 		kunmap_atomic(src_kaddr, KM_USER1);
2624 	}
2625 	kunmap_atomic(dst_kaddr, KM_USER0);
2626 }
2627 
2628 static void copy_pages(struct page *dst_page, struct page *src_page,
2629 		       unsigned long dst_off, unsigned long src_off,
2630 		       unsigned long len)
2631 {
2632 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2633 	char *src_kaddr;
2634 
2635 	if (dst_page != src_page)
2636 		src_kaddr = kmap_atomic(src_page, KM_USER1);
2637 	else
2638 		src_kaddr = dst_kaddr;
2639 
2640 	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
2641 	kunmap_atomic(dst_kaddr, KM_USER0);
2642 	if (dst_page != src_page)
2643 		kunmap_atomic(src_kaddr, KM_USER1);
2644 }
2645 
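/*
 * Copy @len bytes between two offsets inside the same extent buffer, one
 * page bounded chunk at a time.  The copy runs forwards, so callers must
 * make sure that is safe; memmove_extent_buffer handles the general
 * overlapping case.
 */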
2646 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2647 			   unsigned long src_offset, unsigned long len)
2648 {
2649 	size_t cur;
2650 	size_t dst_off_in_page;
2651 	size_t src_off_in_page;
2652 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2653 	unsigned long dst_i;
2654 	unsigned long src_i;
2655 
2656 	if (src_offset + len > dst->len) {
2657 		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
2658 		       src_offset, len, dst->len);
2659 		BUG_ON(1);
2660 	}
2661 	if (dst_offset + len > dst->len) {
2662 		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
2663 		       dst_offset, len, dst->len);
2664 		BUG_ON(1);
2665 	}
2666 
2667 	while(len > 0) {
2668 		dst_off_in_page = (start_offset + dst_offset) &
2669 			((unsigned long)PAGE_CACHE_SIZE - 1);
2670 		src_off_in_page = (start_offset + src_offset) &
2671 			((unsigned long)PAGE_CACHE_SIZE - 1);
2672 
2673 		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2674 		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2675 
2676 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2677 					       src_off_in_page));
2678 		cur = min_t(unsigned long, cur,
2679 			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
2680 
2681 		copy_pages(extent_buffer_page(dst, dst_i),
2682 			   extent_buffer_page(dst, src_i),
2683 			   dst_off_in_page, src_off_in_page, cur);
2684 
2685 		src_offset += cur;
2686 		dst_offset += cur;
2687 		len -= cur;
2688 	}
2689 }
2690 EXPORT_SYMBOL(memcpy_extent_buffer);
2691 
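/*
 * memmove() equivalent for ranges inside a single extent buffer.  When
 * the destination starts below the source, a forward copy is safe and
 * the work is handed to memcpy_extent_buffer; otherwise the copy runs
 * from the end of the range towards the start, one page bounded chunk at
 * a time, via move_pages.
 */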
2692 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2693 			   unsigned long src_offset, unsigned long len)
2694 {
2695 	size_t cur;
2696 	size_t dst_off_in_page;
2697 	size_t src_off_in_page;
2698 	unsigned long dst_end = dst_offset + len - 1;
2699 	unsigned long src_end = src_offset + len - 1;
2700 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2701 	unsigned long dst_i;
2702 	unsigned long src_i;
2703 
2704 	if (src_offset + len > dst->len) {
2705 		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2706 		       src_offset, len, dst->len);
2707 		BUG_ON(1);
2708 	}
2709 	if (dst_offset + len > dst->len) {
2710 		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2711 		       dst_offset, len, dst->len);
2712 		BUG_ON(1);
2713 	}
2714 	if (dst_offset < src_offset) {
2715 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2716 		return;
2717 	}
2718 	while(len > 0) {
2719 		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2720 		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2721 
2722 		dst_off_in_page = (start_offset + dst_end) &
2723 			((unsigned long)PAGE_CACHE_SIZE - 1);
2724 		src_off_in_page = (start_offset + src_end) &
2725 			((unsigned long)PAGE_CACHE_SIZE - 1);
2726 
2727 		cur = min_t(unsigned long, len, src_off_in_page + 1);
2728 		cur = min(cur, dst_off_in_page + 1);
2729 		move_pages(extent_buffer_page(dst, dst_i),
2730 			   extent_buffer_page(dst, src_i),
2731 			   dst_off_in_page - cur + 1,
2732 			   src_off_in_page - cur + 1, cur);
2733 
2734 		dst_end -= cur;
2735 		src_end -= cur;
2736 		len -= cur;
2737 	}
2738 }
2739 EXPORT_SYMBOL(memmove_extent_buffer);
2740