/* xref: /openbmc/linux/fs/btrfs/extent_map.c (revision bcd987fe) */
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				       unsigned long extra_flags,
				       void (*ctor)(void *, struct kmem_cache *,
						    unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_map_tree *tree;
	get_extent_t *get_extent;
};

int __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					    sizeof(struct extent_map), 0,
					    NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	extent_state_cache = btrfs_cache_create("extent_state",
					    sizeof(struct extent_state), 0,
					    NULL);
	if (!extent_state_cache)
		goto free_map_cache;
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
					    sizeof(struct extent_buffer), 0,
					    NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
free_map_cache:
	kmem_cache_destroy(extent_map_cache);
	return -ENOMEM;
}

void extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       state->in_tree, atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);
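
/*
 * Example (illustrative sketch, not part of the original file): a
 * filesystem would typically embed an extent_map_tree in its per-inode
 * structure and initialize it when the inode is set up.  The container
 * 'my_inode_info' and helper 'my_inode_setup' are hypothetical names:
 *
 *	struct my_inode_info {
 *		struct extent_map_tree extent_tree;
 *		struct inode vfs_inode;
 *	};
 *
 *	static void my_inode_setup(struct my_inode_info *mi)
 *	{
 *		extent_map_tree_init(&mi->extent_tree,
 *				     mi->vfs_inode.i_mapping, GFP_NOFS);
 *	}
 */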

void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;
	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del_init(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);
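
/*
 * Example (sketch): extent_map objects are reference counted.  A typical
 * caller allocates one, fills it in, and eventually drops its reference;
 * the object only returns to the slab cache when the last reference
 * goes away.  The field values here are illustrative:
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->end = 4095;
 *	em->block_start = EXTENT_MAP_HOLE;
 *	...
 *	free_extent_map(em);	(drops the caller's reference)
 */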

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				   struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == EXTENT_MAP_INLINE &&
		      prev->block_start == EXTENT_MAP_INLINE) ||
		     (em->block_start == EXTENT_MAP_DELALLOC &&
		      prev->block_start == EXTENT_MAP_DELALLOC) ||
		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
		      em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);
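
/*
 * Example (sketch): inserting a mapping and handling collisions.  On
 * success the tree takes its own reference, so the caller still drops
 * the one it got from alloc_extent_map():
 *
 *	ret = add_extent_mapping(tree, em);
 *	if (ret == -EEXIST) {
 *		(an overlapping mapping already exists; find it with
 *		 lookup_extent_mapping() instead of inserting)
 *	}
 *	free_extent_map(em);
 */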

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
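
/*
 * Example (sketch): a lookup returns the first intersecting map, which
 * may not cover the whole requested range; callers check the bounds and
 * drop the reference the lookup took:
 *
 *	em = lookup_extent_mapping(tree, start, end);
 *	if (em && !IS_ERR(em)) {
 *		if (em->start > start || em->end < end)
 *			(only part of [start, end] is covered; loop
 *			 for the rest)
 *		free_extent_map(em);
 *	}
 */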

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged because the end_io handlers need to be able to
 * do operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			    struct extent_state *state, int bits, int wake,
			    int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (i.e. for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

static void set_state_bits(struct extent_map_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);
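
/*
 * Example (sketch): the 'exclusive' mode is how lock_extent() below is
 * built.  If any part of the range already carries the bit, the call
 * fails and reports where the conflict starts:
 *
 *	u64 failed_start;
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		(wait on [failed_start, end] and retry)
 */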

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);
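
/*
 * Example (sketch): a common write-path sequence built from the
 * wrappers above is dirty, then writeback, then wait.  The writer
 * marks the range dirty; when the IO is issued the dirty bit is
 * exchanged for writeback; the end_io handler clears writeback and
 * wakes anyone blocked in the wait:
 *
 *	set_extent_dirty(tree, start, end, GFP_NOFS);
 *	...(later, when the IO is issued)...
 *	clear_extent_dirty(tree, start, end, GFP_NOFS);
 *	set_extent_writeback(tree, start, end, GFP_NOFS);
 *	...(a syncing task can then block until the IO completes)...
 *	wait_on_extent_writeback(tree, start, end);
 */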

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
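
/*
 * Example (sketch): lock_extent()/unlock_extent() bracket range
 * operations much like a byte-range mutex:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	...(read or modify the extent state for [start, end])...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */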

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
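
/*
 * Example (sketch): walking every dirty range from a starting offset.
 * A return value of 0 means a range was found:
 *
 *	u64 found_start, found_end;
 *	u64 cur = 0;
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		...(process [found_start, found_end])...
 *		cur = found_end + 1;
 *	}
 */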

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			struct extent_state *prev_state;
			struct rb_node *prev_node = node;
			while (1) {
				prev_node = rb_prev(prev_node);
				if (!prev_node)
					break;
				prev_state = rb_entry(prev_node,
						      struct extent_state,
						      rb_node);
				if (!(prev_state->state & EXTENT_DELALLOC))
					break;
				state = prev_state;
				node = prev_node;
			}
		}
		if (state->state & EXTENT_LOCKED) {
			DEFINE_WAIT(wait);
			atomic_inc(&state->refs);
			prepare_to_wait(&state->wq, &wait,
					TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tree->lock);
			schedule();
			write_lock_irq(&tree->lock);
			finish_wait(&state->wq, &wait);
			free_extent_state(state);
			goto search_again;
		}
		state->state |= EXTENT_LOCKED;
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

u64 count_range_bits(struct extent_map_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		printk("search_end %Lu start %Lu\n", search_end, cur_start);
		WARN_ON(1);
		return 0;
	}

	write_lock_irq(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return total_bytes;
}
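
/*
 * Example (sketch): counting how many bytes in the first megabyte are
 * dirty, and learning where the first dirty byte lives.  Passing
 * (u64)-1 as max_bytes means "no limit":
 *
 *	u64 first = 0;
 *	u64 bytes = count_range_bits(tree, &first, 1024 * 1024 - 1,
 *				     (u64)-1, EXTENT_DIRTY);
 */
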
/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
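
/*
 * Example (sketch): the private field can stash one u64 per extent
 * state, keyed by the state's exact start offset (e.g. a per-block
 * checksum).  Both calls return -ENOENT if no state begins exactly at
 * 'start':
 *
 *	set_state_private(tree, start, csum);
 *	...
 *	if (!get_state_private(tree, start, &csum))
 *		...(csum is valid)...
 */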

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			 bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio)
{
	u64 maxsector;
	int ret = 0;

	bio_get(bio);

	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector < bio->bi_sector) {
		printk("sector too large max %Lu got %llu\n", maxsector,
			(unsigned long long)bio->bi_sector);
		WARN_ON(1);
	}

	submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		/* we cannot continue without a bio to fill */
		printk("failed to allocate bio nr %d\n", nr);
		return -ENOMEM;
	}
	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;
	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio);
	}

	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_map_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			char *userpage;
			iosize = PAGE_CACHE_SIZE - page_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			nr -= page->index;
			ret = submit_extent_page(READ, tree, page,
					 sector, iosize, page_offset,
					 bdev, bio, nr,
					 end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			    get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio);
	if (bio)
		submit_one_bio(READ, bio);
	return ret;
}
EXPORT_SYMBOL(extent_read_full_page);
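
/*
 * Example (sketch): a filesystem wires this up as its readpage
 * address_space operation, supplying its own get_extent callback.
 * 'MY_I' and 'my_get_extent' are hypothetical names:
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree =
 *			&MY_I(page->mapping->host)->extent_tree;
 *		return extent_read_full_page(tree, page, my_get_extent);
 *	}
 */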

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      void *data)
{
	struct inode *inode = page->mapping->host;
	struct extent_page_data *epd = data;
	struct extent_map_tree *tree = epd->tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 delalloc_start;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		char *userpage;

		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);

		userpage = kmap_atomic(page, KM_USER0);
		memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(userpage, KM_USER0);
	}

	set_page_extent_mapped(page);

	delalloc_start = start;
	delalloc_end = 0;
	while (delalloc_end < page_end) {
		nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
						       &delalloc_end,
						       128 * 1024 * 1024);
		if (nr_delalloc == 0) {
			delalloc_start = delalloc_end + 1;
			continue;
		}
		tree->ops->fill_delalloc(inode, delalloc_start,
					 delalloc_end);
		clear_extent_bit(tree, delalloc_start,
				 delalloc_end,
				 EXTENT_LOCKED | EXTENT_DELALLOC,
				 1, 0, GFP_NOFS);
		delalloc_start = delalloc_end + 1;
	}
	lock_extent(tree, start, page_end, GFP_NOFS);

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
				   EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
						cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			unsigned long max_nr = end_index + 1;
			set_range_writeback(tree, cur, cur + iosize - 1);
			if (!PageWriteback(page)) {
				printk("warning page %lu not writeback, "
				       "cur %llu end %llu\n", page->index,
				       (unsigned long long)cur,
				       (unsigned long long)end);
			}

			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 &epd->bio, max_nr,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}
1959 
1960 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1961 
1962 /* Taken directly from 2.6.23 for 2.6.18 back port */
1963 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
1964                                 void *data);
1965 
1966 /**
1967  * write_cache_pages - walk the list of dirty pages of the given address space
1968  * and write all of them.
1969  * @mapping: address space structure to write
1970  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1971  * @writepage: function called for each page
1972  * @data: data passed to writepage function
1973  *
1974  * If a page is already under I/O, write_cache_pages() skips it, even
1975  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1976  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1977  * and msync() need to guarantee that all the data which was dirty at the time
1978  * the call was made gets new I/O started against it.  If wbc->sync_mode is
1979  * WB_SYNC_ALL then we were called for data integrity and we must wait for
1980  * existing IO to complete.
1981  */
1982 static int write_cache_pages(struct address_space *mapping,
1983 		      struct writeback_control *wbc, writepage_t writepage,
1984 		      void *data)
1985 {
1986 	struct backing_dev_info *bdi = mapping->backing_dev_info;
1987 	int ret = 0;
1988 	int done = 0;
1989 	struct pagevec pvec;
1990 	int nr_pages;
1991 	pgoff_t index;
1992 	pgoff_t end;		/* Inclusive */
1993 	int scanned = 0;
1994 	int range_whole = 0;
1995 
1996 	if (wbc->nonblocking && bdi_write_congested(bdi)) {
1997 		wbc->encountered_congestion = 1;
1998 		return 0;
1999 	}
2000 
2001 	pagevec_init(&pvec, 0);
2002 	if (wbc->range_cyclic) {
2003 		index = mapping->writeback_index; /* Start from prev offset */
2004 		end = -1;
2005 	} else {
2006 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2007 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
2008 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2009 			range_whole = 1;
2010 		scanned = 1;
2011 	}
2012 retry:
2013 	while (!done && (index <= end) &&
2014 	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2015 					      PAGECACHE_TAG_DIRTY,
2016 					      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
2017 		unsigned i;
2018 
2019 		scanned = 1;
2020 		for (i = 0; i < nr_pages; i++) {
2021 			struct page *page = pvec.pages[i];
2022 
2023 			/*
2024 			 * At this point we hold neither mapping->tree_lock nor
2025 			 * lock on the page itself: the page may be truncated or
2026 			 * invalidated (changing page->mapping to NULL), or even
2027 			 * swizzled back from swapper_space to tmpfs file
2028 			 * mapping
2029 			 */
2030 			lock_page(page);
2031 
2032 			if (unlikely(page->mapping != mapping)) {
2033 				unlock_page(page);
2034 				continue;
2035 			}
2036 
2037 			if (!wbc->range_cyclic && page->index > end) {
2038 				done = 1;
2039 				unlock_page(page);
2040 				continue;
2041 			}
2042 
2043 			if (wbc->sync_mode != WB_SYNC_NONE)
2044 				wait_on_page_writeback(page);
2045 
2046 			if (PageWriteback(page) ||
2047 			    !clear_page_dirty_for_io(page)) {
2048 				unlock_page(page);
2049 				continue;
2050 			}
2051 
2052 			ret = (*writepage)(page, wbc, data);
2053 
2054 			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2055 				unlock_page(page);
2056 				ret = 0;
2057 			}
2058 			if (ret || (--(wbc->nr_to_write) <= 0))
2059 				done = 1;
2060 			if (wbc->nonblocking && bdi_write_congested(bdi)) {
2061 				wbc->encountered_congestion = 1;
2062 				done = 1;
2063 			}
2064 		}
2065 		pagevec_release(&pvec);
2066 		cond_resched();
2067 	}
2068 	if (!scanned && !done) {
2069 		/*
2070 		 * We hit the last page and there is more work to be done: wrap
2071 		 * back to the start of the file
2072 		 */
2073 		scanned = 1;
2074 		index = 0;
2075 		goto retry;
2076 	}
2077 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2078 		mapping->writeback_index = index;
2079 	return ret;
2080 }
2081 #endif
2082 
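/*
 * writepage entry point.  After writing the locked page itself, up to
 * 64 dirty pages that follow it in the file are written as well so the
 * resulting bios can merge into larger requests.
 */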
2083 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
2084 			  get_extent_t *get_extent,
2085 			  struct writeback_control *wbc)
2086 {
2087 	int ret;
2088 	struct address_space *mapping = page->mapping;
2089 	struct extent_page_data epd = {
2090 		.bio = NULL,
2091 		.tree = tree,
2092 		.get_extent = get_extent,
2093 	};
2094 	struct writeback_control wbc_writepages = {
2095 		.bdi		= wbc->bdi,
2096 		.sync_mode	= WB_SYNC_NONE,
2097 		.older_than_this = NULL,
2098 		.nr_to_write	= 64,
2099 		.range_start	= page_offset(page) + PAGE_CACHE_SIZE,
2100 		.range_end	= (loff_t)-1,
2101 	};
2102 
2103 
2104 	ret = __extent_writepage(page, wbc, &epd);
2105 
2106 	write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2107 	if (epd.bio) {
2108 		submit_one_bio(WRITE, epd.bio);
2109 	}
2110 	return ret;
2111 }
2112 EXPORT_SYMBOL(extent_write_full_page);
2113 
2114 
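/*
 * writepages entry point: feed every dirty page that write_cache_pages
 * finds through __extent_writepage, then submit whatever bio is still
 * being built.
 */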
2115 int extent_writepages(struct extent_map_tree *tree,
2116 		      struct address_space *mapping,
2117 		      get_extent_t *get_extent,
2118 		      struct writeback_control *wbc)
2119 {
2120 	int ret = 0;
2121 	struct extent_page_data epd = {
2122 		.bio = NULL,
2123 		.tree = tree,
2124 		.get_extent = get_extent,
2125 	};
2126 
2127 	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2128 	if (epd.bio) {
2129 		submit_one_bio(WRITE, epd.bio);
2130 	}
2131 	return ret;
2132 }
2133 EXPORT_SYMBOL(extent_writepages);
2134 
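/*
 * readpages entry point.  Pages are pulled off the readahead list,
 * inserted into the page cache and lru by hand (the helpers for that
 * aren't exported), and read through a single shared bio when possible.
 */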
2135 int extent_readpages(struct extent_map_tree *tree,
2136 		     struct address_space *mapping,
2137 		     struct list_head *pages, unsigned nr_pages,
2138 		     get_extent_t get_extent)
2139 {
2140 	struct bio *bio = NULL;
2141 	unsigned page_idx;
2142 	struct pagevec pvec;
2143 
2144 	pagevec_init(&pvec, 0);
2145 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2146 		struct page *page = list_entry(pages->prev, struct page, lru);
2147 
2148 		prefetchw(&page->flags);
2149 		list_del(&page->lru);
2150 		/*
2151 		 * what we want to do here is call add_to_page_cache_lru,
2152 		 * but that isn't exported, so we reproduce it here
2153 		 */
2154 		if (!add_to_page_cache(page, mapping,
2155 					page->index, GFP_KERNEL)) {
2156 
2157 			/* open coding of lru_cache_add, also not exported */
2158 			page_cache_get(page);
2159 			if (!pagevec_add(&pvec, page))
2160 				__pagevec_lru_add(&pvec);
2161 			__extent_read_full_page(tree, page, get_extent, &bio);
2162 		}
2163 		page_cache_release(page);
2164 	}
2165 	if (pagevec_count(&pvec))
2166 		__pagevec_lru_add(&pvec);
2167 	BUG_ON(!list_empty(pages));
2168 	if (bio)
2169 		submit_one_bio(READ, bio);
2170 	return 0;
2171 }
2172 EXPORT_SYMBOL(extent_readpages);
2173 
2174 /*
2175  * basic invalidatepage code, this waits on any locked or writeback
2176  * ranges corresponding to the page, and then deletes any extent state
2177  * records from the tree
2178  */
2179 int extent_invalidatepage(struct extent_map_tree *tree,
2180 			  struct page *page, unsigned long offset)
2181 {
2182 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2183 	u64 end = start + PAGE_CACHE_SIZE - 1;
2184 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2185 
2186 	start += (offset + blocksize - 1) & ~(blocksize - 1);
2187 	if (start > end)
2188 		return 0;
2189 
2190 	lock_extent(tree, start, end, GFP_NOFS);
2191 	wait_on_extent_writeback(tree, start, end);
2192 	clear_extent_bit(tree, start, end,
2193 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2194 			 1, 1, GFP_NOFS);
2195 	return 0;
2196 }
2197 EXPORT_SYMBOL(extent_invalidatepage);
2198 
2199 /*
2200  * simple commit_write call, set_range_dirty is used to mark both
2201  * the pages and the extent records as dirty
2202  */
2203 int extent_commit_write(struct extent_map_tree *tree,
2204 			struct inode *inode, struct page *page,
2205 			unsigned from, unsigned to)
2206 {
2207 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2208 
2209 	set_page_extent_mapped(page);
2210 	set_page_dirty(page);
2211 
2212 	if (pos > inode->i_size) {
2213 		i_size_write(inode, pos);
2214 		mark_inode_dirty(inode);
2215 	}
2216 	return 0;
2217 }
2218 EXPORT_SYMBOL(extent_commit_write);
2219 
2220 int extent_prepare_write(struct extent_map_tree *tree,
2221 			 struct inode *inode, struct page *page,
2222 			 unsigned from, unsigned to, get_extent_t *get_extent)
2223 {
2224 	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2225 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2226 	u64 block_start;
2227 	u64 orig_block_start;
2228 	u64 block_end;
2229 	u64 cur_end;
2230 	struct extent_map *em;
2231 	unsigned blocksize = 1 << inode->i_blkbits;
2232 	size_t page_offset = 0;
2233 	size_t block_off_start;
2234 	size_t block_off_end;
2235 	int err = 0;
2236 	int iocount = 0;
2237 	int ret = 0;
2238 	int isnew;
2239 
2240 	set_page_extent_mapped(page);
2241 
2242 	block_start = (page_start + from) & ~((u64)blocksize - 1);
2243 	block_end = (page_start + to - 1) | (blocksize - 1);
2244 	orig_block_start = block_start;
2245 
2246 	lock_extent(tree, page_start, page_end, GFP_NOFS);
2247 	while(block_start <= block_end) {
2248 		em = get_extent(inode, page, page_offset, block_start,
2249 				block_end, 1);
2250 		if (IS_ERR(em) || !em) {
			/* propagate the failure instead of returning 0 */
			err = IS_ERR(em) ? PTR_ERR(em) : -EIO;
2251 			goto err;
2252 		}
2253 		cur_end = min(block_end, em->end);
2254 		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2255 		block_off_end = block_off_start + blocksize;
2256 		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2257 
2258 		if (!PageUptodate(page) && isnew &&
2259 		    (block_off_end > to || block_off_start < from)) {
2260 			void *kaddr;
2261 
2262 			kaddr = kmap_atomic(page, KM_USER0);
2263 			if (block_off_end > to)
2264 				memset(kaddr + to, 0, block_off_end - to);
2265 			if (block_off_start < from)
2266 				memset(kaddr + block_off_start, 0,
2267 				       from - block_off_start);
2268 			flush_dcache_page(page);
2269 			kunmap_atomic(kaddr, KM_USER0);
2270 		}
2271 		if ((em->block_start != EXTENT_MAP_HOLE &&
2272 		     em->block_start != EXTENT_MAP_INLINE) &&
2273 		    !isnew && !PageUptodate(page) &&
2274 		    (block_off_end > to || block_off_start < from) &&
2275 		    !test_range_bit(tree, block_start, cur_end,
2276 				    EXTENT_UPTODATE, 1)) {
2277 			u64 sector;
2278 			u64 extent_offset = block_start - em->start;
2279 			size_t iosize;
2280 			sector = (em->block_start + extent_offset) >> 9;
2281 			iosize = (cur_end - block_start + blocksize) &
2282 				~((u64)blocksize - 1);
2283 			/*
2284 			 * we've already got the extent locked, but we
2285 			 * need to split the state such that our end_bio
2286 			 * handler can clear the lock.
2287 			 */
2288 			set_extent_bit(tree, block_start,
2289 				       block_start + iosize - 1,
2290 				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2291 			ret = submit_extent_page(READ, tree, page,
2292 					 sector, iosize, page_offset, em->bdev,
2293 					 NULL, 1,
2294 					 end_bio_extent_preparewrite);
2295 			iocount++;
2296 			block_start = block_start + iosize;
2297 		} else {
2298 			set_extent_uptodate(tree, block_start, cur_end,
2299 					    GFP_NOFS);
2300 			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2301 			block_start = cur_end + 1;
2302 		}
2303 		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2304 		free_extent_map(em);
2305 	}
2306 	if (iocount) {
2307 		wait_extent_bit(tree, orig_block_start,
2308 				block_end, EXTENT_LOCKED);
2309 	}
2310 	check_page_uptodate(tree, page);
2311 err:
2312 	/* FIXME, zero out newly allocated blocks on error */
2313 	return err;
2314 }
2315 EXPORT_SYMBOL(extent_prepare_write);
2316 
2317 /*
2318  * a helper for releasepage.  As long as there are no locked extents
2319  * in the range corresponding to the page, both state records and extent
2320  * map records are removed
2321  */
2322 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2323 {
2324 	struct extent_map *em;
2325 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2326 	u64 end = start + PAGE_CACHE_SIZE - 1;
2327 	u64 orig_start = start;
2328 	int ret = 1;
2329 
2330 	while (start <= end) {
2331 		em = lookup_extent_mapping(tree, start, end);
2332 		if (!em || IS_ERR(em))
2333 			break;
2334 		if (!test_range_bit(tree, em->start, em->end,
2335 				    EXTENT_LOCKED, 0)) {
2336 			remove_extent_mapping(tree, em);
2337 			/* once for the rb tree */
2338 			free_extent_map(em);
2339 		}
2340 		start = em->end + 1;
2341 		/* once for us */
2342 		free_extent_map(em);
2343 	}
2344 	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2345 		ret = 0;
2346 	else
2347 		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2348 				 1, 1, GFP_NOFS);
2349 	return ret;
2350 }
2351 EXPORT_SYMBOL(try_release_extent_mapping);
2352 
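/*
 * bmap helper: map a file block number to a device sector.  Holes and
 * inline extents have no sector, so zero is returned for them.
 */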
2353 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2354 		get_extent_t *get_extent)
2355 {
2356 	struct inode *inode = mapping->host;
2357 	u64 start = (u64)iblock << inode->i_blkbits; /* sector_t may be 32 bits */
2358 	u64 end = start + (1 << inode->i_blkbits) - 1;
2359 	sector_t sector = 0;
2360 	struct extent_map *em;
2361 
2362 	em = get_extent(inode, NULL, 0, start, end, 0);
2363 	if (!em || IS_ERR(em))
2364 		return 0;
2365 
2366 	if (em->block_start == EXTENT_MAP_INLINE ||
2367 	    em->block_start == EXTENT_MAP_HOLE)
2368 		goto out;
2369 
2370 	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2371 out:
2372 	free_extent_map(em);
2373 	return sector;
2374 }
2375 
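/*
 * small per-tree LRU of recently used extent buffers.  Each entry holds
 * a reference; once BUFFER_LRU_MAX entries are cached, the coldest one
 * is dropped to make room.
 */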
2376 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2377 {
2378 	if (list_empty(&eb->lru)) {
2379 		extent_buffer_get(eb);
2380 		list_add(&eb->lru, &tree->buffer_lru);
2381 		tree->lru_size++;
2382 		if (tree->lru_size >= BUFFER_LRU_MAX) {
2383 			struct extent_buffer *rm;
2384 			rm = list_entry(tree->buffer_lru.prev,
2385 					struct extent_buffer, lru);
2386 			tree->lru_size--;
2387 			list_del_init(&rm->lru);
2388 			free_extent_buffer(rm);
2389 		}
2390 	} else
2391 		list_move(&eb->lru, &tree->buffer_lru);
2392 	return 0;
2393 }

2394 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2395 				      u64 start, unsigned long len)
2396 {
2397 	struct list_head *lru = &tree->buffer_lru;
2398 	struct list_head *cur = lru->next;
2399 	struct extent_buffer *eb;
2400 
2401 	if (list_empty(lru))
2402 		return NULL;
2403 
2404 	do {
2405 		eb = list_entry(cur, struct extent_buffer, lru);
2406 		if (eb->start == start && eb->len == len) {
2407 			extent_buffer_get(eb);
2408 			return eb;
2409 		}
2410 		cur = cur->next;
2411 	} while (cur != lru);
2412 	return NULL;
2413 }
2414 
2415 static inline unsigned long num_extent_pages(u64 start, u64 len)
2416 {
2417 	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2418 		(start >> PAGE_CACHE_SHIFT);
2419 }
2420 
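/*
 * the first page of an extent buffer is cached in the buffer itself;
 * the rest are looked up in the page cache radix tree by index
 */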
2421 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2422 					      unsigned long i)
2423 {
2424 	struct page *p;
2425 	struct address_space *mapping;
2426 
2427 	if (i == 0)
2428 		return eb->first_page;
2429 	i += eb->start >> PAGE_CACHE_SHIFT;
2430 	mapping = eb->first_page->mapping;
2431 	read_lock_irq(&mapping->tree_lock);
2432 	p = radix_tree_lookup(&mapping->page_tree, i);
2433 	read_unlock_irq(&mapping->tree_lock);
2434 	return p;
2435 }
2436 
2437 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2438 						   u64 start,
2439 						   unsigned long len,
2440 						   gfp_t mask)
2441 {
2442 	struct extent_buffer *eb = NULL;
2443 
2444 	spin_lock(&tree->lru_lock);
2445 	eb = find_lru(tree, start, len);
2446 	spin_unlock(&tree->lru_lock);
2447 	if (eb) {
2448 		return eb;
2449 	}
2450 
2451 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
2452 	INIT_LIST_HEAD(&eb->lru);
2453 	eb->start = start;
2454 	eb->len = len;
2455 	atomic_set(&eb->refs, 1);
2456 
2457 	return eb;
2458 }
2459 
2460 static void __free_extent_buffer(struct extent_buffer *eb)
2461 {
2462 	kmem_cache_free(extent_buffer_cache, eb);
2463 }
2464 
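/*
 * allocate (or find in the LRU) the extent buffer covering [start,
 * start + len).  The backing pages are pinned, tagged through
 * page->private so writepage can recognize them, and checked so
 * EXTENT_UPTODATE can be set when every page is already uptodate.
 */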
2465 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2466 					  u64 start, unsigned long len,
2467 					  struct page *page0,
2468 					  gfp_t mask)
2469 {
2470 	unsigned long num_pages = num_extent_pages(start, len);
2471 	unsigned long i;
2472 	unsigned long index = start >> PAGE_CACHE_SHIFT;
2473 	struct extent_buffer *eb;
2474 	struct page *p;
2475 	struct address_space *mapping = tree->mapping;
2476 	int uptodate = 1;
2477 
2478 	eb = __alloc_extent_buffer(tree, start, len, mask);
2479 	if (!eb || IS_ERR(eb))
2480 		return NULL;
2481 
2482 	if (eb->flags & EXTENT_BUFFER_FILLED)
2483 		goto lru_add;
2484 
2485 	if (page0) {
2486 		eb->first_page = page0;
2487 		i = 1;
2488 		index++;
2489 		page_cache_get(page0);
2490 		mark_page_accessed(page0);
2491 		set_page_extent_mapped(page0);
2492 		WARN_ON(!PageUptodate(page0));
2493 		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2494 				 len << 2);
2495 	} else {
2496 		i = 0;
2497 	}
2498 	for (; i < num_pages; i++, index++) {
2499 		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2500 		if (!p) {
2501 			WARN_ON(1);
2502 			goto fail;
2503 		}
2504 		set_page_extent_mapped(p);
2505 		mark_page_accessed(p);
2506 		if (i == 0) {
2507 			eb->first_page = p;
2508 			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2509 					 len << 2);
2510 		} else {
2511 			set_page_private(p, EXTENT_PAGE_PRIVATE);
2512 		}
2513 		if (!PageUptodate(p))
2514 			uptodate = 0;
2515 		unlock_page(p);
2516 	}
2517 	if (uptodate)
2518 		eb->flags |= EXTENT_UPTODATE;
2519 	eb->flags |= EXTENT_BUFFER_FILLED;
2520 
2521 lru_add:
2522 	spin_lock(&tree->lru_lock);
2523 	add_lru(tree, eb);
2524 	spin_unlock(&tree->lru_lock);
2525 	return eb;
2526 
2527 fail:
2528 	spin_lock(&tree->lru_lock);
2529 	list_del_init(&eb->lru);
2530 	spin_unlock(&tree->lru_lock);
2531 	if (!atomic_dec_and_test(&eb->refs))
2532 		return NULL;
2533 	for (index = 1; index < i; index++) {
2534 		page_cache_release(extent_buffer_page(eb, index));
2535 	}
2536 	if (i > 0)
2537 		page_cache_release(extent_buffer_page(eb, 0));
2538 	__free_extent_buffer(eb);
2539 	return NULL;
2540 }
2541 EXPORT_SYMBOL(alloc_extent_buffer);
2542 
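/*
 * like alloc_extent_buffer, but only uses pages already present in the
 * page cache; it fails instead of allocating anything new
 */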
2543 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2544 					 u64 start, unsigned long len,
2545 					  gfp_t mask)
2546 {
2547 	unsigned long num_pages = num_extent_pages(start, len);
2548 	unsigned long i;
2549 	unsigned long index = start >> PAGE_CACHE_SHIFT;
2550 	struct extent_buffer *eb;
2551 	struct page *p;
2552 	struct address_space *mapping = tree->mapping;
2553 	int uptodate = 1;
2554 
2555 	eb = __alloc_extent_buffer(tree, start, len, mask);
2556 	if (!eb || IS_ERR(eb))
2557 		return NULL;
2558 
2559 	if (eb->flags & EXTENT_BUFFER_FILLED)
2560 		goto lru_add;
2561 
2562 	for (i = 0; i < num_pages; i++, index++) {
2563 		p = find_lock_page(mapping, index);
2564 		if (!p) {
2565 			goto fail;
2566 		}
2567 		set_page_extent_mapped(p);
2568 		mark_page_accessed(p);
2569 
2570 		if (i == 0) {
2571 			eb->first_page = p;
2572 			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2573 					 len << 2);
2574 		} else {
2575 			set_page_private(p, EXTENT_PAGE_PRIVATE);
2576 		}
2577 
2578 		if (!PageUptodate(p))
2579 			uptodate = 0;
2580 		unlock_page(p);
2581 	}
2582 	if (uptodate)
2583 		eb->flags |= EXTENT_UPTODATE;
2584 	eb->flags |= EXTENT_BUFFER_FILLED;
2585 
2586 lru_add:
2587 	spin_lock(&tree->lru_lock);
2588 	add_lru(tree, eb);
2589 	spin_unlock(&tree->lru_lock);
2590 	return eb;
2591 fail:
2592 	spin_lock(&tree->lru_lock);
2593 	list_del_init(&eb->lru);
2594 	spin_unlock(&tree->lru_lock);
2595 	if (!atomic_dec_and_test(&eb->refs))
2596 		return NULL;
2597 	for (index = 1; index < i; index++) {
2598 		page_cache_release(extent_buffer_page(eb, index));
2599 	}
2600 	if (i > 0)
2601 		page_cache_release(extent_buffer_page(eb, 0));
2602 	__free_extent_buffer(eb);
2603 	return NULL;
2604 }
2605 EXPORT_SYMBOL(find_extent_buffer);
2606 
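/*
 * drop a reference on an extent buffer; the last reference releases
 * every backing page and frees the buffer itself
 */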
2607 void free_extent_buffer(struct extent_buffer *eb)
2608 {
2609 	unsigned long i;
2610 	unsigned long num_pages;
2611 
2612 	if (!eb)
2613 		return;
2614 
2615 	if (!atomic_dec_and_test(&eb->refs))
2616 		return;
2617 
2618 	WARN_ON(!list_empty(&eb->lru));
2619 	num_pages = num_extent_pages(eb->start, eb->len);
2620 
2621 	for (i = 1; i < num_pages; i++) {
2622 		page_cache_release(extent_buffer_page(eb, i));
2623 	}
2624 	page_cache_release(extent_buffer_page(eb, 0));
2625 	__free_extent_buffer(eb);
2626 }
2627 EXPORT_SYMBOL(free_extent_buffer);
2628 
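/*
 * clear the dirty state for an extent buffer: the EXTENT_DIRTY bits in
 * the tree, the page dirty flags, and the PAGECACHE_TAG_DIRTY radix
 * tree tags the mapping uses to find dirty pages.
 */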
2629 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2630 			      struct extent_buffer *eb)
2631 {
2632 	int set;
2633 	unsigned long i;
2634 	unsigned long num_pages;
2635 	struct page *page;
2636 
2637 	u64 start = eb->start;
2638 	u64 end = start + eb->len - 1;
2639 
2640 	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2641 	num_pages = num_extent_pages(eb->start, eb->len);
2642 
2643 	for (i = 0; i < num_pages; i++) {
2644 		page = extent_buffer_page(eb, i);
2645 		lock_page(page);
2646 		/*
2647 		 * if we're on the last page or the first page and the
2648 		 * block isn't aligned on a page boundary, do extra checks
2649 		 * to make sure we don't clean page that is partially dirty
2650 		 */
2651 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2652 		    ((i == num_pages - 1) &&
2653 		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2654 			start = (u64)page->index << PAGE_CACHE_SHIFT;
2655 			end  = start + PAGE_CACHE_SIZE - 1;
2656 			if (test_range_bit(tree, start, end,
2657 					   EXTENT_DIRTY, 0)) {
2658 				unlock_page(page);
2659 				continue;
2660 			}
2661 		}
2662 		clear_page_dirty_for_io(page);
2663 		write_lock_irq(&page->mapping->tree_lock);
2664 		if (!PageDirty(page)) {
2665 			radix_tree_tag_clear(&page->mapping->page_tree,
2666 						page_index(page),
2667 						PAGECACHE_TAG_DIRTY);
2668 		}
2669 		write_unlock_irq(&page->mapping->tree_lock);
2670 		unlock_page(page);
2671 	}
2672 	return 0;
2673 }
2674 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2675 
2676 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2677 				    struct extent_buffer *eb)
2678 {
2679 	return wait_on_extent_writeback(tree, eb->start,
2680 					eb->start + eb->len - 1);
2681 }
2682 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2683 
2684 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2685 			     struct extent_buffer *eb)
2686 {
2687 	unsigned long i;
2688 	unsigned long num_pages;
2689 
2690 	num_pages = num_extent_pages(eb->start, eb->len);
2691 	for (i = 0; i < num_pages; i++) {
2692 		struct page *page = extent_buffer_page(eb, i);
2693 		/* writepage may need to do something special for the
2694 		 * first page, we have to make sure page->private is
2695 		 * properly set.  releasepage may drop page->private
2696 		 * on us if the page isn't already dirty.
2697 		 */
2698 		if (i == 0) {
2699 			lock_page(page);
2700 			set_page_private(page,
2701 					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2702 					 eb->len << 2);
2703 		}
2704 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2705 		if (i == 0)
2706 			unlock_page(page);
2707 	}
2708 	return set_extent_dirty(tree, eb->start,
2709 				eb->start + eb->len - 1, GFP_NOFS);
2710 }
2711 EXPORT_SYMBOL(set_extent_buffer_dirty);
2712 
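/*
 * mark an extent buffer uptodate.  Pages fully covered by the buffer
 * are flagged directly; for a first or last page the buffer only
 * partly covers, check_page_uptodate decides whether the whole page
 * can be flagged.
 */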
2713 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2714 				struct extent_buffer *eb)
2715 {
2716 	unsigned long i;
2717 	struct page *page;
2718 	unsigned long num_pages;
2719 
2720 	num_pages = num_extent_pages(eb->start, eb->len);
2721 
2722 	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2723 			    GFP_NOFS);
2724 	for (i = 0; i < num_pages; i++) {
2725 		page = extent_buffer_page(eb, i);
2726 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2727 		    ((i == num_pages - 1) &&
2728 		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2729 			check_page_uptodate(tree, page);
2730 			continue;
2731 		}
2732 		SetPageUptodate(page);
2733 	}
2734 	return 0;
2735 }
2736 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2737 
2738 int extent_buffer_uptodate(struct extent_map_tree *tree,
2739 			     struct extent_buffer *eb)
2740 {
2741 	if (eb->flags & EXTENT_UPTODATE)
2742 		return 1;
2743 	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2744 			   EXTENT_UPTODATE, 1);
2745 }
2746 EXPORT_SYMBOL(extent_buffer_uptodate);
2747 
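/*
 * read any pages of the buffer that aren't uptodate yet.  With wait
 * set, this blocks until the reads finish and returns -EIO if any page
 * still isn't uptodate; without it, already locked pages are skipped.
 */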
2748 int read_extent_buffer_pages(struct extent_map_tree *tree,
2749 			     struct extent_buffer *eb,
2750 			     u64 start,
2751 			     int wait)
2752 {
2753 	unsigned long i;
2754 	unsigned long start_i;
2755 	struct page *page;
2756 	int err;
2757 	int ret = 0;
2758 	unsigned long num_pages;
2759 
2760 	if (eb->flags & EXTENT_UPTODATE)
2761 		return 0;
2762 
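	/* compiled out: the per-page PageUptodate checks below make
	 * this range-bit test redundant
	 */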
2763 	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2764 			   EXTENT_UPTODATE, 1)) {
2765 		return 0;
2766 	}
2767 
2768 	if (start) {
2769 		WARN_ON(start < eb->start);
2770 		start_i = (start >> PAGE_CACHE_SHIFT) -
2771 			(eb->start >> PAGE_CACHE_SHIFT);
2772 	} else {
2773 		start_i = 0;
2774 	}
2775 
2776 	num_pages = num_extent_pages(eb->start, eb->len);
2777 	for (i = start_i; i < num_pages; i++) {
2778 		page = extent_buffer_page(eb, i);
2779 		if (PageUptodate(page)) {
2780 			continue;
2781 		}
2782 		if (!wait) {
2783 			if (TestSetPageLocked(page)) {
2784 				continue;
2785 			}
2786 		} else {
2787 			lock_page(page);
2788 		}
2789 		if (!PageUptodate(page)) {
2790 			err = page->mapping->a_ops->readpage(NULL, page);
2791 			if (err) {
2792 				ret = err;
2793 			}
2794 		} else {
2795 			unlock_page(page);
2796 		}
2797 	}
2798 
2799 	if (ret || !wait) {
2800 		return ret;
2801 	}
2802 
2803 	for (i = start_i; i < num_pages; i++) {
2804 		page = extent_buffer_page(eb, i);
2805 		wait_on_page_locked(page);
2806 		if (!PageUptodate(page)) {
2807 			ret = -EIO;
2808 		}
2809 	}
2810 	if (!ret)
2811 		eb->flags |= EXTENT_UPTODATE;
2812 	return ret;
2813 }
2814 EXPORT_SYMBOL(read_extent_buffer_pages);
2815 
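/*
 * copy len bytes starting at offset start out of the extent buffer and
 * into dstv, one page at a time
 */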
2816 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2817 			unsigned long start,
2818 			unsigned long len)
2819 {
2820 	size_t cur;
2821 	size_t offset;
2822 	struct page *page;
2823 	char *kaddr;
2824 	char *dst = (char *)dstv;
2825 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2826 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2827 	unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2828 
2829 	WARN_ON(start > eb->len);
2830 	WARN_ON(start + len > eb->start + eb->len);
2831 
2832 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2833 
2834 	while(len > 0) {
2835 		page = extent_buffer_page(eb, i);
2836 		if (!PageUptodate(page)) {
2837 			printk("page %lu not up to date i %lu, total %lu, "
			       "len %lu\n", page->index, i, num_pages,
			       eb->len);
2838 			WARN_ON(1);
2839 		}
2841 
2842 		cur = min(len, (PAGE_CACHE_SIZE - offset));
2843 		kaddr = kmap_atomic(page, KM_USER1);
2844 		memcpy(dst, kaddr + offset, cur);
2845 		kunmap_atomic(kaddr, KM_USER1);
2846 
2847 		dst += cur;
2848 		len -= cur;
2849 		offset = 0;
2850 		i++;
2851 	}
2852 }
2853 EXPORT_SYMBOL(read_extent_buffer);
2854 
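/*
 * kmap the single page holding [start, start + min_len) and return a
 * pointer into it.  Ranges that would span a page boundary can't be
 * mapped this way and fail with -EINVAL.
 */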
2855 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2856 			       unsigned long min_len, char **token, char **map,
2857 			       unsigned long *map_start,
2858 			       unsigned long *map_len, int km)
2859 {
2860 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
2861 	char *kaddr;
2862 	struct page *p;
2863 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2864 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2865 	unsigned long end_i = (start_offset + start + min_len - 1) >>
2866 		PAGE_CACHE_SHIFT;
2867 
2868 	if (i != end_i)
2869 		return -EINVAL;
2870 
2871 	if (i == 0) {
2872 		offset = start_offset;
2873 		*map_start = 0;
2874 	} else {
2875 		offset = 0;
2876 		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2877 	}
2878 	if (start + min_len > eb->len) {
2879 		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
2880 		WARN_ON(1);
2881 	}
2882 
2883 	p = extent_buffer_page(eb, i);
2884 	WARN_ON(!PageUptodate(p));
2885 	kaddr = kmap_atomic(p, km);
2886 	*token = kaddr;
2887 	*map = kaddr + offset;
2888 	*map_len = PAGE_CACHE_SIZE - offset;
2889 	return 0;
2890 }
2891 EXPORT_SYMBOL(map_private_extent_buffer);
2892 
2893 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2894 		      unsigned long min_len,
2895 		      char **token, char **map,
2896 		      unsigned long *map_start,
2897 		      unsigned long *map_len, int km)
2898 {
2899 	int err;
2900 	int save = 0;
2901 	if (eb->map_token) {
2902 		unmap_extent_buffer(eb, eb->map_token, km);
2903 		eb->map_token = NULL;
2904 		save = 1;
2905 	}
2906 	err = map_private_extent_buffer(eb, start, min_len, token, map,
2907 				       map_start, map_len, km);
2908 	if (!err && save) {
2909 		eb->map_token = *token;
2910 		eb->kaddr = *map;
2911 		eb->map_start = *map_start;
2912 		eb->map_len = *map_len;
2913 	}
2914 	return err;
2915 }
2916 EXPORT_SYMBOL(map_extent_buffer);
2917 
2918 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2919 {
2920 	kunmap_atomic(token, km);
2921 }
2922 EXPORT_SYMBOL(unmap_extent_buffer);
2923 
2924 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2925 			  unsigned long start,
2926 			  unsigned long len)
2927 {
2928 	size_t cur;
2929 	size_t offset;
2930 	struct page *page;
2931 	char *kaddr;
2932 	char *ptr = (char *)ptrv;
2933 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2934 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2935 	int ret = 0;
2936 
2937 	WARN_ON(start > eb->len);
2938 	WARN_ON(start + len > eb->start + eb->len);
2939 
2940 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2941 
2942 	while(len > 0) {
2943 		page = extent_buffer_page(eb, i);
2944 		WARN_ON(!PageUptodate(page));
2945 
2946 		cur = min(len, (PAGE_CACHE_SIZE - offset));
2947 
2948 		kaddr = kmap_atomic(page, KM_USER0);
2949 		ret = memcmp(ptr, kaddr + offset, cur);
2950 		kunmap_atomic(kaddr, KM_USER0);
2951 		if (ret)
2952 			break;
2953 
2954 		ptr += cur;
2955 		len -= cur;
2956 		offset = 0;
2957 		i++;
2958 	}
2959 	return ret;
2960 }
2961 EXPORT_SYMBOL(memcmp_extent_buffer);
2962 
2963 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2964 			 unsigned long start, unsigned long len)
2965 {
2966 	size_t cur;
2967 	size_t offset;
2968 	struct page *page;
2969 	char *kaddr;
2970 	char *src = (char *)srcv;
2971 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2972 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2973 
2974 	WARN_ON(start > eb->len);
2975 	WARN_ON(start + len > eb->start + eb->len);
2976 
2977 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2978 
2979 	while(len > 0) {
2980 		page = extent_buffer_page(eb, i);
2981 		WARN_ON(!PageUptodate(page));
2982 
2983 		cur = min(len, PAGE_CACHE_SIZE - offset);
2984 		kaddr = kmap_atomic(page, KM_USER1);
2985 		memcpy(kaddr + offset, src, cur);
2986 		kunmap_atomic(kaddr, KM_USER1);
2987 
2988 		src += cur;
2989 		len -= cur;
2990 		offset = 0;
2991 		i++;
2992 	}
2993 }
2994 EXPORT_SYMBOL(write_extent_buffer);
2995 
2996 void memset_extent_buffer(struct extent_buffer *eb, char c,
2997 			  unsigned long start, unsigned long len)
2998 {
2999 	size_t cur;
3000 	size_t offset;
3001 	struct page *page;
3002 	char *kaddr;
3003 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3004 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3005 
3006 	WARN_ON(start > eb->len);
3007 	WARN_ON(start + len > eb->start + eb->len);
3008 
3009 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3010 
3011 	while(len > 0) {
3012 		page = extent_buffer_page(eb, i);
3013 		WARN_ON(!PageUptodate(page));
3014 
3015 		cur = min(len, PAGE_CACHE_SIZE - offset);
3016 		kaddr = kmap_atomic(page, KM_USER0);
3017 		memset(kaddr + offset, c, cur);
3018 		kunmap_atomic(kaddr, KM_USER0);
3019 
3020 		len -= cur;
3021 		offset = 0;
3022 		i++;
3023 	}
3024 }
3025 EXPORT_SYMBOL(memset_extent_buffer);
3026 
3027 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3028 			unsigned long dst_offset, unsigned long src_offset,
3029 			unsigned long len)
3030 {
3031 	u64 dst_len = dst->len;
3032 	size_t cur;
3033 	size_t offset;
3034 	struct page *page;
3035 	char *kaddr;
3036 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3037 	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3038 
3039 	WARN_ON(src->len != dst_len);
3040 
3041 	offset = (start_offset + dst_offset) &
3042 		((unsigned long)PAGE_CACHE_SIZE - 1);
3043 
3044 	while(len > 0) {
3045 		page = extent_buffer_page(dst, i);
3046 		WARN_ON(!PageUptodate(page));
3047 
3048 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3049 
3050 		kaddr = kmap_atomic(page, KM_USER0);
3051 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
3052 		kunmap_atomic(kaddr, KM_USER0);
3053 
3054 		src_offset += cur;
3055 		len -= cur;
3056 		offset = 0;
3057 		i++;
3058 	}
3059 }
3060 EXPORT_SYMBOL(copy_extent_buffer);
3061 
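/*
 * copy helpers for memmove_extent_buffer: move_pages copies from the
 * end of the range backwards (memmove for same-page copies) so
 * overlapping source and destination stay safe
 */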
3062 static void move_pages(struct page *dst_page, struct page *src_page,
3063 		       unsigned long dst_off, unsigned long src_off,
3064 		       unsigned long len)
3065 {
3066 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3067 	if (dst_page == src_page) {
3068 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3069 	} else {
3070 		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3071 		char *p = dst_kaddr + dst_off + len;
3072 		char *s = src_kaddr + src_off + len;
3073 
3074 		while (len--)
3075 			*--p = *--s;
3076 
3077 		kunmap_atomic(src_kaddr, KM_USER1);
3078 	}
3079 	kunmap_atomic(dst_kaddr, KM_USER0);
3080 }
3081 
3082 static void copy_pages(struct page *dst_page, struct page *src_page,
3083 		       unsigned long dst_off, unsigned long src_off,
3084 		       unsigned long len)
3085 {
3086 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3087 	char *src_kaddr;
3088 
3089 	if (dst_page != src_page)
3090 		src_kaddr = kmap_atomic(src_page, KM_USER1);
3091 	else
3092 		src_kaddr = dst_kaddr;
3093 
3094 	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3095 	kunmap_atomic(dst_kaddr, KM_USER0);
3096 	if (dst_page != src_page)
3097 		kunmap_atomic(src_kaddr, KM_USER1);
3098 }
3099 
3100 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3101 			   unsigned long src_offset, unsigned long len)
3102 {
3103 	size_t cur;
3104 	size_t dst_off_in_page;
3105 	size_t src_off_in_page;
3106 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3107 	unsigned long dst_i;
3108 	unsigned long src_i;
3109 
3110 	if (src_offset + len > dst->len) {
3111 		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
3112 		       src_offset, len, dst->len);
3113 		BUG_ON(1);
3114 	}
3115 	if (dst_offset + len > dst->len) {
3116 		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
3117 		       dst_offset, len, dst->len);
3118 		BUG_ON(1);
3119 	}
3120 
3121 	while(len > 0) {
3122 		dst_off_in_page = (start_offset + dst_offset) &
3123 			((unsigned long)PAGE_CACHE_SIZE - 1);
3124 		src_off_in_page = (start_offset + src_offset) &
3125 			((unsigned long)PAGE_CACHE_SIZE - 1);
3126 
3127 		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3128 		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3129 
3130 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3131 					       src_off_in_page));
3132 		cur = min_t(unsigned long, cur,
3133 			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3134 
3135 		copy_pages(extent_buffer_page(dst, dst_i),
3136 			   extent_buffer_page(dst, src_i),
3137 			   dst_off_in_page, src_off_in_page, cur);
3138 
3139 		src_offset += cur;
3140 		dst_offset += cur;
3141 		len -= cur;
3142 	}
3143 }
3144 EXPORT_SYMBOL(memcpy_extent_buffer);
3145 
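/*
 * overlap-safe version of memcpy_extent_buffer: when the destination
 * starts after the source, the copy runs from the end of the ranges
 * back toward the start
 */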
3146 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3147 			   unsigned long src_offset, unsigned long len)
3148 {
3149 	size_t cur;
3150 	size_t dst_off_in_page;
3151 	size_t src_off_in_page;
3152 	unsigned long dst_end = dst_offset + len - 1;
3153 	unsigned long src_end = src_offset + len - 1;
3154 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3155 	unsigned long dst_i;
3156 	unsigned long src_i;
3157 
3158 	if (src_offset + len > dst->len) {
3159 		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3160 		       src_offset, len, dst->len);
3161 		BUG_ON(1);
3162 	}
3163 	if (dst_offset + len > dst->len) {
3164 		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3165 		       dst_offset, len, dst->len);
3166 		BUG_ON(1);
3167 	}
3168 	if (dst_offset < src_offset) {
3169 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3170 		return;
3171 	}
3172 	while(len > 0) {
3173 		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3174 		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3175 
3176 		dst_off_in_page = (start_offset + dst_end) &
3177 			((unsigned long)PAGE_CACHE_SIZE - 1);
3178 		src_off_in_page = (start_offset + src_end) &
3179 			((unsigned long)PAGE_CACHE_SIZE - 1);
3180 
3181 		cur = min_t(unsigned long, len, src_off_in_page + 1);
3182 		cur = min(cur, dst_off_in_page + 1);
3183 		move_pages(extent_buffer_page(dst, dst_i),
3184 			   extent_buffer_page(dst, src_i),
3185 			   dst_off_in_page - cur + 1,
3186 			   src_off_in_page - cur + 1, cur);
3187 
3188 		dst_end -= cur;
3189 		src_end -= cur;
3190 		len -= cur;
3191 	}
3192 }
3193 EXPORT_SYMBOL(memmove_extent_buffer);
3194