xref: /openbmc/linux/fs/btrfs/extent_map.c (revision 190662b2)
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_map.h"
16 
17 /* temporary declaration until extent_map moves out of btrfs */
18 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
19 				       unsigned long extra_flags,
20 				       void (*ctor)(void *, struct kmem_cache *,
21 						    unsigned long));
22 
23 static struct kmem_cache *extent_map_cache;
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
26 
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
29 
30 static DEFINE_SPINLOCK(state_lock);
31 #define BUFFER_LRU_MAX 64
32 
33 struct tree_entry {
34 	u64 start;
35 	u64 end;
36 	int in_tree;
37 	struct rb_node rb_node;
38 };
39 
40 struct extent_page_data {
41 	struct bio *bio;
42 	struct extent_map_tree *tree;
43 	get_extent_t *get_extent;
44 };
45 
46 int __init extent_map_init(void)
47 {
48 	extent_map_cache = btrfs_cache_create("extent_map",
49 					    sizeof(struct extent_map), 0,
50 					    NULL);
51 	if (!extent_map_cache)
52 		return -ENOMEM;
53 	extent_state_cache = btrfs_cache_create("extent_state",
54 					    sizeof(struct extent_state), 0,
55 					    NULL);
56 	if (!extent_state_cache)
57 		goto free_map_cache;
58 	extent_buffer_cache = btrfs_cache_create("extent_buffers",
59 					    sizeof(struct extent_buffer), 0,
60 					    NULL);
61 	if (!extent_buffer_cache)
62 		goto free_state_cache;
63 	return 0;
64 
65 free_state_cache:
66 	kmem_cache_destroy(extent_state_cache);
67 free_map_cache:
68 	kmem_cache_destroy(extent_map_cache);
69 	return -ENOMEM;
70 }
71 
72 void extent_map_exit(void)
73 {
74 	struct extent_state *state;
75 
76 	while (!list_empty(&states)) {
77 		state = list_entry(states.next, struct extent_state, list);
78 		printk("state leak: start %Lu end %Lu state %lu in tree "
		       "%d refs %d\n", state->start, state->end,
		       state->state, state->in_tree,
		       atomic_read(&state->refs));
79 		list_del(&state->list);
80 		kmem_cache_free(extent_state_cache, state);
81 
82 	}
83 
84 	if (extent_map_cache)
85 		kmem_cache_destroy(extent_map_cache);
86 	if (extent_state_cache)
87 		kmem_cache_destroy(extent_state_cache);
88 	if (extent_buffer_cache)
89 		kmem_cache_destroy(extent_buffer_cache);
90 }
91 
92 void extent_map_tree_init(struct extent_map_tree *tree,
93 			  struct address_space *mapping, gfp_t mask)
94 {
95 	tree->map.rb_node = NULL;
96 	tree->state.rb_node = NULL;
97 	tree->ops = NULL;
98 	tree->dirty_bytes = 0;
99 	rwlock_init(&tree->lock);
100 	spin_lock_init(&tree->lru_lock);
101 	tree->mapping = mapping;
102 	INIT_LIST_HEAD(&tree->buffer_lru);
103 	tree->lru_size = 0;
104 }
105 EXPORT_SYMBOL(extent_map_tree_init);
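/*
 * Usage sketch (illustrative, with a hypothetical caller context): the
 * tree is normally embedded in a per-inode structure and tied to that
 * inode's mapping once at setup time:
 *
 *	extent_map_tree_init(&my_tree, inode->i_mapping, GFP_NOFS);
 */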
106 
107 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
108 {
109 	struct extent_buffer *eb;
110 	while (!list_empty(&tree->buffer_lru)) {
111 		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
112 				lru);
113 		list_del_init(&eb->lru);
114 		free_extent_buffer(eb);
115 	}
116 }
117 EXPORT_SYMBOL(extent_map_tree_empty_lru);
118 
119 struct extent_map *alloc_extent_map(gfp_t mask)
120 {
121 	struct extent_map *em;
122 	em = kmem_cache_alloc(extent_map_cache, mask);
123 	if (!em || IS_ERR(em))
124 		return em;
125 	em->in_tree = 0;
126 	atomic_set(&em->refs, 1);
127 	return em;
128 }
129 EXPORT_SYMBOL(alloc_extent_map);
130 
131 void free_extent_map(struct extent_map *em)
132 {
133 	if (!em)
134 		return;
135 	if (atomic_dec_and_test(&em->refs)) {
136 		WARN_ON(em->in_tree);
137 		kmem_cache_free(extent_map_cache, em);
138 	}
139 }
140 EXPORT_SYMBOL(free_extent_map);
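/*
 * Reference counting sketch (illustrative; the field values are
 * hypothetical): alloc_extent_map() returns an em holding one
 * reference, and add_extent_mapping() below takes its own, so a caller
 * that only needed the insert drops its reference afterwards:
 *
 *	em = alloc_extent_map(GFP_NOFS);
 *	em->start = start;
 *	em->end = start + len - 1;
 *	em->block_start = block_start;
 *	em->block_end = block_start + len - 1;
 *	ret = add_extent_mapping(tree, em);
 *	free_extent_map(em);
 */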
141 
142 
143 struct extent_state *alloc_extent_state(gfp_t mask)
144 {
145 	struct extent_state *state;
146 	unsigned long flags;
147 
148 	state = kmem_cache_alloc(extent_state_cache, mask);
149 	if (!state || IS_ERR(state))
150 		return state;
151 	state->state = 0;
152 	state->in_tree = 0;
153 	state->private = 0;
154 
155 	spin_lock_irqsave(&state_lock, flags);
156 	list_add(&state->list, &states);
157 	spin_unlock_irqrestore(&state_lock, flags);
158 
159 	atomic_set(&state->refs, 1);
160 	init_waitqueue_head(&state->wq);
161 	return state;
162 }
163 EXPORT_SYMBOL(alloc_extent_state);
164 
165 void free_extent_state(struct extent_state *state)
166 {
167 	unsigned long flags;
168 	if (!state)
169 		return;
170 	if (atomic_dec_and_test(&state->refs)) {
171 		WARN_ON(state->in_tree);
172 		spin_lock_irqsave(&state_lock, flags);
173 		list_del(&state->list);
174 		spin_unlock_irqrestore(&state_lock, flags);
175 		kmem_cache_free(extent_state_cache, state);
176 	}
177 }
178 EXPORT_SYMBOL(free_extent_state);
179 
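/*
 * insert 'node' into the tree, keyed by the end of its range.  Returns
 * NULL on success, or the existing node whose [start, end] range
 * already contains 'offset'.
 */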
180 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
181 				   struct rb_node *node)
182 {
183 	struct rb_node **p = &root->rb_node;
184 	struct rb_node *parent = NULL;
185 	struct tree_entry *entry;
186 
187 	while (*p) {
188 		parent = *p;
189 		entry = rb_entry(parent, struct tree_entry, rb_node);
190 
191 		if (offset < entry->start)
192 			p = &(*p)->rb_left;
193 		else if (offset > entry->end)
194 			p = &(*p)->rb_right;
195 		else
196 			return parent;
197 	}
198 
199 	entry = rb_entry(node, struct tree_entry, rb_node);
200 	entry->in_tree = 1;
201 	rb_link_node(node, parent, p);
202 	rb_insert_color(node, root);
203 	return NULL;
204 }
205 
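/*
 * find the entry whose [start, end] range contains 'offset'.  When no
 * entry contains it, NULL is returned and, if prev_ret is non-NULL,
 * *prev_ret is set to the first entry that ends at or after 'offset'
 * (or NULL if there is none).
 */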
206 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
207 				   struct rb_node **prev_ret)
208 {
209 	struct rb_node *n = root->rb_node;
210 	struct rb_node *prev = NULL;
211 	struct tree_entry *entry;
212 	struct tree_entry *prev_entry = NULL;
213 
214 	while (n) {
215 		entry = rb_entry(n, struct tree_entry, rb_node);
216 		prev = n;
217 		prev_entry = entry;
218 
219 		if (offset < entry->start)
220 			n = n->rb_left;
221 		else if (offset > entry->end)
222 			n = n->rb_right;
223 		else
224 			return n;
225 	}
226 	if (!prev_ret)
227 		return NULL;
228 	while (prev && offset > prev_entry->end) {
229 		prev = rb_next(prev);
230 		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
231 	}
232 	*prev_ret = prev;
233 	return NULL;
234 }
235 
236 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
237 {
238 	struct rb_node *prev;
239 	struct rb_node *ret;
240 	ret = __tree_search(root, offset, &prev);
241 	if (!ret)
242 		return prev;
243 	return ret;
244 }
245 
246 static int tree_delete(struct rb_root *root, u64 offset)
247 {
248 	struct rb_node *node;
249 	struct tree_entry *entry;
250 
251 	node = __tree_search(root, offset, NULL);
252 	if (!node)
253 		return -ENOENT;
254 	entry = rb_entry(node, struct tree_entry, rb_node);
255 	entry->in_tree = 0;
256 	rb_erase(node, root);
257 	return 0;
258 }
259 
260 /*
261  * add_extent_mapping tries a simple backward merge with existing
262  * mappings.  The extent_map struct passed in will be inserted into
263  * the tree directly (no copies made, just a reference taken).
264  */
265 int add_extent_mapping(struct extent_map_tree *tree,
266 		       struct extent_map *em)
267 {
268 	int ret = 0;
269 	struct extent_map *prev = NULL;
270 	struct rb_node *rb;
271 
272 	write_lock_irq(&tree->lock);
273 	rb = tree_insert(&tree->map, em->end, &em->rb_node);
274 	if (rb) {
275 		prev = rb_entry(rb, struct extent_map, rb_node);
276 		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
277 		ret = -EEXIST;
278 		goto out;
279 	}
280 	atomic_inc(&em->refs);
281 	if (em->start != 0) {
282 		rb = rb_prev(&em->rb_node);
283 		if (rb)
284 			prev = rb_entry(rb, struct extent_map, rb_node);
285 		if (prev && prev->end + 1 == em->start &&
286 		    ((em->block_start == EXTENT_MAP_HOLE &&
287 		      prev->block_start == EXTENT_MAP_HOLE) ||
288 		     (em->block_start == EXTENT_MAP_INLINE &&
289 		      prev->block_start == EXTENT_MAP_INLINE) ||
290 		     (em->block_start == EXTENT_MAP_DELALLOC &&
291 		      prev->block_start == EXTENT_MAP_DELALLOC) ||
292 		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
293 		      em->block_start == prev->block_end + 1))) {
294 			em->start = prev->start;
295 			em->block_start = prev->block_start;
296 			rb_erase(&prev->rb_node, &tree->map);
297 			prev->in_tree = 0;
298 			free_extent_map(prev);
299 		}
300 	}
301 out:
302 	write_unlock_irq(&tree->lock);
303 	return ret;
304 }
305 EXPORT_SYMBOL(add_extent_mapping);
306 
307 /*
308  * lookup_extent_mapping returns the first extent_map struct in the
309  * tree that intersects the [start, end] (inclusive) range.  There may
310  * be additional objects in the tree that intersect, so check the object
311  * returned carefully to make sure you don't need additional lookups.
312  */
313 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
314 					 u64 start, u64 end)
315 {
316 	struct extent_map *em;
317 	struct rb_node *rb_node;
318 
319 	read_lock_irq(&tree->lock);
320 	rb_node = tree_search(&tree->map, start);
321 	if (!rb_node) {
322 		em = NULL;
323 		goto out;
324 	}
325 	if (IS_ERR(rb_node)) {
326 		em = ERR_PTR(PTR_ERR(rb_node));
327 		goto out;
328 	}
329 	em = rb_entry(rb_node, struct extent_map, rb_node);
330 	if (em->end < start || em->start > end) {
331 		em = NULL;
332 		goto out;
333 	}
334 	atomic_inc(&em->refs);
335 out:
336 	read_unlock_irq(&tree->lock);
337 	return em;
338 }
339 EXPORT_SYMBOL(lookup_extent_mapping);
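/*
 * Usage sketch (illustrative): the returned em is only guaranteed to
 * intersect [start, end], so callers that need full coverage check the
 * boundaries themselves and drop the reference when done:
 *
 *	em = lookup_extent_mapping(tree, start, end);
 *	if (em && !IS_ERR(em)) {
 *		if (em->start <= start && em->end >= end)
 *			... the whole range is mapped ...
 *		free_extent_map(em);
 *	}
 */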
340 
341 /*
342  * removes an extent_map struct from the tree.  No reference counts are
343  * dropped, and no checks are done to see if the range is in use.
344  */
345 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
346 {
347 	int ret;
348 
349 	write_lock_irq(&tree->lock);
350 	ret = tree_delete(&tree->map, em->end);
351 	write_unlock_irq(&tree->lock);
352 	return ret;
353 }
354 EXPORT_SYMBOL(remove_extent_mapping);
355 
356 /*
357  * utility function to look for merge candidates inside a given range.
358  * Any extents with matching state are merged together into a single
359  * extent in the tree.  Extents with EXTENT_IOBITS in their state field
360  * are not merged because the end_io handlers need to be able to do
361  * operations on them without sleeping (or doing allocations/splits).
362  *
363  * This should be called with the tree lock held.
364  */
365 static int merge_state(struct extent_map_tree *tree,
366 		       struct extent_state *state)
367 {
368 	struct extent_state *other;
369 	struct rb_node *other_node;
370 
371 	if (state->state & EXTENT_IOBITS)
372 		return 0;
373 
374 	other_node = rb_prev(&state->rb_node);
375 	if (other_node) {
376 		other = rb_entry(other_node, struct extent_state, rb_node);
377 		if (other->end == state->start - 1 &&
378 		    other->state == state->state) {
379 			state->start = other->start;
380 			other->in_tree = 0;
381 			rb_erase(&other->rb_node, &tree->state);
382 			free_extent_state(other);
383 		}
384 	}
385 	other_node = rb_next(&state->rb_node);
386 	if (other_node) {
387 		other = rb_entry(other_node, struct extent_state, rb_node);
388 		if (other->start == state->end + 1 &&
389 		    other->state == state->state) {
390 			other->start = state->start;
391 			state->in_tree = 0;
392 			rb_erase(&state->rb_node, &tree->state);
393 			free_extent_state(state);
394 		}
395 	}
396 	return 0;
397 }
398 
399 /*
400  * insert an extent_state struct into the tree.  'bits' are set on the
401  * struct before it is inserted.
402  *
403  * This may return -EEXIST if the extent is already there, in which case the
404  * state struct is freed.
405  *
406  * The tree lock is not taken internally.  This is a utility function and
407  * probably isn't what you want to call (see set/clear_extent_bit).
408  */
409 static int insert_state(struct extent_map_tree *tree,
410 			struct extent_state *state, u64 start, u64 end,
411 			int bits)
412 {
413 	struct rb_node *node;
414 
415 	if (end < start) {
416 		printk("end < start %Lu %Lu\n", end, start);
417 		WARN_ON(1);
418 	}
419 	if (bits & EXTENT_DIRTY)
420 		tree->dirty_bytes += end - start + 1;
421 	state->state |= bits;
422 	state->start = start;
423 	state->end = end;
424 	node = tree_insert(&tree->state, end, &state->rb_node);
425 	if (node) {
426 		struct extent_state *found;
427 		found = rb_entry(node, struct extent_state, rb_node);
428 		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
429 		free_extent_state(state);
430 		return -EEXIST;
431 	}
432 	merge_state(tree, state);
433 	return 0;
434 }
435 
436 /*
437  * split a given extent state struct in two, inserting the preallocated
438  * struct 'prealloc' as the newly created second half.  'split' indicates an
439  * offset inside 'orig' where it should be split.
440  *
441  * Before calling, the tree has 'orig' at [orig->start, orig->end].
442  * After calling, there are two extent state structs in the tree:
443  *
444  * prealloc: [orig->start, split - 1]
445  * orig: [split, orig->end]
446  *
447  * The tree locks are not taken by this function. They need to be held
448  * by the caller.
449  */
450 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
451 		       struct extent_state *prealloc, u64 split)
452 {
453 	struct rb_node *node;
454 	prealloc->start = orig->start;
455 	prealloc->end = split - 1;
456 	prealloc->state = orig->state;
457 	orig->start = split;
458 
459 	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
460 	if (node) {
461 		struct extent_state *found;
462 		found = rb_entry(node, struct extent_state, rb_node);
463 		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
464 		free_extent_state(prealloc);
465 		return -EEXIST;
466 	}
467 	return 0;
468 }
469 
470 /*
471  * utility function to clear some bits in an extent state struct.
472  * it will optionally wake up anyone waiting on this state (wake == 1), or
473  * forcibly remove the state from the tree (delete == 1).
474  *
475  * If no bits are set on the state struct after clearing things, the
476  * struct is freed and removed from the tree
477  */
478 static int clear_state_bit(struct extent_map_tree *tree,
479 			    struct extent_state *state, int bits, int wake,
480 			    int delete)
481 {
482 	int ret = state->state & bits;
483 
484 	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
485 		u64 range = state->end - state->start + 1;
486 		WARN_ON(range > tree->dirty_bytes);
487 		tree->dirty_bytes -= range;
488 	}
489 	state->state &= ~bits;
490 	if (wake)
491 		wake_up(&state->wq);
492 	if (delete || state->state == 0) {
493 		if (state->in_tree) {
494 			rb_erase(&state->rb_node, &tree->state);
495 			state->in_tree = 0;
496 			free_extent_state(state);
497 		} else {
498 			WARN_ON(1);
499 		}
500 	} else {
501 		merge_state(tree, state);
502 	}
503 	return ret;
504 }
505 
506 /*
507  * clear some bits on a range in the tree.  This may require splitting
508  * or inserting elements in the tree, so the gfp mask is used to
509  * indicate which allocations or sleeping are allowed.
510  *
511  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
512  * the given range from the tree regardless of state (i.e. for truncate).
513  *
514  * the range [start, end] is inclusive.
515  *
516  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
517  * bits were already set, or zero if none of the bits were already set.
518  */
519 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
520 		     int bits, int wake, int delete, gfp_t mask)
521 {
522 	struct extent_state *state;
523 	struct extent_state *prealloc = NULL;
524 	struct rb_node *node;
525 	unsigned long flags;
526 	int err;
527 	int set = 0;
528 
529 again:
530 	if (!prealloc && (mask & __GFP_WAIT)) {
531 		prealloc = alloc_extent_state(mask);
532 		if (!prealloc)
533 			return -ENOMEM;
534 	}
535 
536 	write_lock_irqsave(&tree->lock, flags);
537 	/*
538 	 * this search will find the extents that end after
539 	 * our range starts
540 	 */
541 	node = tree_search(&tree->state, start);
542 	if (!node)
543 		goto out;
544 	state = rb_entry(node, struct extent_state, rb_node);
545 	if (state->start > end)
546 		goto out;
547 	WARN_ON(state->end < start);
548 
549 	/*
550 	 *     | ---- desired range ---- |
551 	 *  | state | or
552 	 *  | ------------- state -------------- |
553 	 *
554 	 * We need to split the extent we found, and may flip
555 	 * bits on second half.
556 	 *
557 	 * If the extent we found extends past our range, we
558 	 * just split and search again.  It'll get split again
559 	 * the next time though.
560 	 *
561 	 * If the extent we found is inside our range, we clear
562 	 * the desired bit on it.
563 	 */
564 
565 	if (state->start < start) {
566 		err = split_state(tree, state, prealloc, start);
567 		BUG_ON(err == -EEXIST);
568 		prealloc = NULL;
569 		if (err)
570 			goto out;
571 		if (state->end <= end) {
572 			start = state->end + 1;
573 			set |= clear_state_bit(tree, state, bits,
574 					wake, delete);
575 		} else {
576 			start = state->start;
577 		}
578 		goto search_again;
579 	}
580 	/*
581 	 * | ---- desired range ---- |
582 	 *                        | state |
583 	 * We need to split the extent, and clear the bit
584 	 * on the first half
585 	 */
586 	if (state->start <= end && state->end > end) {
587 		err = split_state(tree, state, prealloc, end + 1);
588 		BUG_ON(err == -EEXIST);
589 
590 		if (wake)
591 			wake_up(&state->wq);
592 		set |= clear_state_bit(tree, prealloc, bits,
593 				       wake, delete);
594 		prealloc = NULL;
595 		goto out;
596 	}
597 
598 	start = state->end + 1;
599 	set |= clear_state_bit(tree, state, bits, wake, delete);
600 	goto search_again;
601 
602 out:
603 	write_unlock_irqrestore(&tree->lock, flags);
604 	if (prealloc)
605 		free_extent_state(prealloc);
606 
607 	return set;
608 
609 search_again:
610 	if (start > end)
611 		goto out;
612 	write_unlock_irqrestore(&tree->lock, flags);
613 	if (mask & __GFP_WAIT)
614 		cond_resched();
615 	goto again;
616 }
617 EXPORT_SYMBOL(clear_extent_bit);
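/*
 * Usage sketch (illustrative): a truncate-style caller forgets a range
 * regardless of its current state, waking any sleepers and deleting
 * the state records outright ('wake' == 1, 'delete' == 1):
 *
 *	clear_extent_bit(tree, start, (u64)-1,
 *			 EXTENT_DIRTY | EXTENT_DELALLOC, 1, 1, GFP_NOFS);
 */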
618 
619 static int wait_on_state(struct extent_map_tree *tree,
620 			 struct extent_state *state)
621 {
622 	DEFINE_WAIT(wait);
623 	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
624 	read_unlock_irq(&tree->lock);
625 	schedule();
626 	read_lock_irq(&tree->lock);
627 	finish_wait(&state->wq, &wait);
628 	return 0;
629 }
630 
631 /*
632  * waits for one or more bits to clear on a range in the state tree.
633  * The range [start, end] is inclusive.
634  * The tree lock is taken by this function
635  */
636 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
637 {
638 	struct extent_state *state;
639 	struct rb_node *node;
640 
641 	read_lock_irq(&tree->lock);
642 again:
643 	while (1) {
644 		/*
645 		 * this search will find all the extents that end after
646 		 * our range starts
647 		 */
648 		node = tree_search(&tree->state, start);
649 		if (!node)
650 			break;
651 
652 		state = rb_entry(node, struct extent_state, rb_node);
653 
654 		if (state->start > end)
655 			goto out;
656 
657 		if (state->state & bits) {
658 			start = state->start;
659 			atomic_inc(&state->refs);
660 			wait_on_state(tree, state);
661 			free_extent_state(state);
662 			goto again;
663 		}
664 		start = state->end + 1;
665 
666 		if (start > end)
667 			break;
668 
669 		if (need_resched()) {
670 			read_unlock_irq(&tree->lock);
671 			cond_resched();
672 			read_lock_irq(&tree->lock);
673 		}
674 	}
675 out:
676 	read_unlock_irq(&tree->lock);
677 	return 0;
678 }
679 EXPORT_SYMBOL(wait_extent_bit);
680 
681 static void set_state_bits(struct extent_map_tree *tree,
682 			   struct extent_state *state,
683 			   int bits)
684 {
685 	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
686 		u64 range = state->end - state->start + 1;
687 		tree->dirty_bytes += range;
688 	}
689 	state->state |= bits;
690 }
691 
692 /*
693  * set some bits on a range in the tree.  This may require allocations
694  * or sleeping, so the gfp mask is used to indicate what is allowed.
695  *
696  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
697  * range already has the desired bits set.  The start of the existing
698  * range is returned in failed_start in this case.
699  *
700  * [start, end] is inclusive
701  * This takes the tree lock.
702  */
703 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
704 		   int exclusive, u64 *failed_start, gfp_t mask)
705 {
706 	struct extent_state *state;
707 	struct extent_state *prealloc = NULL;
708 	struct rb_node *node;
709 	unsigned long flags;
710 	int err = 0;
711 	int set;
712 	u64 last_start;
713 	u64 last_end;
714 again:
715 	if (!prealloc && (mask & __GFP_WAIT)) {
716 		prealloc = alloc_extent_state(mask);
717 		if (!prealloc)
718 			return -ENOMEM;
719 	}
720 
721 	write_lock_irqsave(&tree->lock, flags);
722 	/*
723 	 * this search will find all the extents that end after
724 	 * our range starts.
725 	 */
726 	node = tree_search(&tree->state, start);
727 	if (!node) {
728 		err = insert_state(tree, prealloc, start, end, bits);
729 		prealloc = NULL;
730 		BUG_ON(err == -EEXIST);
731 		goto out;
732 	}
733 
734 	state = rb_entry(node, struct extent_state, rb_node);
735 	last_start = state->start;
736 	last_end = state->end;
737 
738 	/*
739 	 * | ---- desired range ---- |
740 	 * | state |
741 	 *
742 	 * Just lock what we found and keep going
743 	 */
744 	if (state->start == start && state->end <= end) {
745 		set = state->state & bits;
746 		if (set && exclusive) {
747 			*failed_start = state->start;
748 			err = -EEXIST;
749 			goto out;
750 		}
751 		set_state_bits(tree, state, bits);
752 		start = state->end + 1;
753 		merge_state(tree, state);
754 		goto search_again;
755 	}
756 
757 	/*
758 	 *     | ---- desired range ---- |
759 	 * | state |
760 	 *   or
761 	 * | ------------- state -------------- |
762 	 *
763 	 * We need to split the extent we found, and may flip bits on
764 	 * second half.
765 	 *
766 	 * If the extent we found extends past our
767 	 * range, we just split and search again.  It'll get split
768 	 * again the next time though.
769 	 *
770 	 * If the extent we found is inside our range, we set the
771 	 * desired bit on it.
772 	 */
773 	if (state->start < start) {
774 		set = state->state & bits;
775 		if (exclusive && set) {
776 			*failed_start = start;
777 			err = -EEXIST;
778 			goto out;
779 		}
780 		err = split_state(tree, state, prealloc, start);
781 		BUG_ON(err == -EEXIST);
782 		prealloc = NULL;
783 		if (err)
784 			goto out;
785 		if (state->end <= end) {
786 			set_state_bits(tree, state, bits);
787 			start = state->end + 1;
788 			merge_state(tree, state);
789 		} else {
790 			start = state->start;
791 		}
792 		goto search_again;
793 	}
794 	/*
795 	 * | ---- desired range ---- |
796 	 *     | state | or               | state |
797 	 *
798 	 * There's a hole, we need to insert something in it and
799 	 * ignore the extent we found.
800 	 */
801 	if (state->start > start) {
802 		u64 this_end;
803 		if (end < last_start)
804 			this_end = end;
805 		else
806 			this_end = last_start - 1;
807 		err = insert_state(tree, prealloc, start, this_end,
808 				   bits);
809 		prealloc = NULL;
810 		BUG_ON(err == -EEXIST);
811 		if (err)
812 			goto out;
813 		start = this_end + 1;
814 		goto search_again;
815 	}
816 	/*
817 	 * | ---- desired range ---- |
818 	 *                        | state |
819 	 * We need to split the extent, and set the bit
820 	 * on the first half
821 	 */
822 	if (state->start <= end && state->end > end) {
823 		set = state->state & bits;
824 		if (exclusive && set) {
825 			*failed_start = start;
826 			err = -EEXIST;
827 			goto out;
828 		}
829 		err = split_state(tree, state, prealloc, end + 1);
830 		BUG_ON(err == -EEXIST);
831 
832 		set_state_bits(tree, prealloc, bits);
833 		merge_state(tree, prealloc);
834 		prealloc = NULL;
835 		goto out;
836 	}
837 
838 	goto search_again;
839 
840 out:
841 	write_unlock_irqrestore(&tree->lock, flags);
842 	if (prealloc)
843 		free_extent_state(prealloc);
844 
845 	return err;
846 
847 search_again:
848 	if (start > end)
849 		goto out;
850 	write_unlock_irqrestore(&tree->lock, flags);
851 	if (mask & __GFP_WAIT)
852 		cond_resched();
853 	goto again;
854 }
855 EXPORT_SYMBOL(set_extent_bit);
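/*
 * Usage sketch of the 'exclusive' mode (illustrative): with
 * exclusive == 1 the call fails if any part of the range already has
 * the bits set, and failed_start reports where the collision begins;
 * lock_extent() below is built on exactly this:
 *
 *	u64 failed_start;
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		... wait on [failed_start, end], then retry ...
 */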
856 
857 /* wrappers around set/clear extent bit */
858 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
859 		     gfp_t mask)
860 {
861 	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
862 			      mask);
863 }
864 EXPORT_SYMBOL(set_extent_dirty);
865 
866 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
867 		    int bits, gfp_t mask)
868 {
869 	return set_extent_bit(tree, start, end, bits, 0, NULL,
870 			      mask);
871 }
872 EXPORT_SYMBOL(set_extent_bits);
873 
874 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
875 		      int bits, gfp_t mask)
876 {
877 	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
878 }
879 EXPORT_SYMBOL(clear_extent_bits);
880 
881 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
882 		     gfp_t mask)
883 {
884 	return set_extent_bit(tree, start, end,
885 			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
886 			      mask);
887 }
888 EXPORT_SYMBOL(set_extent_delalloc);
889 
890 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
891 		       gfp_t mask)
892 {
893 	return clear_extent_bit(tree, start, end,
894 				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
895 }
896 EXPORT_SYMBOL(clear_extent_dirty);
897 
898 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
899 		     gfp_t mask)
900 {
901 	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
902 			      mask);
903 }
904 EXPORT_SYMBOL(set_extent_new);
905 
906 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
907 		       gfp_t mask)
908 {
909 	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
910 }
911 EXPORT_SYMBOL(clear_extent_new);
912 
913 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
914 			gfp_t mask)
915 {
916 	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
917 			      mask);
918 }
919 EXPORT_SYMBOL(set_extent_uptodate);
920 
921 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
922 			  gfp_t mask)
923 {
924 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
925 }
926 EXPORT_SYMBOL(clear_extent_uptodate);
927 
928 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
929 			 gfp_t mask)
930 {
931 	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
932 			      0, NULL, mask);
933 }
934 EXPORT_SYMBOL(set_extent_writeback);
935 
936 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
937 			   gfp_t mask)
938 {
939 	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
940 }
941 EXPORT_SYMBOL(clear_extent_writeback);
942 
943 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
944 {
945 	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
946 }
947 EXPORT_SYMBOL(wait_on_extent_writeback);
948 
949 /*
950  * locks a range in ascending order, waiting for any locked regions
951  * it hits on the way.  [start, end] are inclusive, and this will sleep.
952  */
953 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
954 {
955 	int err;
956 	u64 failed_start;
957 	while (1) {
958 		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
959 				     &failed_start, mask);
960 		if (err == -EEXIST && (mask & __GFP_WAIT)) {
961 			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
962 			start = failed_start;
963 		} else {
964 			break;
965 		}
966 		WARN_ON(start > end);
967 	}
968 	return err;
969 }
970 EXPORT_SYMBOL(lock_extent);
971 
972 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
973 		  gfp_t mask)
974 {
975 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
976 }
977 EXPORT_SYMBOL(unlock_extent);
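/*
 * Typical pairing (illustrative): both ends of the range are
 * inclusive, and callers normally unlock exactly the range they
 * locked:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... operate on [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */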
978 
979 /*
980  * helper function to set pages and extents in the tree dirty
981  */
982 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
983 {
984 	unsigned long index = start >> PAGE_CACHE_SHIFT;
985 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
986 	struct page *page;
987 
988 	while (index <= end_index) {
989 		page = find_get_page(tree->mapping, index);
990 		BUG_ON(!page);
991 		__set_page_dirty_nobuffers(page);
992 		page_cache_release(page);
993 		index++;
994 	}
995 	set_extent_dirty(tree, start, end, GFP_NOFS);
996 	return 0;
997 }
998 EXPORT_SYMBOL(set_range_dirty);
999 
1000 /*
1001  * helper function to set both pages and extents in the tree writeback
1002  */
1003 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
1004 {
1005 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1006 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1007 	struct page *page;
1008 
1009 	while (index <= end_index) {
1010 		page = find_get_page(tree->mapping, index);
1011 		BUG_ON(!page);
1012 		set_page_writeback(page);
1013 		page_cache_release(page);
1014 		index++;
1015 	}
1016 	set_extent_writeback(tree, start, end, GFP_NOFS);
1017 	return 0;
1018 }
1019 EXPORT_SYMBOL(set_range_writeback);
1020 
1021 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
1022 			  u64 *start_ret, u64 *end_ret, int bits)
1023 {
1024 	struct rb_node *node;
1025 	struct extent_state *state;
1026 	int ret = 1;
1027 
1028 	read_lock_irq(&tree->lock);
1029 	/*
1030 	 * this search will find all the extents that end after
1031 	 * our range starts.
1032 	 */
1033 	node = tree_search(&tree->state, start);
1034 	if (!node || IS_ERR(node)) {
1035 		goto out;
1036 	}
1037 
1038 	while (1) {
1039 		state = rb_entry(node, struct extent_state, rb_node);
1040 		if (state->end >= start && (state->state & bits)) {
1041 			*start_ret = state->start;
1042 			*end_ret = state->end;
1043 			ret = 0;
1044 			break;
1045 		}
1046 		node = rb_next(node);
1047 		if (!node)
1048 			break;
1049 	}
1050 out:
1051 	read_unlock_irq(&tree->lock);
1052 	return ret;
1053 }
1054 EXPORT_SYMBOL(find_first_extent_bit);
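/*
 * Iteration sketch (illustrative): the function returns 0 and fills in
 * the range when a matching extent is found, non-zero otherwise, so
 * all dirty ranges from 'cur' upward can be walked like this:
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */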
1055 
1056 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1057 			     u64 *start, u64 *end, u64 max_bytes)
1058 {
1059 	struct rb_node *node;
1060 	struct extent_state *state;
1061 	u64 cur_start = *start;
1062 	u64 found = 0;
1063 	u64 total_bytes = 0;
1064 
1065 	write_lock_irq(&tree->lock);
1066 	/*
1067 	 * this search will find all the extents that end after
1068 	 * our range starts.
1069 	 */
1070 search_again:
1071 	node = tree_search(&tree->state, cur_start);
1072 	if (!node || IS_ERR(node)) {
1073 		*end = (u64)-1;
1074 		goto out;
1075 	}
1076 
1077 	while (1) {
1078 		state = rb_entry(node, struct extent_state, rb_node);
1079 		if (found && state->start != cur_start) {
1080 			goto out;
1081 		}
1082 		if (!(state->state & EXTENT_DELALLOC)) {
1083 			if (!found)
1084 				*end = state->end;
1085 			goto out;
1086 		}
1087 		if (!found) {
1088 			struct extent_state *prev_state;
1089 			struct rb_node *prev_node = node;
1090 			while (1) {
1091 				prev_node = rb_prev(prev_node);
1092 				if (!prev_node)
1093 					break;
1094 				prev_state = rb_entry(prev_node,
1095 						      struct extent_state,
1096 						      rb_node);
1097 				if (!(prev_state->state & EXTENT_DELALLOC))
1098 					break;
1099 				state = prev_state;
1100 				node = prev_node;
1101 			}
1102 		}
1103 		if (state->state & EXTENT_LOCKED) {
1104 			DEFINE_WAIT(wait);
1105 			atomic_inc(&state->refs);
1106 			prepare_to_wait(&state->wq, &wait,
1107 					TASK_UNINTERRUPTIBLE);
1108 			write_unlock_irq(&tree->lock);
1109 			schedule();
1110 			write_lock_irq(&tree->lock);
1111 			finish_wait(&state->wq, &wait);
1112 			free_extent_state(state);
1113 			goto search_again;
1114 		}
1115 		state->state |= EXTENT_LOCKED;
1116 		if (!found)
1117 			*start = state->start;
1118 		found++;
1119 		*end = state->end;
1120 		cur_start = state->end + 1;
1121 		node = rb_next(node);
1122 		if (!node)
1123 			break;
1124 		total_bytes += state->end - state->start + 1;
1125 		if (total_bytes >= max_bytes)
1126 			break;
1127 	}
1128 out:
1129 	write_unlock_irq(&tree->lock);
1130 	return found;
1131 }
1132 
1133 u64 count_range_bits(struct extent_map_tree *tree,
1134 		     u64 *start, u64 max_bytes, unsigned long bits)
1135 {
1136 	struct rb_node *node;
1137 	struct extent_state *state;
1138 	u64 cur_start = *start;
1139 	u64 total_bytes = 0;
1140 	int found = 0;
1141 
1142 	write_lock_irq(&tree->lock);
1143 	if (bits == EXTENT_DIRTY) {
1144 		*start = 0;
1145 		total_bytes = tree->dirty_bytes;
1146 		goto out;
1147 	}
1148 	/*
1149 	 * this search will find all the extents that end after
1150 	 * our range starts.
1151 	 */
1152 	node = tree_search(&tree->state, cur_start);
1153 	if (!node || IS_ERR(node)) {
1154 		goto out;
1155 	}
1156 
1157 	while (1) {
1158 		state = rb_entry(node, struct extent_state, rb_node);
1159 		if ((state->state & bits)) {
1160 			total_bytes += state->end - state->start + 1;
1161 			if (total_bytes >= max_bytes)
1162 				break;
1163 			if (!found) {
1164 				*start = state->start;
1165 				found = 1;
1166 			}
1167 		}
1168 		node = rb_next(node);
1169 		if (!node)
1170 			break;
1171 	}
1172 out:
1173 	write_unlock_irq(&tree->lock);
1174 	return total_bytes;
1175 }
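/*
 * Usage sketch (illustrative): counts bytes with the given bits set,
 * starting at *start and capped at max_bytes, moving *start up to the
 * first matching extent.  EXTENT_DIRTY is special-cased above to
 * return the precounted tree->dirty_bytes:
 *
 *	u64 first = offset;
 *	u64 bytes = count_range_bits(tree, &first, max_bytes,
 *				     EXTENT_LOCKED);
 */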
1176 
1177 /*
1178  * helper function to lock both pages and extents in the tree.
1179  * pages must be locked first.
1180  */
1181 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1182 {
1183 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1184 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1185 	struct page *page;
1186 	int err;
1187 
1188 	while (index <= end_index) {
1189 		page = grab_cache_page(tree->mapping, index);
1190 		if (!page) {
1191 			err = -ENOMEM;
1192 			goto failed;
1193 		}
1194 		if (IS_ERR(page)) {
1195 			err = PTR_ERR(page);
1196 			goto failed;
1197 		}
1198 		index++;
1199 	}
1200 	lock_extent(tree, start, end, GFP_NOFS);
1201 	return 0;
1202 
1203 failed:
1204 	/*
1205 	 * we failed above in getting the page at 'index', so we undo here
1206 	 * up to but not including the page at 'index'
1207 	 */
1208 	end_index = index;
1209 	index = start >> PAGE_CACHE_SHIFT;
1210 	while (index < end_index) {
1211 		page = find_get_page(tree->mapping, index);
1212 		unlock_page(page);
1213 		page_cache_release(page);
1214 		index++;
1215 	}
1216 	return err;
1217 }
1218 EXPORT_SYMBOL(lock_range);
1219 
1220 /*
1221  * helper function to unlock both pages and extents in the tree.
1222  */
1223 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1224 {
1225 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1226 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1227 	struct page *page;
1228 
1229 	while (index <= end_index) {
1230 		page = find_get_page(tree->mapping, index);
1231 		unlock_page(page);
1232 		page_cache_release(page);
1233 		index++;
1234 	}
1235 	unlock_extent(tree, start, end, GFP_NOFS);
1236 	return 0;
1237 }
1238 EXPORT_SYMBOL(unlock_range);
1239 
1240 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1241 {
1242 	struct rb_node *node;
1243 	struct extent_state *state;
1244 	int ret = 0;
1245 
1246 	write_lock_irq(&tree->lock);
1247 	/*
1248 	 * this search will find all the extents that end after
1249 	 * our range starts.
1250 	 */
1251 	node = tree_search(&tree->state, start);
1252 	if (!node || IS_ERR(node)) {
1253 		ret = -ENOENT;
1254 		goto out;
1255 	}
1256 	state = rb_entry(node, struct extent_state, rb_node);
1257 	if (state->start != start) {
1258 		ret = -ENOENT;
1259 		goto out;
1260 	}
1261 	state->private = private;
1262 out:
1263 	write_unlock_irq(&tree->lock);
1264 	return ret;
1265 }
1266 
1267 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1268 {
1269 	struct rb_node *node;
1270 	struct extent_state *state;
1271 	int ret = 0;
1272 
1273 	read_lock_irq(&tree->lock);
1274 	/*
1275 	 * this search will find all the extents that end after
1276 	 * our range starts.
1277 	 */
1278 	node = tree_search(&tree->state, start);
1279 	if (!node || IS_ERR(node)) {
1280 		ret = -ENOENT;
1281 		goto out;
1282 	}
1283 	state = rb_entry(node, struct extent_state, rb_node);
1284 	if (state->start != start) {
1285 		ret = -ENOENT;
1286 		goto out;
1287 	}
1288 	*private = state->private;
1289 out:
1290 	read_unlock_irq(&tree->lock);
1291 	return ret;
1292 }
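/*
 * Usage sketch (illustrative; 'csum' is a hypothetical payload):
 * 'private' is an opaque per-state value, and 'start' must exactly
 * match the state's start offset:
 *
 *	set_state_private(tree, extent_start, csum);
 *	...
 *	u64 priv;
 *	if (!get_state_private(tree, extent_start, &priv))
 *		csum = priv;
 */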
1293 
1294 /*
1295  * searches a range in the state tree for a given mask.
1296  * If 'filled' == 1, this returns 1 only if every extent in the range
1297  * has the bits set.  Otherwise, 1 is returned if any bit in the
1298  * range is found set.
1299  */
1300 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1301 		   int bits, int filled)
1302 {
1303 	struct extent_state *state = NULL;
1304 	struct rb_node *node;
1305 	int bitset = 0;
1306 
1307 	read_lock_irq(&tree->lock);
1308 	node = tree_search(&tree->state, start);
1309 	while (node && start <= end) {
1310 		state = rb_entry(node, struct extent_state, rb_node);
1311 
1312 		if (filled && state->start > start) {
1313 			bitset = 0;
1314 			break;
1315 		}
1316 
1317 		if (state->start > end)
1318 			break;
1319 
1320 		if (state->state & bits) {
1321 			bitset = 1;
1322 			if (!filled)
1323 				break;
1324 		} else if (filled) {
1325 			bitset = 0;
1326 			break;
1327 		}
1328 		start = state->end + 1;
1329 		if (start > end)
1330 			break;
1331 		node = rb_next(node);
1332 	}
1333 	read_unlock_irq(&tree->lock);
1334 	return bitset;
1335 }
1336 EXPORT_SYMBOL(test_range_bit);
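/*
 * Usage sketch (illustrative): 'filled' selects between all-or-any
 * semantics over the range:
 *
 *	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
 *		... every byte in [start, end] is uptodate ...
 *	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
 *		... some byte in [start, end] is locked ...
 */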
1337 
1338 /*
1339  * helper function to set a given page up to date if all the
1340  * extents in the tree for that page are up to date
1341  */
1342 static int check_page_uptodate(struct extent_map_tree *tree,
1343 			       struct page *page)
1344 {
1345 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1346 	u64 end = start + PAGE_CACHE_SIZE - 1;
1347 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1348 		SetPageUptodate(page);
1349 	return 0;
1350 }
1351 
1352 /*
1353  * helper function to unlock a page if all the extents in the tree
1354  * for that page are unlocked
1355  */
1356 static int check_page_locked(struct extent_map_tree *tree,
1357 			     struct page *page)
1358 {
1359 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1360 	u64 end = start + PAGE_CACHE_SIZE - 1;
1361 	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1362 		unlock_page(page);
1363 	return 0;
1364 }
1365 
1366 /*
1367  * helper function to end page writeback if all the extents
1368  * in the tree for that page are done with writeback
1369  */
1370 static int check_page_writeback(struct extent_map_tree *tree,
1371 			     struct page *page)
1372 {
1373 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1374 	u64 end = start + PAGE_CACHE_SIZE - 1;
1375 	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1376 		end_page_writeback(page);
1377 	return 0;
1378 }
1379 
1380 /* lots and lots of room for performance fixes in the end_bio funcs */
1381 
1382 /*
1383  * after a writepage IO is done, we need to:
1384  * clear the uptodate bits on error
1385  * clear the writeback bits in the extent tree for this IO
1386  * end_page_writeback if the page has no more pending IO
1387  *
1388  * Scheduling is not allowed, so the extent state tree is expected
1389  * to have one and only one object corresponding to this IO.
1390  */
1391 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1392 static void end_bio_extent_writepage(struct bio *bio, int err)
1393 #else
1394 static int end_bio_extent_writepage(struct bio *bio,
1395 				   unsigned int bytes_done, int err)
1396 #endif
1397 {
1398 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1399 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1400 	struct extent_map_tree *tree = bio->bi_private;
1401 	u64 start;
1402 	u64 end;
1403 	int whole_page;
1404 
1405 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1406 	if (bio->bi_size)
1407 		return 1;
1408 #endif
1409 
1410 	do {
1411 		struct page *page = bvec->bv_page;
1412 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1413 			 bvec->bv_offset;
1414 		end = start + bvec->bv_len - 1;
1415 
1416 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1417 			whole_page = 1;
1418 		else
1419 			whole_page = 0;
1420 
1421 		if (--bvec >= bio->bi_io_vec)
1422 			prefetchw(&bvec->bv_page->flags);
1423 
1424 		if (!uptodate) {
1425 			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1426 			ClearPageUptodate(page);
1427 			SetPageError(page);
1428 		}
1429 		clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1430 
1431 		if (whole_page)
1432 			end_page_writeback(page);
1433 		else
1434 			check_page_writeback(tree, page);
1435 		if (tree->ops && tree->ops->writepage_end_io_hook)
1436 			tree->ops->writepage_end_io_hook(page, start, end);
1437 	} while (bvec >= bio->bi_io_vec);
1438 
1439 	bio_put(bio);
1440 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1441 	return 0;
1442 #endif
1443 }
1444 
1445 /*
1446  * after a readpage IO is done, we need to:
1447  * clear the uptodate bits on error
1448  * set the uptodate bits if things worked
1449  * set the page up to date if all extents in the tree are uptodate
1450  * clear the lock bit in the extent tree
1451  * unlock the page if there are no other extents locked for it
1452  *
1453  * Scheduling is not allowed, so the extent state tree is expected
1454  * to have one and only one object corresponding to this IO.
1455  */
1456 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1457 static void end_bio_extent_readpage(struct bio *bio, int err)
1458 #else
1459 static int end_bio_extent_readpage(struct bio *bio,
1460 				   unsigned int bytes_done, int err)
1461 #endif
1462 {
1463 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1464 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1465 	struct extent_map_tree *tree = bio->bi_private;
1466 	u64 start;
1467 	u64 end;
1468 	int whole_page;
1469 	int ret;
1470 
1471 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1472 	if (bio->bi_size)
1473 		return 1;
1474 #endif
1475 
1476 	do {
1477 		struct page *page = bvec->bv_page;
1478 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1479 			bvec->bv_offset;
1480 		end = start + bvec->bv_len - 1;
1481 
1482 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1483 			whole_page = 1;
1484 		else
1485 			whole_page = 0;
1486 
1487 		if (--bvec >= bio->bi_io_vec)
1488 			prefetchw(&bvec->bv_page->flags);
1489 
1490 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1491 			ret = tree->ops->readpage_end_io_hook(page, start, end);
1492 			if (ret)
1493 				uptodate = 0;
1494 		}
1495 		if (uptodate) {
1496 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1497 			if (whole_page)
1498 				SetPageUptodate(page);
1499 			else
1500 				check_page_uptodate(tree, page);
1501 		} else {
1502 			ClearPageUptodate(page);
1503 			SetPageError(page);
1504 		}
1505 
1506 		unlock_extent(tree, start, end, GFP_ATOMIC);
1507 
1508 		if (whole_page)
1509 			unlock_page(page);
1510 		else
1511 			check_page_locked(tree, page);
1512 	} while (bvec >= bio->bi_io_vec);
1513 
1514 	bio_put(bio);
1515 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1516 	return 0;
1517 #endif
1518 }
1519 
1520 /*
1521  * IO done from prepare_write is pretty simple, we just unlock
1522  * the structs in the extent tree when done, and set the uptodate bits
1523  * as appropriate.
1524  */
1525 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1526 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1527 #else
1528 static int end_bio_extent_preparewrite(struct bio *bio,
1529 				       unsigned int bytes_done, int err)
1530 #endif
1531 {
1532 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1533 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1534 	struct extent_map_tree *tree = bio->bi_private;
1535 	u64 start;
1536 	u64 end;
1537 
1538 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1539 	if (bio->bi_size)
1540 		return 1;
1541 #endif
1542 
1543 	do {
1544 		struct page *page = bvec->bv_page;
1545 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1546 			bvec->bv_offset;
1547 		end = start + bvec->bv_len - 1;
1548 
1549 		if (--bvec >= bio->bi_io_vec)
1550 			prefetchw(&bvec->bv_page->flags);
1551 
1552 		if (uptodate) {
1553 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1554 		} else {
1555 			ClearPageUptodate(page);
1556 			SetPageError(page);
1557 		}
1558 
1559 		unlock_extent(tree, start, end, GFP_ATOMIC);
1560 
1561 	} while (bvec >= bio->bi_io_vec);
1562 
1563 	bio_put(bio);
1564 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1565 	return 0;
1566 #endif
1567 }
1568 
1569 static struct bio *
1570 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1571 		 gfp_t gfp_flags)
1572 {
1573 	struct bio *bio;
1574 
1575 	bio = bio_alloc(gfp_flags, nr_vecs);
1576 
1577 	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1578 		while (!bio && (nr_vecs /= 2))
1579 			bio = bio_alloc(gfp_flags, nr_vecs);
1580 	}
1581 
1582 	if (bio) {
1583 		bio->bi_bdev = bdev;
1584 		bio->bi_sector = first_sector;
1585 	}
1586 	return bio;
1587 }
1588 
1589 static int submit_one_bio(int rw, struct bio *bio)
1590 {
1591 	u64 maxsector;
1592 	int ret = 0;
1593 
1594 	bio_get(bio);
1595 
1596 	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1597 	if (maxsector < bio->bi_sector) {
1598 		printk("sector too large max %Lu got %llu\n", maxsector,
1599 			(unsigned long long)bio->bi_sector);
1600 		WARN_ON(1);
1601 	}
1602 
1603 	submit_bio(rw, bio);
1604 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
1605 		ret = -EOPNOTSUPP;
1606 	bio_put(bio);
1607 	return ret;
1608 }
1609 
1610 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1611 			      struct page *page, sector_t sector,
1612 			      size_t size, unsigned long offset,
1613 			      struct block_device *bdev,
1614 			      struct bio **bio_ret,
1615 			      unsigned long max_pages,
1616 			      bio_end_io_t end_io_func)
1617 {
1618 	int ret = 0;
1619 	struct bio *bio;
1620 	int nr;
1621 
1622 	if (bio_ret && *bio_ret) {
1623 		bio = *bio_ret;
1624 		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1625 		    bio_add_page(bio, page, size, offset) < size) {
1626 			ret = submit_one_bio(rw, bio);
1627 			bio = NULL;
1628 		} else {
1629 			return 0;
1630 		}
1631 	}
1632 	nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1633 	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1634 	if (!bio) {
1635 		printk("failed to allocate bio nr %d\n", nr);
		return -ENOMEM;	/* don't dereference a NULL bio below */
1636 	}
1637 	bio_add_page(bio, page, size, offset);
1638 	bio->bi_end_io = end_io_func;
1639 	bio->bi_private = tree;
1640 	if (bio_ret) {
1641 		*bio_ret = bio;
1642 	} else {
1643 		ret = submit_one_bio(rw, bio);
1644 	}
1645 
1646 	return ret;
1647 }
1648 
1649 void set_page_extent_mapped(struct page *page)
1650 {
1651 	if (!PagePrivate(page)) {
1652 		SetPagePrivate(page);
1653 		WARN_ON(!page->mapping->a_ops->invalidatepage);
1654 		set_page_private(page, EXTENT_PAGE_PRIVATE);
1655 		page_cache_get(page);
1656 	}
1657 }
1658 
1659 /*
1660  * basic readpage implementation.  Locked extent state structs are inserted
1661  * into the tree and removed when the IO is done (by the end_io
1662  * handlers)
1663  */
1664 static int __extent_read_full_page(struct extent_map_tree *tree,
1665 				   struct page *page,
1666 				   get_extent_t *get_extent,
1667 				   struct bio **bio)
1668 {
1669 	struct inode *inode = page->mapping->host;
1670 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1671 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1672 	u64 end;
1673 	u64 cur = start;
1674 	u64 extent_offset;
1675 	u64 last_byte = i_size_read(inode);
1676 	u64 block_start;
1677 	u64 cur_end;
1678 	sector_t sector;
1679 	struct extent_map *em;
1680 	struct block_device *bdev;
1681 	int ret;
1682 	int nr = 0;
1683 	size_t page_offset = 0;
1684 	size_t iosize;
1685 	size_t blocksize = inode->i_sb->s_blocksize;
1686 
1687 	set_page_extent_mapped(page);
1688 
1689 	end = page_end;
1690 	lock_extent(tree, start, end, GFP_NOFS);
1691 
1692 	while (cur <= end) {
1693 		if (cur >= last_byte) {
1694 			char *userpage;
1695 			iosize = PAGE_CACHE_SIZE - page_offset;
1696 			userpage = kmap_atomic(page, KM_USER0);
1697 			memset(userpage + page_offset, 0, iosize);
1698 			flush_dcache_page(page);
1699 			kunmap_atomic(userpage, KM_USER0);
1700 			set_extent_uptodate(tree, cur, cur + iosize - 1,
1701 					    GFP_NOFS);
1702 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1703 			break;
1704 		}
1705 		em = get_extent(inode, page, page_offset, cur, end, 0);
1706 		if (IS_ERR(em) || !em) {
1707 			SetPageError(page);
1708 			unlock_extent(tree, cur, end, GFP_NOFS);
1709 			break;
1710 		}
1711 
1712 		extent_offset = cur - em->start;
1713 		BUG_ON(em->end < cur);
1714 		BUG_ON(end < cur);
1715 
1716 		iosize = min(em->end - cur, end - cur) + 1;
1717 		cur_end = min(em->end, end);
1718 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1719 		sector = (em->block_start + extent_offset) >> 9;
1720 		bdev = em->bdev;
1721 		block_start = em->block_start;
1722 		free_extent_map(em);
1723 		em = NULL;
1724 
1725 		/* we've found a hole, just zero and go on */
1726 		if (block_start == EXTENT_MAP_HOLE) {
1727 			char *userpage;
1728 			userpage = kmap_atomic(page, KM_USER0);
1729 			memset(userpage + page_offset, 0, iosize);
1730 			flush_dcache_page(page);
1731 			kunmap_atomic(userpage, KM_USER0);
1732 
1733 			set_extent_uptodate(tree, cur, cur + iosize - 1,
1734 					    GFP_NOFS);
1735 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1736 			cur = cur + iosize;
1737 			page_offset += iosize;
1738 			continue;
1739 		}
1740 		/* the get_extent function already copied into the page */
1741 		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1742 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1743 			cur = cur + iosize;
1744 			page_offset += iosize;
1745 			continue;
1746 		}
1747 
1748 		ret = 0;
1749 		if (tree->ops && tree->ops->readpage_io_hook) {
1750 			ret = tree->ops->readpage_io_hook(page, cur,
1751 							  cur + iosize - 1);
1752 		}
1753 		if (!ret) {
1754 			unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1755 			nr -= page->index;
1756 			ret = submit_extent_page(READ, tree, page,
1757 					 sector, iosize, page_offset,
1758 					 bdev, bio, nr,
1759 					 end_bio_extent_readpage);
1760 		}
1761 		if (ret)
1762 			SetPageError(page);
1763 		cur = cur + iosize;
1764 		page_offset += iosize;
1765 		nr++;
1766 	}
1767 	if (!nr) {
1768 		if (!PageError(page))
1769 			SetPageUptodate(page);
1770 		unlock_page(page);
1771 	}
1772 	return 0;
1773 }
1774 
1775 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1776 			    get_extent_t *get_extent)
1777 {
1778 	struct bio *bio = NULL;
1779 	int ret;
1780 
1781 	ret = __extent_read_full_page(tree, page, get_extent, &bio);
1782 	if (bio)
1783 		submit_one_bio(READ, bio);
1784 	return ret;
1785 }
1786 EXPORT_SYMBOL(extent_read_full_page);
1787 
1788 /*
1789  * the writepage semantics are similar to regular writepage.  extent
1790  * records are inserted to lock ranges in the tree, and as dirty areas
1791  * are found, they are marked writeback.  Then the lock bits are removed
1792  * and the end_io handler clears the writeback ranges
1793  */
1794 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1795 			      void *data)
1796 {
1797 	struct inode *inode = page->mapping->host;
1798 	struct extent_page_data *epd = data;
1799 	struct extent_map_tree *tree = epd->tree;
1800 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1801 	u64 delalloc_start;
1802 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1803 	u64 end;
1804 	u64 cur = start;
1805 	u64 extent_offset;
1806 	u64 last_byte = i_size_read(inode);
1807 	u64 block_start;
1808 	u64 iosize;
1809 	sector_t sector;
1810 	struct extent_map *em;
1811 	struct block_device *bdev;
1812 	int ret;
1813 	int nr = 0;
1814 	size_t page_offset = 0;
1815 	size_t blocksize;
1816 	loff_t i_size = i_size_read(inode);
1817 	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1818 	u64 nr_delalloc;
1819 	u64 delalloc_end;
1820 
1821 	WARN_ON(!PageLocked(page));
1822 	if (page->index > end_index) {
1823 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1824 		unlock_page(page);
1825 		return 0;
1826 	}
1827 
1828 	if (page->index == end_index) {
1829 		char *userpage;
1830 
1831 		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1832 
1833 		userpage = kmap_atomic(page, KM_USER0);
1834 		memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
1835 		flush_dcache_page(page);
1836 		kunmap_atomic(userpage, KM_USER0);
1837 	}
1838 
1839 	set_page_extent_mapped(page);
1840 
1841 	delalloc_start = start;
1842 	delalloc_end = 0;
1843 	while (delalloc_end < page_end) {
1844 		nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1845 						       &delalloc_end,
1846 						       128 * 1024 * 1024);
1847 		if (nr_delalloc == 0) {
1848 			delalloc_start = delalloc_end + 1;
1849 			continue;
1850 		}
1851 		tree->ops->fill_delalloc(inode, delalloc_start,
1852 					 delalloc_end);
1853 		clear_extent_bit(tree, delalloc_start,
1854 				 delalloc_end,
1855 				 EXTENT_LOCKED | EXTENT_DELALLOC,
1856 				 1, 0, GFP_NOFS);
1857 		delalloc_start = delalloc_end + 1;
1858 	}
1859 	lock_extent(tree, start, page_end, GFP_NOFS);
1860 
1861 	end = page_end;
1862 	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1863 		printk("found delalloc bits after lock_extent\n");
1864 	}
1865 
1866 	if (last_byte <= start) {
1867 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1868 		goto done;
1869 	}
1870 
1871 	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1872 	blocksize = inode->i_sb->s_blocksize;
1873 
1874 	while (cur <= end) {
1875 		if (cur >= last_byte) {
1876 			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1877 			break;
1878 		}
1879 		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1880 		if (IS_ERR(em) || !em) {
1881 			SetPageError(page);
1882 			break;
1883 		}
1884 
1885 		extent_offset = cur - em->start;
1886 		BUG_ON(em->end < cur);
1887 		BUG_ON(end < cur);
1888 		iosize = min(em->end - cur, end - cur) + 1;
1889 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1890 		sector = (em->block_start + extent_offset) >> 9;
1891 		bdev = em->bdev;
1892 		block_start = em->block_start;
1893 		free_extent_map(em);
1894 		em = NULL;
1895 
1896 		if (block_start == EXTENT_MAP_HOLE ||
1897 		    block_start == EXTENT_MAP_INLINE) {
1898 			clear_extent_dirty(tree, cur,
1899 					   cur + iosize - 1, GFP_NOFS);
1900 			cur = cur + iosize;
1901 			page_offset += iosize;
1902 			continue;
1903 		}
1904 
1905 		/* leave this out until we have a page_mkwrite call */
1906 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1907 				   EXTENT_DIRTY, 0)) {
1908 			cur = cur + iosize;
1909 			page_offset += iosize;
1910 			continue;
1911 		}
1912 		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1913 		if (tree->ops && tree->ops->writepage_io_hook) {
1914 			ret = tree->ops->writepage_io_hook(page, cur,
1915 						cur + iosize - 1);
1916 		} else {
1917 			ret = 0;
1918 		}
1919 		if (ret)
1920 			SetPageError(page);
1921 		else {
1922 			unsigned long max_nr = end_index + 1;
1923 			set_range_writeback(tree, cur, cur + iosize - 1);
1924 			if (!PageWriteback(page)) {
1925 				printk("warning page %lu not writeback, "
1926 				       "cur %llu end %llu\n", page->index,
1927 				       (unsigned long long)cur,
1928 				       (unsigned long long)end);
1929 			}
1930 
1931 			ret = submit_extent_page(WRITE, tree, page, sector,
1932 						 iosize, page_offset, bdev,
1933 						 &epd->bio, max_nr,
1934 						 end_bio_extent_writepage);
1935 			if (ret)
1936 				SetPageError(page);
1937 		}
1938 		cur = cur + iosize;
1939 		page_offset += iosize;
1940 		nr++;
1941 	}
1942 done:
1943 	if (nr == 0) {
1944 		/* make sure the mapping tag for page dirty gets cleared */
1945 		set_page_writeback(page);
1946 		end_page_writeback(page);
1947 	}
1948 	unlock_extent(tree, start, page_end, GFP_NOFS);
1949 	unlock_page(page);
1950 	return 0;
1951 }
1952 
1953 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1954 
1955 /* Taken directly from 2.6.23 for the 2.6.18 backport */
1956 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
1957                                 void *data);
1958 
1959 /**
1960  * write_cache_pages - walk the list of dirty pages of the given address space
1961  * and write all of them.
1962  * @mapping: address space structure to write
1963  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1964  * @writepage: function called for each page
1965  * @data: data passed to writepage function
1966  *
1967  * If a page is already under I/O, write_cache_pages() skips it, even
1968  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1969  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1970  * and msync() need to guarantee that all the data which was dirty at the time
1971  * the call was made gets new I/O started against it.  If wbc->sync_mode is
1972  * WB_SYNC_ALL then we were called for data integrity and we must wait for
1973  * existing IO to complete.
1974  */
1975 static int write_cache_pages(struct address_space *mapping,
1976 		      struct writeback_control *wbc, writepage_t writepage,
1977 		      void *data)
1978 {
1979 	struct backing_dev_info *bdi = mapping->backing_dev_info;
1980 	int ret = 0;
1981 	int done = 0;
1982 	struct pagevec pvec;
1983 	int nr_pages;
1984 	pgoff_t index;
1985 	pgoff_t end;		/* Inclusive */
1986 	int scanned = 0;
1987 	int range_whole = 0;
1988 
1989 	if (wbc->nonblocking && bdi_write_congested(bdi)) {
1990 		wbc->encountered_congestion = 1;
1991 		return 0;
1992 	}
1993 
1994 	pagevec_init(&pvec, 0);
1995 	if (wbc->range_cyclic) {
1996 		index = mapping->writeback_index; /* Start from prev offset */
1997 		end = -1;
1998 	} else {
1999 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2000 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
2001 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2002 			range_whole = 1;
2003 		scanned = 1;
2004 	}
2005 retry:
2006 	while (!done && (index <= end) &&
2007 	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2008 					      PAGECACHE_TAG_DIRTY,
2009 					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2010 		unsigned i;
2011 
2012 		scanned = 1;
2013 		for (i = 0; i < nr_pages; i++) {
2014 			struct page *page = pvec.pages[i];
2015 
2016 			/*
2017 			 * At this point we hold neither mapping->tree_lock nor
2018 			 * lock on the page itself: the page may be truncated or
2019 			 * invalidated (changing page->mapping to NULL), or even
2020 			 * swizzled back from swapper_space to tmpfs file
2021 			 * mapping
2022 			 */
2023 			lock_page(page);
2024 
2025 			if (unlikely(page->mapping != mapping)) {
2026 				unlock_page(page);
2027 				continue;
2028 			}
2029 
2030 			if (!wbc->range_cyclic && page->index > end) {
2031 				done = 1;
2032 				unlock_page(page);
2033 				continue;
2034 			}
2035 
2036 			if (wbc->sync_mode != WB_SYNC_NONE)
2037 				wait_on_page_writeback(page);
2038 
2039 			if (PageWriteback(page) ||
2040 			    !clear_page_dirty_for_io(page)) {
2041 				unlock_page(page);
2042 				continue;
2043 			}
2044 
2045 			ret = (*writepage)(page, wbc, data);
2046 
2047 			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2048 				unlock_page(page);
2049 				ret = 0;
2050 			}
2051 			if (ret || (--(wbc->nr_to_write) <= 0))
2052 				done = 1;
2053 			if (wbc->nonblocking && bdi_write_congested(bdi)) {
2054 				wbc->encountered_congestion = 1;
2055 				done = 1;
2056 			}
2057 		}
2058 		pagevec_release(&pvec);
2059 		cond_resched();
2060 	}
2061 	if (!scanned && !done) {
2062 		/*
2063 		 * We hit the last page and there is more work to be done: wrap
2064 		 * back to the start of the file
2065 		 */
2066 		scanned = 1;
2067 		index = 0;
2068 		goto retry;
2069 	}
2070 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2071 		mapping->writeback_index = index;
2072 	return ret;
2073 }
2074 #endif
2075 
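/*
 * writepage entry point: write out the dirty page we were handed, then
 * opportunistically push out up to 64 more dirty pages that follow it
 * in the file via write_cache_pages, so one writepage call can start a
 * larger contiguous IO
 */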
2076 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
2077 			  get_extent_t *get_extent,
2078 			  struct writeback_control *wbc)
2079 {
2080 	int ret;
2081 	struct address_space *mapping = page->mapping;
2082 	struct extent_page_data epd = {
2083 		.bio = NULL,
2084 		.tree = tree,
2085 		.get_extent = get_extent,
2086 	};
2087 	struct writeback_control wbc_writepages = {
2088 		.bdi		= wbc->bdi,
2089 		.sync_mode	= WB_SYNC_NONE,
2090 		.older_than_this = NULL,
2091 		.nr_to_write	= 64,
2092 		.range_start	= page_offset(page) + PAGE_CACHE_SIZE,
2093 		.range_end	= (loff_t)-1,
2094 	};
2095 
2097 	ret = __extent_writepage(page, wbc, &epd);
2098 
2099 	write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2100 	if (epd.bio) {
2101 		submit_one_bio(WRITE, epd.bio);
2102 	}
2103 	return ret;
2104 }
2105 EXPORT_SYMBOL(extent_write_full_page);
2106 
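/*
 * writepages entry point: hand every dirty page in the range off to
 * __extent_writepage and submit any bio left over at the end
 */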
2108 int extent_writepages(struct extent_map_tree *tree,
2109 		      struct address_space *mapping,
2110 		      get_extent_t *get_extent,
2111 		      struct writeback_control *wbc)
2112 {
2113 	int ret = 0;
2114 	struct extent_page_data epd = {
2115 		.bio = NULL,
2116 		.tree = tree,
2117 		.get_extent = get_extent,
2118 	};
2119 
2120 	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2121 	if (epd.bio) {
2122 		submit_one_bio(WRITE, epd.bio);
2123 	}
2124 	return ret;
2125 }
2126 EXPORT_SYMBOL(extent_writepages);
2127 
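/*
 * readpages entry point: add each page to the page cache and the LRU
 * and read it, collecting the reads into as few bios as possible
 */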
2128 int extent_readpages(struct extent_map_tree *tree,
2129 		     struct address_space *mapping,
2130 		     struct list_head *pages, unsigned nr_pages,
2131 		     get_extent_t get_extent)
2132 {
2133 	struct bio *bio = NULL;
2134 	unsigned page_idx;
2135 	struct pagevec pvec;
2136 
2137 	pagevec_init(&pvec, 0);
2138 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2139 		struct page *page = list_entry(pages->prev, struct page, lru);
2140 
2141 		prefetchw(&page->flags);
2142 		list_del(&page->lru);
2143 		/*
2144 		 * what we want to do here is call add_to_page_cache_lru,
2145 		 * but that isn't exported, so we reproduce it here
2146 		 */
2147 		if (!add_to_page_cache(page, mapping,
2148 					page->index, GFP_KERNEL)) {
2149 
2150 			/* open coding of lru_cache_add, also not exported */
2151 			page_cache_get(page);
2152 			if (!pagevec_add(&pvec, page))
2153 				__pagevec_lru_add(&pvec);
2154 			__extent_read_full_page(tree, page, get_extent, &bio);
2155 		}
2156 		page_cache_release(page);
2157 	}
2158 	if (pagevec_count(&pvec))
2159 		__pagevec_lru_add(&pvec);
2160 	BUG_ON(!list_empty(pages));
2161 	if (bio)
2162 		submit_one_bio(READ, bio);
2163 	return 0;
2164 }
2165 EXPORT_SYMBOL(extent_readpages);
2166 
2167 /*
2168  * basic invalidatepage code: it waits on any locked or writeback
2169  * ranges corresponding to the page, then deletes any extent state
2170  * records from the tree
2171  */
2172 int extent_invalidatepage(struct extent_map_tree *tree,
2173 			  struct page *page, unsigned long offset)
2174 {
2175 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2176 	u64 end = start + PAGE_CACHE_SIZE - 1;
2177 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2178 
2179 	start += (offset + blocksize - 1) & ~(blocksize - 1);
2180 	if (start > end)
2181 		return 0;
2182 
2183 	lock_extent(tree, start, end, GFP_NOFS);
2184 	wait_on_extent_writeback(tree, start, end);
2185 	clear_extent_bit(tree, start, end,
2186 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2187 			 1, 1, GFP_NOFS);
2188 	return 0;
2189 }
2190 EXPORT_SYMBOL(extent_invalidatepage);
2191 
2192 /*
2193  * simple commit_write call; marking the page dirty also marks the
2194  * extent records dirty, and i_size is updated when the write extends
2195  * the file
2195  */
2196 int extent_commit_write(struct extent_map_tree *tree,
2197 			struct inode *inode, struct page *page,
2198 			unsigned from, unsigned to)
2199 {
2200 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2201 
2202 	set_page_extent_mapped(page);
2203 	set_page_dirty(page);
2204 
2205 	if (pos > inode->i_size) {
2206 		i_size_write(inode, pos);
2207 		mark_inode_dirty(inode);
2208 	}
2209 	return 0;
2210 }
2211 EXPORT_SYMBOL(extent_commit_write);
2212 
2213 int extent_prepare_write(struct extent_map_tree *tree,
2214 			 struct inode *inode, struct page *page,
2215 			 unsigned from, unsigned to, get_extent_t *get_extent)
2216 {
2217 	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2218 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2219 	u64 block_start;
2220 	u64 orig_block_start;
2221 	u64 block_end;
2222 	u64 cur_end;
2223 	struct extent_map *em;
2224 	unsigned blocksize = 1 << inode->i_blkbits;
2225 	size_t page_offset = 0;
2226 	size_t block_off_start;
2227 	size_t block_off_end;
2228 	int err = 0;
2229 	int iocount = 0;
2230 	int ret = 0;
2231 	int isnew;
2232 
2233 	set_page_extent_mapped(page);
2234 
2235 	block_start = (page_start + from) & ~((u64)blocksize - 1);
2236 	block_end = (page_start + to - 1) | (blocksize - 1);
2237 	orig_block_start = block_start;
2238 
2239 	lock_extent(tree, page_start, page_end, GFP_NOFS);
2240 	while(block_start <= block_end) {
2241 		em = get_extent(inode, page, page_offset, block_start,
2242 				block_end, 1);
2243 		if (IS_ERR(em) || !em) {
2244 			/* don't return success when the mapping lookup failed */
2245 			err = IS_ERR(em) ? PTR_ERR(em) : -EIO;
2246 			goto err;
2247 		}
2246 		cur_end = min(block_end, em->end);
2247 		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2248 		block_off_end = block_off_start + blocksize;
2249 		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2250 
2251 		if (!PageUptodate(page) && isnew &&
2252 		    (block_off_end > to || block_off_start < from)) {
2253 			void *kaddr;
2254 
2255 			kaddr = kmap_atomic(page, KM_USER0);
2256 			if (block_off_end > to)
2257 				memset(kaddr + to, 0, block_off_end - to);
2258 			if (block_off_start < from)
2259 				memset(kaddr + block_off_start, 0,
2260 				       from - block_off_start);
2261 			flush_dcache_page(page);
2262 			kunmap_atomic(kaddr, KM_USER0);
2263 		}
2264 		if ((em->block_start != EXTENT_MAP_HOLE &&
2265 		     em->block_start != EXTENT_MAP_INLINE) &&
2266 		    !isnew && !PageUptodate(page) &&
2267 		    (block_off_end > to || block_off_start < from) &&
2268 		    !test_range_bit(tree, block_start, cur_end,
2269 				    EXTENT_UPTODATE, 1)) {
2270 			u64 sector;
2271 			u64 extent_offset = block_start - em->start;
2272 			size_t iosize;
2273 			sector = (em->block_start + extent_offset) >> 9;
2274 			iosize = (cur_end - block_start + blocksize - 1) &
2275 				~((u64)blocksize - 1);
2276 			/*
2277 			 * we've already got the extent locked, but we
2278 			 * need to split the state such that our end_bio
2279 			 * handler can clear the lock.
2280 			 */
2281 			set_extent_bit(tree, block_start,
2282 				       block_start + iosize - 1,
2283 				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2284 			ret = submit_extent_page(READ, tree, page,
2285 					 sector, iosize, page_offset, em->bdev,
2286 					 NULL, 1,
2287 					 end_bio_extent_preparewrite);
2288 			iocount++;
2289 			block_start = block_start + iosize;
2290 		} else {
2291 			set_extent_uptodate(tree, block_start, cur_end,
2292 					    GFP_NOFS);
2293 			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2294 			block_start = cur_end + 1;
2295 		}
2296 		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2297 		free_extent_map(em);
2298 	}
2299 	if (iocount) {
2300 		wait_extent_bit(tree, orig_block_start,
2301 				block_end, EXTENT_LOCKED);
2302 	}
2303 	check_page_uptodate(tree, page);
2304 err:
2305 	/* FIXME, zero out newly allocated blocks on error */
2306 	return err;
2307 }
2308 EXPORT_SYMBOL(extent_prepare_write);
2309 
2310 /*
2311  * a helper for releasepage.  As long as there are no locked extents
2312  * in the range corresponding to the page, both state records and extent
2313  * map records are removed
2314  */
2315 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2316 {
2317 	struct extent_map *em;
2318 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2319 	u64 end = start + PAGE_CACHE_SIZE - 1;
2320 	u64 orig_start = start;
2321 	int ret = 1;
2322 
2323 	while (start <= end) {
2324 		em = lookup_extent_mapping(tree, start, end);
2325 		if (!em || IS_ERR(em))
2326 			break;
2327 		if (!test_range_bit(tree, em->start, em->end,
2328 				    EXTENT_LOCKED, 0)) {
2329 			remove_extent_mapping(tree, em);
2330 			/* once for the rb tree */
2331 			free_extent_map(em);
2332 		}
2333 		start = em->end + 1;
2334 		/* once for us */
2335 		free_extent_map(em);
2336 	}
2337 	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2338 		ret = 0;
2339 	else
2340 		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2341 				 1, 1, GFP_NOFS);
2342 	return ret;
2343 }
2344 EXPORT_SYMBOL(try_release_extent_mapping);
2345 
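/*
 * bmap entry point: map a logical file block to a disk sector.  Holes
 * and inline extents have no sensible disk address, so they report
 * sector 0
 */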
2346 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2347 		get_extent_t *get_extent)
2348 {
2349 	struct inode *inode = mapping->host;
2350 	u64 start = (u64)iblock << inode->i_blkbits; /* widen before shifting */
2351 	u64 end = start + (1 << inode->i_blkbits) - 1;
2352 	sector_t sector = 0;
2353 	struct extent_map *em;
2354 
2355 	em = get_extent(inode, NULL, 0, start, end, 0);
2356 	if (!em || IS_ERR(em))
2357 		return 0;
2358 
2359 	if (em->block_start == EXTENT_MAP_INLINE ||
2360 	    em->block_start == EXTENT_MAP_HOLE)
2361 		goto out;
2362 
2363 	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2364 out:
2365 	free_extent_map(em);
2366 	return sector;
2367 }
2368 
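/*
 * each tree keeps a small LRU of recently used extent buffers.  The
 * LRU holds a reference on the buffers it caches, and once it grows
 * past BUFFER_LRU_MAX entries the oldest buffer is dropped
 */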
2369 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2370 {
2371 	if (list_empty(&eb->lru)) {
2372 		extent_buffer_get(eb);
2373 		list_add(&eb->lru, &tree->buffer_lru);
2374 		tree->lru_size++;
2375 		if (tree->lru_size >= BUFFER_LRU_MAX) {
2376 			struct extent_buffer *rm;
2377 			rm = list_entry(tree->buffer_lru.prev,
2378 					struct extent_buffer, lru);
2379 			tree->lru_size--;
2380 			list_del_init(&rm->lru);
2381 			free_extent_buffer(rm);
2382 		}
2383 	} else
2384 		list_move(&eb->lru, &tree->buffer_lru);
2385 	return 0;
2386 }
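
/*
 * find an extent buffer in the LRU by start offset and length, taking
 * a reference on it if found
 */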
2387 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2388 				      u64 start, unsigned long len)
2389 {
2390 	struct list_head *lru = &tree->buffer_lru;
2391 	struct list_head *cur = lru->next;
2392 	struct extent_buffer *eb;
2393 
2394 	if (list_empty(lru))
2395 		return NULL;
2396 
2397 	do {
2398 		eb = list_entry(cur, struct extent_buffer, lru);
2399 		if (eb->start == start && eb->len == len) {
2400 			extent_buffer_get(eb);
2401 			return eb;
2402 		}
2403 		cur = cur->next;
2404 	} while (cur != lru);
2405 	return NULL;
2406 }
2407 
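/*
 * number of pages spanned by an extent buffer.  For example, with 4K
 * pages a buffer at start 1536 with len 8192 touches pages 0-2, so
 * this returns 3
 */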
2408 static inline unsigned long num_extent_pages(u64 start, u64 len)
2409 {
2410 	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2411 		(start >> PAGE_CACHE_SHIFT);
2412 }
2413 
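/*
 * return the page backing offset i of the buffer.  The first page is
 * cached in the buffer itself; the rest are looked up in the page
 * cache radix tree
 */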
2414 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2415 					      unsigned long i)
2416 {
2417 	struct page *p;
2418 	struct address_space *mapping;
2419 
2420 	if (i == 0)
2421 		return eb->first_page;
2422 	i += eb->start >> PAGE_CACHE_SHIFT;
2423 	mapping = eb->first_page->mapping;
2424 	read_lock_irq(&mapping->tree_lock);
2425 	p = radix_tree_lookup(&mapping->page_tree, i);
2426 	read_unlock_irq(&mapping->tree_lock);
2427 	return p;
2428 }
2429 
2430 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2431 						   u64 start,
2432 						   unsigned long len,
2433 						   gfp_t mask)
2434 {
2435 	struct extent_buffer *eb = NULL;
2436 
2437 	spin_lock(&tree->lru_lock);
2438 	eb = find_lru(tree, start, len);
2439 	spin_unlock(&tree->lru_lock);
2440 	if (eb)
2441 		return eb;
2442 
2443 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2444 	if (!eb)
2445 		return NULL;
2446 	INIT_LIST_HEAD(&eb->lru);
2446 	eb->start = start;
2447 	eb->len = len;
2448 	atomic_set(&eb->refs, 1);
2449 
2450 	return eb;
2451 }
2452 
2453 static void __free_extent_buffer(struct extent_buffer *eb)
2454 {
2455 	kmem_cache_free(extent_buffer_cache, eb);
2456 }
2457 
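/*
 * allocate (or find in the LRU) an extent buffer for the given range,
 * creating and tagging the backing pages as needed.  page0, when
 * provided, is used as the buffer's first page
 */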
2458 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2459 					  u64 start, unsigned long len,
2460 					  struct page *page0,
2461 					  gfp_t mask)
2462 {
2463 	unsigned long num_pages = num_extent_pages(start, len);
2464 	unsigned long i;
2465 	unsigned long index = start >> PAGE_CACHE_SHIFT;
2466 	struct extent_buffer *eb;
2467 	struct page *p;
2468 	struct address_space *mapping = tree->mapping;
2469 	int uptodate = 1;
2470 
2471 	eb = __alloc_extent_buffer(tree, start, len, mask);
2472 	if (!eb || IS_ERR(eb))
2473 		return NULL;
2474 
2475 	if (eb->flags & EXTENT_BUFFER_FILLED)
2476 		goto lru_add;
2477 
2478 	if (page0) {
2479 		eb->first_page = page0;
2480 		i = 1;
2481 		index++;
2482 		page_cache_get(page0);
2483 		mark_page_accessed(page0);
2484 		set_page_extent_mapped(page0);
2485 		WARN_ON(!PageUptodate(page0));
2486 		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2487 				 len << 2);
2488 	} else {
2489 		i = 0;
2490 	}
2491 	for (; i < num_pages; i++, index++) {
2492 		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2493 		if (!p) {
2494 			WARN_ON(1);
2495 			goto fail;
2496 		}
2497 		set_page_extent_mapped(p);
2498 		mark_page_accessed(p);
2499 		if (i == 0) {
2500 			eb->first_page = p;
2501 			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2502 					 len << 2);
2503 		} else {
2504 			set_page_private(p, EXTENT_PAGE_PRIVATE);
2505 		}
2506 		if (!PageUptodate(p))
2507 			uptodate = 0;
2508 		unlock_page(p);
2509 	}
2510 	if (uptodate)
2511 		eb->flags |= EXTENT_UPTODATE;
2512 	eb->flags |= EXTENT_BUFFER_FILLED;
2513 
2514 lru_add:
2515 	spin_lock(&tree->lru_lock);
2516 	add_lru(tree, eb);
2517 	spin_unlock(&tree->lru_lock);
2518 	return eb;
2519 
2520 fail:
2521 	spin_lock(&tree->lru_lock);
2522 	list_del_init(&eb->lru);
2523 	spin_unlock(&tree->lru_lock);
2524 	if (!atomic_dec_and_test(&eb->refs))
2525 		return NULL;
2526 	for (index = 1; index < i; index++) {
2527 		page_cache_release(extent_buffer_page(eb, index));
2528 	}
2529 	if (i > 0)
2530 		page_cache_release(extent_buffer_page(eb, 0));
2531 	__free_extent_buffer(eb);
2532 	return NULL;
2533 }
2534 EXPORT_SYMBOL(alloc_extent_buffer);
2535 
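/*
 * like alloc_extent_buffer, but only uses pages that are already
 * present in the page cache; returns NULL if any backing page is
 * missing
 */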
2536 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2537 					 u64 start, unsigned long len,
2538 					  gfp_t mask)
2539 {
2540 	unsigned long num_pages = num_extent_pages(start, len);
2541 	unsigned long i;
2542 	unsigned long index = start >> PAGE_CACHE_SHIFT;
2543 	struct extent_buffer *eb;
2544 	struct page *p;
2545 	struct address_space *mapping = tree->mapping;
2546 	int uptodate = 1;
2547 
2548 	eb = __alloc_extent_buffer(tree, start, len, mask);
2549 	if (!eb || IS_ERR(eb))
2550 		return NULL;
2551 
2552 	if (eb->flags & EXTENT_BUFFER_FILLED)
2553 		goto lru_add;
2554 
2555 	for (i = 0; i < num_pages; i++, index++) {
2556 		p = find_lock_page(mapping, index);
2557 		if (!p) {
2558 			goto fail;
2559 		}
2560 		set_page_extent_mapped(p);
2561 		mark_page_accessed(p);
2562 
2563 		if (i == 0) {
2564 			eb->first_page = p;
2565 			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2566 					 len << 2);
2567 		} else {
2568 			set_page_private(p, EXTENT_PAGE_PRIVATE);
2569 		}
2570 
2571 		if (!PageUptodate(p))
2572 			uptodate = 0;
2573 		unlock_page(p);
2574 	}
2575 	if (uptodate)
2576 		eb->flags |= EXTENT_UPTODATE;
2577 	eb->flags |= EXTENT_BUFFER_FILLED;
2578 
2579 lru_add:
2580 	spin_lock(&tree->lru_lock);
2581 	add_lru(tree, eb);
2582 	spin_unlock(&tree->lru_lock);
2583 	return eb;
2584 fail:
2585 	spin_lock(&tree->lru_lock);
2586 	list_del_init(&eb->lru);
2587 	spin_unlock(&tree->lru_lock);
2588 	if (!atomic_dec_and_test(&eb->refs))
2589 		return NULL;
2590 	for (index = 1; index < i; index++) {
2591 		page_cache_release(extent_buffer_page(eb, index));
2592 	}
2593 	if (i > 0)
2594 		page_cache_release(extent_buffer_page(eb, 0));
2595 	__free_extent_buffer(eb);
2596 	return NULL;
2597 }
2598 EXPORT_SYMBOL(find_extent_buffer);
2599 
2600 void free_extent_buffer(struct extent_buffer *eb)
2601 {
2602 	unsigned long i;
2603 	unsigned long num_pages;
2604 
2605 	if (!eb)
2606 		return;
2607 
2608 	if (!atomic_dec_and_test(&eb->refs))
2609 		return;
2610 
2611 	WARN_ON(!list_empty(&eb->lru));
2612 	num_pages = num_extent_pages(eb->start, eb->len);
2613 
2614 	for (i = 1; i < num_pages; i++) {
2615 		page_cache_release(extent_buffer_page(eb, i));
2616 	}
2617 	page_cache_release(extent_buffer_page(eb, 0));
2618 	__free_extent_buffer(eb);
2619 }
2620 EXPORT_SYMBOL(free_extent_buffer);
2621 
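/*
 * clear the dirty bits for the buffer's range in the extent tree and
 * drop the dirty tags on its backing pages, being careful not to clean
 * pages that still have dirty ranges from a neighbouring buffer
 */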
2622 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2623 			      struct extent_buffer *eb)
2624 {
2625 	int set;
2626 	unsigned long i;
2627 	unsigned long num_pages;
2628 	struct page *page;
2629 
2630 	u64 start = eb->start;
2631 	u64 end = start + eb->len - 1;
2632 
2633 	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2634 	num_pages = num_extent_pages(eb->start, eb->len);
2635 
2636 	for (i = 0; i < num_pages; i++) {
2637 		page = extent_buffer_page(eb, i);
2638 		lock_page(page);
2639 		/*
2640 		 * if we're on the first or last page and the block isn't
2641 		 * aligned on a page boundary, do extra checks to make sure
2642 		 * we don't clean a page that is partially dirty
2643 		 */
2644 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2645 		    ((i == num_pages - 1) &&
2646 		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2647 			start = (u64)page->index << PAGE_CACHE_SHIFT;
2648 			end = start + PAGE_CACHE_SIZE - 1;
2649 			if (test_range_bit(tree, start, end,
2650 					   EXTENT_DIRTY, 0)) {
2651 				unlock_page(page);
2652 				continue;
2653 			}
2654 		}
2655 		clear_page_dirty_for_io(page);
2656 		write_lock_irq(&page->mapping->tree_lock);
2657 		if (!PageDirty(page)) {
2658 			radix_tree_tag_clear(&page->mapping->page_tree,
2659 						page_index(page),
2660 						PAGECACHE_TAG_DIRTY);
2661 		}
2662 		write_unlock_irq(&page->mapping->tree_lock);
2663 		unlock_page(page);
2664 	}
2665 	return 0;
2666 }
2667 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2668 
2669 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2670 				    struct extent_buffer *eb)
2671 {
2672 	return wait_on_extent_writeback(tree, eb->start,
2673 					eb->start + eb->len - 1);
2674 }
2675 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2676 
2677 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2678 			     struct extent_buffer *eb)
2679 {
2680 	unsigned long i;
2681 	unsigned long num_pages;
2682 
2683 	num_pages = num_extent_pages(eb->start, eb->len);
2684 	for (i = 0; i < num_pages; i++) {
2685 		struct page *page = extent_buffer_page(eb, i);
2686 		/* writepage may need to do something special for the
2687 		 * first page, so we have to make sure page->private is
2688 		 * properly set.  releasepage may drop page->private
2689 		 * on us if the page isn't already dirty.
2690 		 */
2691 		if (i == 0) {
2692 			lock_page(page);
2693 			set_page_private(page,
2694 					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2695 					 eb->len << 2);
2696 		}
2697 		__set_page_dirty_nobuffers(page);
2698 		if (i == 0)
2699 			unlock_page(page);
2700 	}
2701 	return set_extent_dirty(tree, eb->start,
2702 				eb->start + eb->len - 1, GFP_NOFS);
2703 }
2704 EXPORT_SYMBOL(set_extent_buffer_dirty);
2705 
2706 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2707 				struct extent_buffer *eb)
2708 {
2709 	unsigned long i;
2710 	struct page *page;
2711 	unsigned long num_pages;
2712 
2713 	num_pages = num_extent_pages(eb->start, eb->len);
2714 
2715 	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2716 			    GFP_NOFS);
2717 	for (i = 0; i < num_pages; i++) {
2718 		page = extent_buffer_page(eb, i);
2719 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2720 		    ((i == num_pages - 1) &&
2721 		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2722 			check_page_uptodate(tree, page);
2723 			continue;
2724 		}
2725 		SetPageUptodate(page);
2726 	}
2727 	return 0;
2728 }
2729 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2730 
2731 int extent_buffer_uptodate(struct extent_map_tree *tree,
2732 			     struct extent_buffer *eb)
2733 {
2734 	if (eb->flags & EXTENT_UPTODATE)
2735 		return 1;
2736 	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2737 			   EXTENT_UPTODATE, 1);
2738 }
2739 EXPORT_SYMBOL(extent_buffer_uptodate);
2740 
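/*
 * read any pages of the buffer that aren't uptodate yet, starting at
 * offset start (0 means the whole buffer).  With wait set, this blocks
 * until the reads finish and returns -EIO if any page failed; without
 * it, reads are only started on pages we can lock without blocking
 */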
2741 int read_extent_buffer_pages(struct extent_map_tree *tree,
2742 			     struct extent_buffer *eb,
2743 			     u64 start,
2744 			     int wait)
2745 {
2746 	unsigned long i;
2747 	unsigned long start_i;
2748 	struct page *page;
2749 	int err;
2750 	int ret = 0;
2751 	unsigned long num_pages;
2752 
2753 	if (eb->flags & EXTENT_UPTODATE)
2754 		return 0;
2755 
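	/*
	 * note: the range-wide EXTENT_UPTODATE shortcut below is
	 * intentionally compiled out via the if (0 && ...)
	 */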
2756 	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2757 			   EXTENT_UPTODATE, 1)) {
2758 		return 0;
2759 	}
2760 
2761 	if (start) {
2762 		WARN_ON(start < eb->start);
2763 		start_i = (start >> PAGE_CACHE_SHIFT) -
2764 			(eb->start >> PAGE_CACHE_SHIFT);
2765 	} else {
2766 		start_i = 0;
2767 	}
2768 
2769 	num_pages = num_extent_pages(eb->start, eb->len);
2770 	for (i = start_i; i < num_pages; i++) {
2771 		page = extent_buffer_page(eb, i);
2772 		if (PageUptodate(page)) {
2773 			continue;
2774 		}
2775 		if (!wait) {
2776 			if (TestSetPageLocked(page)) {
2777 				continue;
2778 			}
2779 		} else {
2780 			lock_page(page);
2781 		}
2782 		if (!PageUptodate(page)) {
2783 			err = page->mapping->a_ops->readpage(NULL, page);
2784 			if (err) {
2785 				ret = err;
2786 			}
2787 		} else {
2788 			unlock_page(page);
2789 		}
2790 	}
2791 
2792 	if (ret || !wait) {
2793 		return ret;
2794 	}
2795 
2796 	for (i = start_i; i < num_pages; i++) {
2797 		page = extent_buffer_page(eb, i);
2798 		wait_on_page_locked(page);
2799 		if (!PageUptodate(page)) {
2800 			ret = -EIO;
2801 		}
2802 	}
2803 	if (!ret)
2804 		eb->flags |= EXTENT_UPTODATE;
2805 	return ret;
2806 }
2807 EXPORT_SYMBOL(read_extent_buffer_pages);
2808 
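/*
 * the helpers below (read, write, memcmp, memset, copy) move data
 * between an extent buffer and ordinary memory.  They all walk the
 * buffer page by page, clamping each operation to the bytes remaining
 * in the current page
 */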
2809 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2810 			unsigned long start,
2811 			unsigned long len)
2812 {
2813 	size_t cur;
2814 	size_t offset;
2815 	struct page *page;
2816 	char *kaddr;
2817 	char *dst = (char *)dstv;
2818 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2819 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2820 	unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2821 
2822 	WARN_ON(start > eb->len);
2823 	WARN_ON(start + len > eb->start + eb->len);
2824 
2825 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2826 
2827 	while(len > 0) {
2828 		page = extent_buffer_page(eb, i);
2829 		if (!PageUptodate(page)) {
2830 			printk("page %lu not up to date i %lu, total %lu, "
2831 			       "len %lu\n", page->index, i, num_pages,
2832 			       eb->len);
2833 			WARN_ON(1);
2834 		}
2834 
2835 		cur = min(len, (PAGE_CACHE_SIZE - offset));
2836 		kaddr = kmap_atomic(page, KM_USER1);
2837 		memcpy(dst, kaddr + offset, cur);
2838 		kunmap_atomic(kaddr, KM_USER1);
2839 
2840 		dst += cur;
2841 		len -= cur;
2842 		offset = 0;
2843 		i++;
2844 	}
2845 }
2846 EXPORT_SYMBOL(read_extent_buffer);
2847 
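/*
 * map a region of the buffer for direct access.  The region must not
 * cross a page boundary (-EINVAL if it does), since only a single page
 * is kmapped.  An illustrative (hypothetical) caller:
 *
 *	char *token, *kaddr;
 *	unsigned long map_start, map_len;
 *
 *	if (!map_private_extent_buffer(eb, off, sizeof(u32), &token,
 *				       &kaddr, &map_start, &map_len,
 *				       KM_USER0)) {
 *		... access kaddr ...
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 */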
2848 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2849 			       unsigned long min_len, char **token, char **map,
2850 			       unsigned long *map_start,
2851 			       unsigned long *map_len, int km)
2852 {
2853 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
2854 	char *kaddr;
2855 	struct page *p;
2856 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2857 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2858 	unsigned long end_i = (start_offset + start + min_len - 1) >>
2859 		PAGE_CACHE_SHIFT;
2860 
2861 	if (i != end_i)
2862 		return -EINVAL;
2863 
2864 	if (i == 0) {
2865 		offset = start_offset;
2866 		*map_start = 0;
2867 	} else {
2868 		offset = 0;
2869 		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2870 	}
2871 	if (start + min_len > eb->len) {
2872 		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
2873 		       eb->start, eb->len, start, min_len);
2874 		WARN_ON(1);
2875 	}
2875 
2876 	p = extent_buffer_page(eb, i);
2877 	WARN_ON(!PageUptodate(p));
2878 	kaddr = kmap_atomic(p, km);
2879 	*token = kaddr;
2880 	*map = kaddr + offset;
2881 	*map_len = PAGE_CACHE_SIZE - offset;
2882 	return 0;
2883 }
2884 EXPORT_SYMBOL(map_private_extent_buffer);
2885 
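/*
 * like map_private_extent_buffer, but reuses the buffer's cached
 * mapping when one exists: the old mapping is dropped and, on success,
 * the new one is remembered in the buffer
 */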
2886 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2887 		      unsigned long min_len,
2888 		      char **token, char **map,
2889 		      unsigned long *map_start,
2890 		      unsigned long *map_len, int km)
2891 {
2892 	int err;
2893 	int save = 0;
2894 	if (eb->map_token) {
2895 		unmap_extent_buffer(eb, eb->map_token, km);
2896 		eb->map_token = NULL;
2897 		save = 1;
2898 	}
2899 	err = map_private_extent_buffer(eb, start, min_len, token, map,
2900 				       map_start, map_len, km);
2901 	if (!err && save) {
2902 		eb->map_token = *token;
2903 		eb->kaddr = *map;
2904 		eb->map_start = *map_start;
2905 		eb->map_len = *map_len;
2906 	}
2907 	return err;
2908 }
2909 EXPORT_SYMBOL(map_extent_buffer);
2910 
2911 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2912 {
2913 	kunmap_atomic(token, km);
2914 }
2915 EXPORT_SYMBOL(unmap_extent_buffer);
2916 
2917 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2918 			  unsigned long start,
2919 			  unsigned long len)
2920 {
2921 	size_t cur;
2922 	size_t offset;
2923 	struct page *page;
2924 	char *kaddr;
2925 	char *ptr = (char *)ptrv;
2926 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2927 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2928 	int ret = 0;
2929 
2930 	WARN_ON(start > eb->len);
2931 	WARN_ON(start + len > eb->start + eb->len);
2932 
2933 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2934 
2935 	while(len > 0) {
2936 		page = extent_buffer_page(eb, i);
2937 		WARN_ON(!PageUptodate(page));
2938 
2939 		cur = min(len, (PAGE_CACHE_SIZE - offset));
2940 
2941 		kaddr = kmap_atomic(page, KM_USER0);
2942 		ret = memcmp(ptr, kaddr + offset, cur);
2943 		kunmap_atomic(kaddr, KM_USER0);
2944 		if (ret)
2945 			break;
2946 
2947 		ptr += cur;
2948 		len -= cur;
2949 		offset = 0;
2950 		i++;
2951 	}
2952 	return ret;
2953 }
2954 EXPORT_SYMBOL(memcmp_extent_buffer);
2955 
2956 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2957 			 unsigned long start, unsigned long len)
2958 {
2959 	size_t cur;
2960 	size_t offset;
2961 	struct page *page;
2962 	char *kaddr;
2963 	char *src = (char *)srcv;
2964 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2965 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2966 
2967 	WARN_ON(start > eb->len);
2968 	WARN_ON(start + len > eb->start + eb->len);
2969 
2970 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2971 
2972 	while(len > 0) {
2973 		page = extent_buffer_page(eb, i);
2974 		WARN_ON(!PageUptodate(page));
2975 
2976 		cur = min(len, PAGE_CACHE_SIZE - offset);
2977 		kaddr = kmap_atomic(page, KM_USER1);
2978 		memcpy(kaddr + offset, src, cur);
2979 		kunmap_atomic(kaddr, KM_USER1);
2980 
2981 		src += cur;
2982 		len -= cur;
2983 		offset = 0;
2984 		i++;
2985 	}
2986 }
2987 EXPORT_SYMBOL(write_extent_buffer);
2988 
2989 void memset_extent_buffer(struct extent_buffer *eb, char c,
2990 			  unsigned long start, unsigned long len)
2991 {
2992 	size_t cur;
2993 	size_t offset;
2994 	struct page *page;
2995 	char *kaddr;
2996 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2997 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2998 
2999 	WARN_ON(start > eb->len);
3000 	WARN_ON(start + len > eb->start + eb->len);
3001 
3002 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3003 
3004 	while(len > 0) {
3005 		page = extent_buffer_page(eb, i);
3006 		WARN_ON(!PageUptodate(page));
3007 
3008 		cur = min(len, PAGE_CACHE_SIZE - offset);
3009 		kaddr = kmap_atomic(page, KM_USER0);
3010 		memset(kaddr + offset, c, cur);
3011 		kunmap_atomic(kaddr, KM_USER0);
3012 
3013 		len -= cur;
3014 		offset = 0;
3015 		i++;
3016 	}
3017 }
3018 EXPORT_SYMBOL(memset_extent_buffer);
3019 
3020 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3021 			unsigned long dst_offset, unsigned long src_offset,
3022 			unsigned long len)
3023 {
3024 	u64 dst_len = dst->len;
3025 	size_t cur;
3026 	size_t offset;
3027 	struct page *page;
3028 	char *kaddr;
3029 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3030 	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3031 
3032 	WARN_ON(src->len != dst_len);
3033 
3034 	offset = (start_offset + dst_offset) &
3035 		((unsigned long)PAGE_CACHE_SIZE - 1);
3036 
3037 	while(len > 0) {
3038 		page = extent_buffer_page(dst, i);
3039 		WARN_ON(!PageUptodate(page));
3040 
3041 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3042 
3043 		kaddr = kmap_atomic(page, KM_USER0);
3044 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
3045 		kunmap_atomic(kaddr, KM_USER0);
3046 
3047 		src_offset += cur;
3048 		len -= cur;
3049 		offset = 0;
3050 		i++;
3051 	}
3052 }
3053 EXPORT_SYMBOL(copy_extent_buffer);
3054 
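/*
 * page-level copy helpers for memmove_extent_buffer and
 * memcpy_extent_buffer.  move_pages copies backwards, byte by byte, so
 * overlapping ranges within the buffer are handled safely
 */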
3055 static void move_pages(struct page *dst_page, struct page *src_page,
3056 		       unsigned long dst_off, unsigned long src_off,
3057 		       unsigned long len)
3058 {
3059 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3060 	if (dst_page == src_page) {
3061 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3062 	} else {
3063 		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3064 		char *p = dst_kaddr + dst_off + len;
3065 		char *s = src_kaddr + src_off + len;
3066 
3067 		while (len--)
3068 			*--p = *--s;
3069 
3070 		kunmap_atomic(src_kaddr, KM_USER1);
3071 	}
3072 	kunmap_atomic(dst_kaddr, KM_USER0);
3073 }
3074 
3075 static void copy_pages(struct page *dst_page, struct page *src_page,
3076 		       unsigned long dst_off, unsigned long src_off,
3077 		       unsigned long len)
3078 {
3079 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3080 	char *src_kaddr;
3081 
3082 	if (dst_page != src_page)
3083 		src_kaddr = kmap_atomic(src_page, KM_USER1);
3084 	else
3085 		src_kaddr = dst_kaddr;
3086 
3087 	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3088 	kunmap_atomic(dst_kaddr, KM_USER0);
3089 	if (dst_page != src_page)
3090 		kunmap_atomic(src_kaddr, KM_USER1);
3091 }
3092 
3093 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3094 			   unsigned long src_offset, unsigned long len)
3095 {
3096 	size_t cur;
3097 	size_t dst_off_in_page;
3098 	size_t src_off_in_page;
3099 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3100 	unsigned long dst_i;
3101 	unsigned long src_i;
3102 
3103 	if (src_offset + len > dst->len) {
3104 		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
3105 		       src_offset, len, dst->len);
3106 		BUG_ON(1);
3107 	}
3108 	if (dst_offset + len > dst->len) {
3109 		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
3110 		       dst_offset, len, dst->len);
3111 		BUG_ON(1);
3112 	}
3113 
3114 	while(len > 0) {
3115 		dst_off_in_page = (start_offset + dst_offset) &
3116 			((unsigned long)PAGE_CACHE_SIZE - 1);
3117 		src_off_in_page = (start_offset + src_offset) &
3118 			((unsigned long)PAGE_CACHE_SIZE - 1);
3119 
3120 		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3121 		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3122 
3123 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3124 					       src_off_in_page));
3125 		cur = min_t(unsigned long, cur,
3126 			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3127 
3128 		copy_pages(extent_buffer_page(dst, dst_i),
3129 			   extent_buffer_page(dst, src_i),
3130 			   dst_off_in_page, src_off_in_page, cur);
3131 
3132 		src_offset += cur;
3133 		dst_offset += cur;
3134 		len -= cur;
3135 	}
3136 }
3137 EXPORT_SYMBOL(memcpy_extent_buffer);
3138 
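/*
 * memmove analogue for extent buffers: when the destination overlaps a
 * lower source offset the copy is delegated to memcpy_extent_buffer,
 * otherwise the copy is done from the end of the range backwards
 */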
3139 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3140 			   unsigned long src_offset, unsigned long len)
3141 {
3142 	size_t cur;
3143 	size_t dst_off_in_page;
3144 	size_t src_off_in_page;
3145 	unsigned long dst_end = dst_offset + len - 1;
3146 	unsigned long src_end = src_offset + len - 1;
3147 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3148 	unsigned long dst_i;
3149 	unsigned long src_i;
3150 
3151 	if (src_offset + len > dst->len) {
3152 		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3153 		       src_offset, len, dst->len);
3154 		BUG_ON(1);
3155 	}
3156 	if (dst_offset + len > dst->len) {
3157 		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3158 		       dst_offset, len, dst->len);
3159 		BUG_ON(1);
3160 	}
3161 	if (dst_offset < src_offset) {
3162 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3163 		return;
3164 	}
3165 	while(len > 0) {
3166 		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3167 		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3168 
3169 		dst_off_in_page = (start_offset + dst_end) &
3170 			((unsigned long)PAGE_CACHE_SIZE - 1);
3171 		src_off_in_page = (start_offset + src_end) &
3172 			((unsigned long)PAGE_CACHE_SIZE - 1);
3173 
3174 		cur = min_t(unsigned long, len, src_off_in_page + 1);
3175 		cur = min(cur, dst_off_in_page + 1);
3176 		move_pages(extent_buffer_page(dst, dst_i),
3177 			   extent_buffer_page(dst, src_i),
3178 			   dst_off_in_page - cur + 1,
3179 			   src_off_in_page - cur + 1, cur);
3180 
3181 		dst_end -= cur;
3182 		src_end -= cur;
3183 		len -= cur;
3184 	}
3185 }
3186 EXPORT_SYMBOL(memmove_extent_buffer);
3187