xref: /openbmc/linux/fs/btrfs/extent_map.c (revision 793955bc)
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_map.h"
16 
17 /* temporary declaration until extent_map moves out of btrfs */
18 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
19 				       unsigned long extra_flags,
20 				       void (*ctor)(void *, struct kmem_cache *,
21 						    unsigned long));
22 
23 static struct kmem_cache *extent_map_cache;
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
26 
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
29 
30 static DEFINE_SPINLOCK(state_lock);
31 #define BUFFER_LRU_MAX 64
32 
33 struct tree_entry {
34 	u64 start;
35 	u64 end;
36 	int in_tree;
37 	struct rb_node rb_node;
38 };
39 
40 struct extent_page_data {
41 	struct bio *bio;
42 	struct extent_map_tree *tree;
43 	get_extent_t *get_extent;
44 };
45 int __init extent_map_init(void)
46 {
47 	extent_map_cache = btrfs_cache_create("extent_map",
48 					    sizeof(struct extent_map), 0,
49 					    NULL);
50 	if (!extent_map_cache)
51 		return -ENOMEM;
52 	extent_state_cache = btrfs_cache_create("extent_state",
53 					    sizeof(struct extent_state), 0,
54 					    NULL);
55 	if (!extent_state_cache)
56 		goto free_map_cache;
57 	extent_buffer_cache = btrfs_cache_create("extent_buffers",
58 					    sizeof(struct extent_buffer), 0,
59 					    NULL);
60 	if (!extent_buffer_cache)
61 		goto free_state_cache;
62 	return 0;
63 
64 free_state_cache:
65 	kmem_cache_destroy(extent_state_cache);
66 free_map_cache:
67 	kmem_cache_destroy(extent_map_cache);
68 	return -ENOMEM;
69 }
70 
71 void __exit extent_map_exit(void)
72 {
73 	struct extent_state *state;
74 
75 	while (!list_empty(&states)) {
76 		state = list_entry(states.next, struct extent_state, list);
77 		printk("state leak: start %Lu end %Lu state %lu in tree %d "
		       "refs %d\n", state->start, state->end, state->state,
		       state->in_tree, atomic_read(&state->refs));
78 		list_del(&state->list);
79 		kmem_cache_free(extent_state_cache, state);
80 
81 	}
82 
83 	if (extent_map_cache)
84 		kmem_cache_destroy(extent_map_cache);
85 	if (extent_state_cache)
86 		kmem_cache_destroy(extent_state_cache);
87 	if (extent_buffer_cache)
88 		kmem_cache_destroy(extent_buffer_cache);
89 }
90 
91 void extent_map_tree_init(struct extent_map_tree *tree,
92 			  struct address_space *mapping, gfp_t mask)
93 {
94 	tree->map.rb_node = NULL;
95 	tree->state.rb_node = NULL;
96 	tree->ops = NULL;
97 	rwlock_init(&tree->lock);
98 	spin_lock_init(&tree->lru_lock);
99 	tree->mapping = mapping;
100 	INIT_LIST_HEAD(&tree->buffer_lru);
101 	tree->lru_size = 0;
102 }
103 EXPORT_SYMBOL(extent_map_tree_init);
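
/*
 * Usage sketch (illustrative only): a filesystem embeds one extent_map_tree
 * per in-memory inode and initializes it when the inode is set up.  The
 * BTRFS_I() container helper below is an assumption borrowed from the rest
 * of btrfs; any structure that embeds the tree works the same way.
 *
 *	struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
 *
 *	extent_map_tree_init(tree, inode->i_mapping, GFP_NOFS);
 */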
104 
105 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
106 {
107 	struct extent_buffer *eb;
108 	while(!list_empty(&tree->buffer_lru)) {
109 		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
110 				lru);
111 		list_del_init(&eb->lru);
112 		free_extent_buffer(eb);
113 	}
114 }
115 EXPORT_SYMBOL(extent_map_tree_empty_lru);
116 
117 struct extent_map *alloc_extent_map(gfp_t mask)
118 {
119 	struct extent_map *em;
120 	em = kmem_cache_alloc(extent_map_cache, mask);
121 	if (!em || IS_ERR(em))
122 		return em;
123 	em->in_tree = 0;
124 	atomic_set(&em->refs, 1);
125 	return em;
126 }
127 EXPORT_SYMBOL(alloc_extent_map);
128 
129 void free_extent_map(struct extent_map *em)
130 {
131 	if (!em)
132 		return;
133 	if (atomic_dec_and_test(&em->refs)) {
134 		WARN_ON(em->in_tree);
135 		kmem_cache_free(extent_map_cache, em);
136 	}
137 }
138 EXPORT_SYMBOL(free_extent_map);
139 
140 
141 struct extent_state *alloc_extent_state(gfp_t mask)
142 {
143 	struct extent_state *state;
144 	unsigned long flags;
145 
146 	state = kmem_cache_alloc(extent_state_cache, mask);
147 	if (!state || IS_ERR(state))
148 		return state;
149 	state->state = 0;
150 	state->in_tree = 0;
151 	state->private = 0;
152 
153 	spin_lock_irqsave(&state_lock, flags);
154 	list_add(&state->list, &states);
155 	spin_unlock_irqrestore(&state_lock, flags);
156 
157 	atomic_set(&state->refs, 1);
158 	init_waitqueue_head(&state->wq);
159 	return state;
160 }
161 EXPORT_SYMBOL(alloc_extent_state);
162 
163 void free_extent_state(struct extent_state *state)
164 {
165 	unsigned long flags;
166 	if (!state)
167 		return;
168 	if (atomic_dec_and_test(&state->refs)) {
169 		WARN_ON(state->in_tree);
170 		spin_lock_irqsave(&state_lock, flags);
171 		list_del(&state->list);
172 		spin_unlock_irqrestore(&state_lock, flags);
173 		kmem_cache_free(extent_state_cache, state);
174 	}
175 }
176 EXPORT_SYMBOL(free_extent_state);
177 
178 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
179 				   struct rb_node *node)
180 {
181 	struct rb_node ** p = &root->rb_node;
182 	struct rb_node * parent = NULL;
183 	struct tree_entry *entry;
184 
185 	while(*p) {
186 		parent = *p;
187 		entry = rb_entry(parent, struct tree_entry, rb_node);
188 
189 		if (offset < entry->start)
190 			p = &(*p)->rb_left;
191 		else if (offset > entry->end)
192 			p = &(*p)->rb_right;
193 		else
194 			return parent;
195 	}
196 
197 	entry = rb_entry(node, struct tree_entry, rb_node);
198 	entry->in_tree = 1;
199 	rb_link_node(node, parent, p);
200 	rb_insert_color(node, root);
201 	return NULL;
202 }
203 
204 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
205 				   struct rb_node **prev_ret)
206 {
207 	struct rb_node * n = root->rb_node;
208 	struct rb_node *prev = NULL;
209 	struct tree_entry *entry;
210 	struct tree_entry *prev_entry = NULL;
211 
212 	while(n) {
213 		entry = rb_entry(n, struct tree_entry, rb_node);
214 		prev = n;
215 		prev_entry = entry;
216 
217 		if (offset < entry->start)
218 			n = n->rb_left;
219 		else if (offset > entry->end)
220 			n = n->rb_right;
221 		else
222 			return n;
223 	}
224 	if (!prev_ret)
225 		return NULL;
226 	while(prev && offset > prev_entry->end) {
227 		prev = rb_next(prev);
228 		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
229 	}
230 	*prev_ret = prev;
231 	return NULL;
232 }
233 
234 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
235 {
236 	struct rb_node *prev;
237 	struct rb_node *ret;
238 	ret = __tree_search(root, offset, &prev);
239 	if (!ret)
240 		return prev;
241 	return ret;
242 }
243 
244 static int tree_delete(struct rb_root *root, u64 offset)
245 {
246 	struct rb_node *node;
247 	struct tree_entry *entry;
248 
249 	node = __tree_search(root, offset, NULL);
250 	if (!node)
251 		return -ENOENT;
252 	entry = rb_entry(node, struct tree_entry, rb_node);
253 	entry->in_tree = 0;
254 	rb_erase(node, root);
255 	return 0;
256 }
257 
258 /*
259  * add_extent_mapping tries a simple backward merge with existing
260  * mappings.  The extent_map struct passed in will be inserted into
261  * the tree directly (no copies made, just a reference taken).
262  */
263 int add_extent_mapping(struct extent_map_tree *tree,
264 		       struct extent_map *em)
265 {
266 	int ret = 0;
267 	struct extent_map *prev = NULL;
268 	struct rb_node *rb;
269 
270 	write_lock_irq(&tree->lock);
271 	rb = tree_insert(&tree->map, em->end, &em->rb_node);
272 	if (rb) {
273 		prev = rb_entry(rb, struct extent_map, rb_node);
274 		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
275 		ret = -EEXIST;
276 		goto out;
277 	}
278 	atomic_inc(&em->refs);
279 	if (em->start != 0) {
280 		rb = rb_prev(&em->rb_node);
281 		if (rb)
282 			prev = rb_entry(rb, struct extent_map, rb_node);
283 		if (prev && prev->end + 1 == em->start &&
284 		    ((em->block_start == EXTENT_MAP_HOLE &&
285 		      prev->block_start == EXTENT_MAP_HOLE) ||
286 		     (em->block_start == EXTENT_MAP_INLINE &&
287 		      prev->block_start == EXTENT_MAP_INLINE) ||
288 		     (em->block_start == EXTENT_MAP_DELALLOC &&
289 		      prev->block_start == EXTENT_MAP_DELALLOC) ||
290 		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
291 		      em->block_start == prev->block_end + 1))) {
292 			em->start = prev->start;
293 			em->block_start = prev->block_start;
294 			rb_erase(&prev->rb_node, &tree->map);
295 			prev->in_tree = 0;
296 			free_extent_map(prev);
297 		}
298 	}
299 out:
300 	write_unlock_irq(&tree->lock);
301 	return ret;
302 }
303 EXPORT_SYMBOL(add_extent_mapping);
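
/*
 * Insertion sketch (illustrative; 'start', 'len', 'block_start' and 'bdev'
 * are placeholders the caller is assumed to have computed).  On success the
 * tree takes its own reference, so the caller drops the allocation reference
 * when done; -EEXIST means the range already overlaps an existing mapping.
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *	int ret;
 *
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->end = start + len - 1;
 *	em->block_start = block_start;
 *	em->block_end = block_start + len - 1;
 *	em->bdev = bdev;
 *	ret = add_extent_mapping(tree, em);
 *	free_extent_map(em);
 */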
304 
305 /*
306  * lookup_extent_mapping returns the first extent_map struct in the
307  * tree that intersects the [start, end] (inclusive) range.  There may
308  * be additional objects in the tree that intersect, so check the object
309  * returned carefully to make sure you don't need additional lookups.
310  */
311 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
312 					 u64 start, u64 end)
313 {
314 	struct extent_map *em;
315 	struct rb_node *rb_node;
316 
317 	read_lock_irq(&tree->lock);
318 	rb_node = tree_search(&tree->map, start);
319 	if (!rb_node) {
320 		em = NULL;
321 		goto out;
322 	}
323 	if (IS_ERR(rb_node)) {
324 		em = ERR_PTR(PTR_ERR(rb_node));
325 		goto out;
326 	}
327 	em = rb_entry(rb_node, struct extent_map, rb_node);
328 	if (em->end < start || em->start > end) {
329 		em = NULL;
330 		goto out;
331 	}
332 	atomic_inc(&em->refs);
333 out:
334 	read_unlock_irq(&tree->lock);
335 	return em;
336 }
337 EXPORT_SYMBOL(lookup_extent_mapping);
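
/*
 * Lookup sketch matching the comment above: a non-NULL result only
 * guarantees an intersection with [start, end], so a caller that needs the
 * whole range covered re-checks the boundaries (and may loop for the rest).
 * The returned map carries a reference that must be dropped.
 *
 *	struct extent_map *em;
 *
 *	em = lookup_extent_mapping(tree, start, end);
 *	if (em && !IS_ERR(em)) {
 *		if (em->start <= start && em->end >= end)
 *			... the whole range is covered by one mapping ...
 *		free_extent_map(em);
 *	}
 */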
338 
339 /*
340  * removes an extent_map struct from the tree.  No reference counts are
341  * dropped, and no checks are done to see if the range is in use
342  */
343 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
344 {
345 	int ret;
346 
347 	write_lock_irq(&tree->lock);
348 	ret = tree_delete(&tree->map, em->end);
349 	write_unlock_irq(&tree->lock);
350 	return ret;
351 }
352 EXPORT_SYMBOL(remove_extent_mapping);
353 
354 /*
355  * utility function to look for merge candidates inside a given range.
356  * Any extents with matching state are merged together into a single
357  * extent in the tree.  Extents with EXTENT_IOBITS set in their state
358  * field are not merged because the end_io handlers need to be able to
359  * do operations on them without sleeping (or doing allocations/splits).
360  *
361  * This should be called with the tree lock held.
362  */
363 static int merge_state(struct extent_map_tree *tree,
364 		       struct extent_state *state)
365 {
366 	struct extent_state *other;
367 	struct rb_node *other_node;
368 
369 	if (state->state & EXTENT_IOBITS)
370 		return 0;
371 
372 	other_node = rb_prev(&state->rb_node);
373 	if (other_node) {
374 		other = rb_entry(other_node, struct extent_state, rb_node);
375 		if (other->end == state->start - 1 &&
376 		    other->state == state->state) {
377 			state->start = other->start;
378 			other->in_tree = 0;
379 			rb_erase(&other->rb_node, &tree->state);
380 			free_extent_state(other);
381 		}
382 	}
383 	other_node = rb_next(&state->rb_node);
384 	if (other_node) {
385 		other = rb_entry(other_node, struct extent_state, rb_node);
386 		if (other->start == state->end + 1 &&
387 		    other->state == state->state) {
388 			other->start = state->start;
389 			state->in_tree = 0;
390 			rb_erase(&state->rb_node, &tree->state);
391 			free_extent_state(state);
392 		}
393 	}
394 	return 0;
395 }
396 
397 /*
398  * insert an extent_state struct into the tree.  'bits' are set on the
399  * struct before it is inserted.
400  *
401  * This may return -EEXIST if the extent is already there, in which case the
402  * state struct is freed.
403  *
404  * The tree lock is not taken internally.  This is a utility function and
405  * probably isn't what you want to call (see set/clear_extent_bit).
406  */
407 static int insert_state(struct extent_map_tree *tree,
408 			struct extent_state *state, u64 start, u64 end,
409 			int bits)
410 {
411 	struct rb_node *node;
412 
413 	if (end < start) {
414 		printk("end < start %Lu %Lu\n", end, start);
415 		WARN_ON(1);
416 	}
417 	state->state |= bits;
418 	state->start = start;
419 	state->end = end;
420 	node = tree_insert(&tree->state, end, &state->rb_node);
421 	if (node) {
422 		struct extent_state *found;
423 		found = rb_entry(node, struct extent_state, rb_node);
424 		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
425 		free_extent_state(state);
426 		return -EEXIST;
427 	}
428 	merge_state(tree, state);
429 	return 0;
430 }
431 
432 /*
433  * split a given extent state struct in two, inserting the preallocated
434  * struct 'prealloc' as the newly created second half.  'split' indicates an
435  * offset inside 'orig' where it should be split.
436  *
437  * Before calling,
438  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
439  * are two extent state structs in the tree:
440  * prealloc: [orig->start, split - 1]
441  * orig: [ split, orig->end ]
442  *
443  * The tree locks are not taken by this function. They need to be held
444  * by the caller.
445  */
446 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
447 		       struct extent_state *prealloc, u64 split)
448 {
449 	struct rb_node *node;
450 	prealloc->start = orig->start;
451 	prealloc->end = split - 1;
452 	prealloc->state = orig->state;
453 	orig->start = split;
454 
455 	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
456 	if (node) {
457 		struct extent_state *found;
458 		found = rb_entry(node, struct extent_state, rb_node);
459 		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
460 		free_extent_state(prealloc);
461 		return -EEXIST;
462 	}
463 	return 0;
464 }
465 
466 /*
467  * utility function to clear some bits in an extent state struct.
468  * it will optionally wake up anyone waiting on this state (wake == 1), or
469  * forcibly remove the state from the tree (delete == 1).
470  *
471  * If no bits are set on the state struct after clearing things, the
472  * struct is freed and removed from the tree
473  */
474 static int clear_state_bit(struct extent_map_tree *tree,
475 			    struct extent_state *state, int bits, int wake,
476 			    int delete)
477 {
478 	int ret = state->state & bits;
479 	state->state &= ~bits;
480 	if (wake)
481 		wake_up(&state->wq);
482 	if (delete || state->state == 0) {
483 		if (state->in_tree) {
484 			rb_erase(&state->rb_node, &tree->state);
485 			state->in_tree = 0;
486 			free_extent_state(state);
487 		} else {
488 			WARN_ON(1);
489 		}
490 	} else {
491 		merge_state(tree, state);
492 	}
493 	return ret;
494 }
495 
496 /*
497  * clear some bits on a range in the tree.  This may require splitting
498  * or inserting elements in the tree, so the gfp mask is used to
499  * indicate which allocations or sleeping are allowed.
500  *
501  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
502  * the given range from the tree regardless of state (i.e. for truncate).
503  *
504  * the range [start, end] is inclusive.
505  *
506  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
507  * bits were already set, or zero if none of the bits were already set.
508  */
509 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
510 		     int bits, int wake, int delete, gfp_t mask)
511 {
512 	struct extent_state *state;
513 	struct extent_state *prealloc = NULL;
514 	struct rb_node *node;
515 	unsigned long flags;
516 	int err;
517 	int set = 0;
518 
519 again:
520 	if (!prealloc && (mask & __GFP_WAIT)) {
521 		prealloc = alloc_extent_state(mask);
522 		if (!prealloc)
523 			return -ENOMEM;
524 	}
525 
526 	write_lock_irqsave(&tree->lock, flags);
527 	/*
528 	 * this search will find the extents that end after
529 	 * our range starts
530 	 */
531 	node = tree_search(&tree->state, start);
532 	if (!node)
533 		goto out;
534 	state = rb_entry(node, struct extent_state, rb_node);
535 	if (state->start > end)
536 		goto out;
537 	WARN_ON(state->end < start);
538 
539 	/*
540 	 *     | ---- desired range ---- |
541 	 *  | state | or
542 	 *  | ------------- state -------------- |
543 	 *
544 	 * We need to split the extent we found, and may flip
545 	 * bits on second half.
546 	 *
547 	 * If the extent we found extends past our range, we
548 	 * just split and search again.  It'll get split again
549 	 * the next time though.
550 	 *
551 	 * If the extent we found is inside our range, we clear
552 	 * the desired bit on it.
553 	 */
554 
555 	if (state->start < start) {
556 		err = split_state(tree, state, prealloc, start);
557 		BUG_ON(err == -EEXIST);
558 		prealloc = NULL;
559 		if (err)
560 			goto out;
561 		if (state->end <= end) {
562 			start = state->end + 1;
563 			set |= clear_state_bit(tree, state, bits,
564 					wake, delete);
565 		} else {
566 			start = state->start;
567 		}
568 		goto search_again;
569 	}
570 	/*
571 	 * | ---- desired range ---- |
572 	 *                        | state |
573 	 * We need to split the extent, and clear the bit
574 	 * on the first half
575 	 */
576 	if (state->start <= end && state->end > end) {
577 		err = split_state(tree, state, prealloc, end + 1);
578 		BUG_ON(err == -EEXIST);
579 
580 		if (wake)
581 			wake_up(&state->wq);
582 		set |= clear_state_bit(tree, prealloc, bits,
583 				       wake, delete);
584 		prealloc = NULL;
585 		goto out;
586 	}
587 
588 	start = state->end + 1;
589 	set |= clear_state_bit(tree, state, bits, wake, delete);
590 	goto search_again;
591 
592 out:
593 	write_unlock_irqrestore(&tree->lock, flags);
594 	if (prealloc)
595 		free_extent_state(prealloc);
596 
597 	return set;
598 
599 search_again:
600 	if (start > end)
601 		goto out;
602 	write_unlock_irqrestore(&tree->lock, flags);
603 	if (mask & __GFP_WAIT)
604 		cond_resched();
605 	goto again;
606 }
607 EXPORT_SYMBOL(clear_extent_bit);
608 
609 static int wait_on_state(struct extent_map_tree *tree,
610 			 struct extent_state *state)
611 {
612 	DEFINE_WAIT(wait);
613 	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
614 	read_unlock_irq(&tree->lock);
615 	schedule();
616 	read_lock_irq(&tree->lock);
617 	finish_wait(&state->wq, &wait);
618 	return 0;
619 }
620 
621 /*
622  * waits for one or more bits to clear on a range in the state tree.
623  * The range [start, end] is inclusive.
624  * The tree lock is taken by this function
625  */
626 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
627 {
628 	struct extent_state *state;
629 	struct rb_node *node;
630 
631 	read_lock_irq(&tree->lock);
632 again:
633 	while (1) {
634 		/*
635 		 * this search will find all the extents that end after
636 		 * our range starts
637 		 */
638 		node = tree_search(&tree->state, start);
639 		if (!node)
640 			break;
641 
642 		state = rb_entry(node, struct extent_state, rb_node);
643 
644 		if (state->start > end)
645 			goto out;
646 
647 		if (state->state & bits) {
648 			start = state->start;
649 			atomic_inc(&state->refs);
650 			wait_on_state(tree, state);
651 			free_extent_state(state);
652 			goto again;
653 		}
654 		start = state->end + 1;
655 
656 		if (start > end)
657 			break;
658 
659 		if (need_resched()) {
660 			read_unlock_irq(&tree->lock);
661 			cond_resched();
662 			read_lock_irq(&tree->lock);
663 		}
664 	}
665 out:
666 	read_unlock_irq(&tree->lock);
667 	return 0;
668 }
669 EXPORT_SYMBOL(wait_extent_bit);
670 
671 /*
672  * set some bits on a range in the tree.  This may require allocations
673  * or sleeping, so the gfp mask is used to indicate what is allowed.
674  *
675  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
676  * range already has the desired bits set.  The start of the existing
677  * range is returned in failed_start in this case.
678  *
679  * [start, end] is inclusive
680  * This takes the tree lock.
681  */
682 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
683 		   int exclusive, u64 *failed_start, gfp_t mask)
684 {
685 	struct extent_state *state;
686 	struct extent_state *prealloc = NULL;
687 	struct rb_node *node;
688 	unsigned long flags;
689 	int err = 0;
690 	int set;
691 	u64 last_start;
692 	u64 last_end;
693 again:
694 	if (!prealloc && (mask & __GFP_WAIT)) {
695 		prealloc = alloc_extent_state(mask);
696 		if (!prealloc)
697 			return -ENOMEM;
698 	}
699 
700 	write_lock_irqsave(&tree->lock, flags);
701 	/*
702 	 * this search will find all the extents that end after
703 	 * our range starts.
704 	 */
705 	node = tree_search(&tree->state, start);
706 	if (!node) {
707 		err = insert_state(tree, prealloc, start, end, bits);
708 		prealloc = NULL;
709 		BUG_ON(err == -EEXIST);
710 		goto out;
711 	}
712 
713 	state = rb_entry(node, struct extent_state, rb_node);
714 	last_start = state->start;
715 	last_end = state->end;
716 
717 	/*
718 	 * | ---- desired range ---- |
719 	 * | state |
720 	 *
721 	 * Just lock what we found and keep going
722 	 */
723 	if (state->start == start && state->end <= end) {
724 		set = state->state & bits;
725 		if (set && exclusive) {
726 			*failed_start = state->start;
727 			err = -EEXIST;
728 			goto out;
729 		}
730 		state->state |= bits;
731 		start = state->end + 1;
732 		merge_state(tree, state);
733 		goto search_again;
734 	}
735 
736 	/*
737 	 *     | ---- desired range ---- |
738 	 * | state |
739 	 *   or
740 	 * | ------------- state -------------- |
741 	 *
742 	 * We need to split the extent we found, and may flip bits on
743 	 * second half.
744 	 *
745 	 * If the extent we found extends past our
746 	 * range, we just split and search again.  It'll get split
747 	 * again the next time though.
748 	 *
749 	 * If the extent we found is inside our range, we set the
750 	 * desired bit on it.
751 	 */
752 	if (state->start < start) {
753 		set = state->state & bits;
754 		if (exclusive && set) {
755 			*failed_start = start;
756 			err = -EEXIST;
757 			goto out;
758 		}
759 		err = split_state(tree, state, prealloc, start);
760 		BUG_ON(err == -EEXIST);
761 		prealloc = NULL;
762 		if (err)
763 			goto out;
764 		if (state->end <= end) {
765 			state->state |= bits;
766 			start = state->end + 1;
767 			merge_state(tree, state);
768 		} else {
769 			start = state->start;
770 		}
771 		goto search_again;
772 	}
773 	/*
774 	 * | ---- desired range ---- |
775 	 *     | state | or               | state |
776 	 *
777 	 * There's a hole, we need to insert something in it and
778 	 * ignore the extent we found.
779 	 */
780 	if (state->start > start) {
781 		u64 this_end;
782 		if (end < last_start)
783 			this_end = end;
784 		else
785 			this_end = last_start - 1;
786 		err = insert_state(tree, prealloc, start, this_end,
787 				   bits);
788 		prealloc = NULL;
789 		BUG_ON(err == -EEXIST);
790 		if (err)
791 			goto out;
792 		start = this_end + 1;
793 		goto search_again;
794 	}
795 	/*
796 	 * | ---- desired range ---- |
797 	 *                        | state |
798 	 * We need to split the extent, and set the bit
799 	 * on the first half
800 	 */
801 	if (state->start <= end && state->end > end) {
802 		set = state->state & bits;
803 		if (exclusive && set) {
804 			*failed_start = start;
805 			err = -EEXIST;
806 			goto out;
807 		}
808 		err = split_state(tree, state, prealloc, end + 1);
809 		BUG_ON(err == -EEXIST);
810 
811 		prealloc->state |= bits;
812 		merge_state(tree, prealloc);
813 		prealloc = NULL;
814 		goto out;
815 	}
816 
817 	goto search_again;
818 
819 out:
820 	write_unlock_irqrestore(&tree->lock, flags);
821 	if (prealloc)
822 		free_extent_state(prealloc);
823 
824 	return err;
825 
826 search_again:
827 	if (start > end)
828 		goto out;
829 	write_unlock_irqrestore(&tree->lock, flags);
830 	if (mask & __GFP_WAIT)
831 		cond_resched();
832 	goto again;
833 }
834 EXPORT_SYMBOL(set_extent_bit);
835 
836 /* wrappers around set/clear extent bit */
837 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
838 		     gfp_t mask)
839 {
840 	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
841 			      mask);
842 }
843 EXPORT_SYMBOL(set_extent_dirty);
844 
845 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
846 		    int bits, gfp_t mask)
847 {
848 	return set_extent_bit(tree, start, end, bits, 0, NULL,
849 			      mask);
850 }
851 EXPORT_SYMBOL(set_extent_bits);
852 
853 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
854 		      int bits, gfp_t mask)
855 {
856 	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
857 }
858 EXPORT_SYMBOL(clear_extent_bits);
859 
860 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
861 		     gfp_t mask)
862 {
863 	return set_extent_bit(tree, start, end,
864 			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
865 			      mask);
866 }
867 EXPORT_SYMBOL(set_extent_delalloc);
868 
869 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
870 		       gfp_t mask)
871 {
872 	return clear_extent_bit(tree, start, end,
873 				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
874 }
875 EXPORT_SYMBOL(clear_extent_dirty);
876 
877 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
878 		     gfp_t mask)
879 {
880 	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
881 			      mask);
882 }
883 EXPORT_SYMBOL(set_extent_new);
884 
885 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
886 		       gfp_t mask)
887 {
888 	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
889 }
890 EXPORT_SYMBOL(clear_extent_new);
891 
892 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
893 			gfp_t mask)
894 {
895 	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
896 			      mask);
897 }
898 EXPORT_SYMBOL(set_extent_uptodate);
899 
900 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
901 			  gfp_t mask)
902 {
903 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
904 }
905 EXPORT_SYMBOL(clear_extent_uptodate);
906 
907 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
908 			 gfp_t mask)
909 {
910 	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
911 			      0, NULL, mask);
912 }
913 EXPORT_SYMBOL(set_extent_writeback);
914 
915 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
916 			   gfp_t mask)
917 {
918 	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
919 }
920 EXPORT_SYMBOL(clear_extent_writeback);
921 
922 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
923 {
924 	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
925 }
926 EXPORT_SYMBOL(wait_on_extent_writeback);
927 
928 /*
929  * locks a range in ascending order, waiting for any locked regions
930  * it hits on the way.  [start,end] are inclusive, and this will sleep.
931  */
932 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
933 {
934 	int err;
935 	u64 failed_start;
936 	while (1) {
937 		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
938 				     &failed_start, mask);
939 		if (err == -EEXIST && (mask & __GFP_WAIT)) {
940 			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
941 			start = failed_start;
942 		} else {
943 			break;
944 		}
945 		WARN_ON(start > end);
946 	}
947 	return err;
948 }
949 EXPORT_SYMBOL(lock_extent);
950 
951 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
952 		  gfp_t mask)
953 {
954 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
955 }
956 EXPORT_SYMBOL(unlock_extent);
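
/*
 * Locking sketch: lock_extent() sleeps (when the mask allows it) until the
 * caller owns [start, end], so the usual pattern is a strict bracket around
 * whatever touches that byte range, with both bounds inclusive.
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... read or update pages / extent state for [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */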
957 
958 /*
959  * helper function to set pages and extents in the tree dirty
960  */
961 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
962 {
963 	unsigned long index = start >> PAGE_CACHE_SHIFT;
964 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
965 	struct page *page;
966 
967 	while (index <= end_index) {
968 		page = find_get_page(tree->mapping, index);
969 		BUG_ON(!page);
970 		__set_page_dirty_nobuffers(page);
971 		page_cache_release(page);
972 		index++;
973 	}
974 	set_extent_dirty(tree, start, end, GFP_NOFS);
975 	return 0;
976 }
977 EXPORT_SYMBOL(set_range_dirty);
978 
979 /*
980  * helper function to set both pages and extents in the tree writeback
981  */
982 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
983 {
984 	unsigned long index = start >> PAGE_CACHE_SHIFT;
985 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
986 	struct page *page;
987 
988 	while (index <= end_index) {
989 		page = find_get_page(tree->mapping, index);
990 		BUG_ON(!page);
991 		set_page_writeback(page);
992 		page_cache_release(page);
993 		index++;
994 	}
995 	set_extent_writeback(tree, start, end, GFP_NOFS);
996 	return 0;
997 }
998 EXPORT_SYMBOL(set_range_writeback);
999 
1000 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
1001 			  u64 *start_ret, u64 *end_ret, int bits)
1002 {
1003 	struct rb_node *node;
1004 	struct extent_state *state;
1005 	int ret = 1;
1006 
1007 	read_lock_irq(&tree->lock);
1008 	/*
1009 	 * this search will find all the extents that end after
1010 	 * our range starts.
1011 	 */
1012 	node = tree_search(&tree->state, start);
1013 	if (!node || IS_ERR(node)) {
1014 		goto out;
1015 	}
1016 
1017 	while(1) {
1018 		state = rb_entry(node, struct extent_state, rb_node);
1019 		if (state->end >= start && (state->state & bits)) {
1020 			*start_ret = state->start;
1021 			*end_ret = state->end;
1022 			ret = 0;
1023 			break;
1024 		}
1025 		node = rb_next(node);
1026 		if (!node)
1027 			break;
1028 	}
1029 out:
1030 	read_unlock_irq(&tree->lock);
1031 	return ret;
1032 }
1033 EXPORT_SYMBOL(find_first_extent_bit);
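
/*
 * Scan sketch: find_first_extent_bit() returns 0 on a hit, so walking every
 * range with a given bit set is a simple cursor loop over the inclusive
 * [found_start, found_end] results.
 *
 *	u64 found_start;
 *	u64 found_end;
 *	u64 cur = 0;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      EXTENT_DIRTY)) {
 *		... handle [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */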
1034 
1035 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1036 			     u64 *start, u64 *end, u64 max_bytes)
1037 {
1038 	struct rb_node *node;
1039 	struct extent_state *state;
1040 	u64 cur_start = *start;
1041 	u64 found = 0;
1042 	u64 total_bytes = 0;
1043 
1044 	write_lock_irq(&tree->lock);
1045 	/*
1046 	 * this search will find all the extents that end after
1047 	 * our range starts.
1048 	 */
1049 search_again:
1050 	node = tree_search(&tree->state, cur_start);
1051 	if (!node || IS_ERR(node)) {
1052 		goto out;
1053 	}
1054 
1055 	while(1) {
1056 		state = rb_entry(node, struct extent_state, rb_node);
1057 		if (found && state->start != cur_start) {
1058 			goto out;
1059 		}
1060 		if (!(state->state & EXTENT_DELALLOC)) {
1061 			goto out;
1062 		}
1063 		if (!found) {
1064 			struct extent_state *prev_state;
1065 			struct rb_node *prev_node = node;
1066 			while(1) {
1067 				prev_node = rb_prev(prev_node);
1068 				if (!prev_node)
1069 					break;
1070 				prev_state = rb_entry(prev_node,
1071 						      struct extent_state,
1072 						      rb_node);
1073 				if (!(prev_state->state & EXTENT_DELALLOC))
1074 					break;
1075 				state = prev_state;
1076 				node = prev_node;
1077 			}
1078 		}
1079 		if (state->state & EXTENT_LOCKED) {
1080 			DEFINE_WAIT(wait);
1081 			atomic_inc(&state->refs);
1082 			prepare_to_wait(&state->wq, &wait,
1083 					TASK_UNINTERRUPTIBLE);
1084 			write_unlock_irq(&tree->lock);
1085 			schedule();
1086 			write_lock_irq(&tree->lock);
1087 			finish_wait(&state->wq, &wait);
1088 			free_extent_state(state);
1089 			goto search_again;
1090 		}
1091 		state->state |= EXTENT_LOCKED;
1092 		if (!found)
1093 			*start = state->start;
1094 		found++;
1095 		*end = state->end;
1096 		cur_start = state->end + 1;
1097 		node = rb_next(node);
1098 		if (!node)
1099 			break;
1100 		total_bytes += state->end - state->start + 1;
1101 		if (total_bytes >= max_bytes)
1102 			break;
1103 	}
1104 out:
1105 	write_unlock_irq(&tree->lock);
1106 	return found;
1107 }
1108 
1109 u64 count_range_bits(struct extent_map_tree *tree,
1110 		     u64 *start, u64 max_bytes, unsigned long bits)
1111 {
1112 	struct rb_node *node;
1113 	struct extent_state *state;
1114 	u64 cur_start = *start;
1115 	u64 total_bytes = 0;
1116 	int found = 0;
1117 
1118 	write_lock_irq(&tree->lock);
1119 	/*
1120 	 * this search will find all the extents that end after
1121 	 * our range starts.
1122 	 */
1123 	node = tree_search(&tree->state, cur_start);
1124 	if (!node || IS_ERR(node)) {
1125 		goto out;
1126 	}
1127 
1128 	while(1) {
1129 		state = rb_entry(node, struct extent_state, rb_node);
1130 		if ((state->state & bits)) {
1131 			total_bytes += state->end - state->start + 1;
1132 			if (total_bytes >= max_bytes)
1133 				break;
1134 			if (!found) {
1135 				*start = state->start;
1136 				found = 1;
1137 			}
1138 		}
1139 		node = rb_next(node);
1140 		if (!node)
1141 			break;
1142 	}
1143 out:
1144 	write_unlock_irq(&tree->lock);
1145 	return total_bytes;
1146 }
1147 
1148 /*
1149  * helper function to lock both pages and extents in the tree.
1150  * pages must be locked first.
1151  */
1152 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1153 {
1154 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1155 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1156 	struct page *page;
1157 	int err;
1158 
1159 	while (index <= end_index) {
1160 		page = grab_cache_page(tree->mapping, index);
1161 		if (!page) {
1162 			err = -ENOMEM;
1163 			goto failed;
1164 		}
1165 		if (IS_ERR(page)) {
1166 			err = PTR_ERR(page);
1167 			goto failed;
1168 		}
1169 		index++;
1170 	}
1171 	lock_extent(tree, start, end, GFP_NOFS);
1172 	return 0;
1173 
1174 failed:
1175 	/*
1176 	 * we failed above in getting the page at 'index', so we undo here
1177 	 * up to but not including the page at 'index'
1178 	 */
1179 	end_index = index;
1180 	index = start >> PAGE_CACHE_SHIFT;
1181 	while (index < end_index) {
1182 		page = find_get_page(tree->mapping, index);
1183 		unlock_page(page);
1184 		page_cache_release(page);
1185 		index++;
1186 	}
1187 	return err;
1188 }
1189 EXPORT_SYMBOL(lock_range);
1190 
1191 /*
1192  * helper function to unlock both pages and extents in the tree.
1193  */
1194 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1195 {
1196 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1197 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1198 	struct page *page;
1199 
1200 	while (index <= end_index) {
1201 		page = find_get_page(tree->mapping, index);
1202 		unlock_page(page);
1203 		page_cache_release(page);
1204 		index++;
1205 	}
1206 	unlock_extent(tree, start, end, GFP_NOFS);
1207 	return 0;
1208 }
1209 EXPORT_SYMBOL(unlock_range);
1210 
1211 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1212 {
1213 	struct rb_node *node;
1214 	struct extent_state *state;
1215 	int ret = 0;
1216 
1217 	write_lock_irq(&tree->lock);
1218 	/*
1219 	 * this search will find all the extents that end after
1220 	 * our range starts.
1221 	 */
1222 	node = tree_search(&tree->state, start);
1223 	if (!node || IS_ERR(node)) {
1224 		ret = -ENOENT;
1225 		goto out;
1226 	}
1227 	state = rb_entry(node, struct extent_state, rb_node);
1228 	if (state->start != start) {
1229 		ret = -ENOENT;
1230 		goto out;
1231 	}
1232 	state->private = private;
1233 out:
1234 	write_unlock_irq(&tree->lock);
1235 	return ret;
1236 }
1237 
1238 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1239 {
1240 	struct rb_node *node;
1241 	struct extent_state *state;
1242 	int ret = 0;
1243 
1244 	read_lock_irq(&tree->lock);
1245 	/*
1246 	 * this search will find all the extents that end after
1247 	 * our range starts.
1248 	 */
1249 	node = tree_search(&tree->state, start);
1250 	if (!node || IS_ERR(node)) {
1251 		ret = -ENOENT;
1252 		goto out;
1253 	}
1254 	state = rb_entry(node, struct extent_state, rb_node);
1255 	if (state->start != start) {
1256 		ret = -ENOENT;
1257 		goto out;
1258 	}
1259 	*private = state->private;
1260 out:
1261 	read_unlock_irq(&tree->lock);
1262 	return ret;
1263 }
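
/*
 * The private field is per extent_state scratch space, keyed by the exact
 * 'start' offset of the state record; both helpers return -ENOENT when no
 * record starts there.  A sketch of the round trip (stashing a checksum for
 * a block is one plausible use):
 *
 *	set_state_private(tree, block_start, csum);
 *	...
 *	if (get_state_private(tree, block_start, &csum) == 0)
 *		... csum holds the stored value ...
 */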
1264 
1265 /*
1266  * searches a range in the state tree for a given mask.
1267  * If 'filled' == 1, this returns 1 only if every extent in the range
1268  * has the bits set.  Otherwise, 1 is returned if any bit in the
1269  * range is found set.
1270  */
1271 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1272 		   int bits, int filled)
1273 {
1274 	struct extent_state *state = NULL;
1275 	struct rb_node *node;
1276 	int bitset = 0;
1277 
1278 	read_lock_irq(&tree->lock);
1279 	node = tree_search(&tree->state, start);
1280 	while (node && start <= end) {
1281 		state = rb_entry(node, struct extent_state, rb_node);
1282 
1283 		if (filled && state->start > start) {
1284 			bitset = 0;
1285 			break;
1286 		}
1287 
1288 		if (state->start > end)
1289 			break;
1290 
1291 		if (state->state & bits) {
1292 			bitset = 1;
1293 			if (!filled)
1294 				break;
1295 		} else if (filled) {
1296 			bitset = 0;
1297 			break;
1298 		}
1299 		start = state->end + 1;
1300 		if (start > end)
1301 			break;
1302 		node = rb_next(node);
1303 	}
1304 	read_unlock_irq(&tree->lock);
1305 	return bitset;
1306 }
1307 EXPORT_SYMBOL(test_range_bit);
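
/*
 * The 'filled' flag selects between "any" and "all" semantics over the
 * inclusive range; a short sketch of both queries:
 *
 *	if (test_range_bit(tree, start, end, EXTENT_DIRTY, 0))
 *		... at least part of [start, end] is dirty ...
 *
 *	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
 *		... every byte of [start, end] is uptodate ...
 */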
1308 
1309 /*
1310  * helper function to set a given page up to date if all the
1311  * extents in the tree for that page are up to date
1312  */
1313 static int check_page_uptodate(struct extent_map_tree *tree,
1314 			       struct page *page)
1315 {
1316 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1317 	u64 end = start + PAGE_CACHE_SIZE - 1;
1318 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1319 		SetPageUptodate(page);
1320 	return 0;
1321 }
1322 
1323 /*
1324  * helper function to unlock a page if all the extents in the tree
1325  * for that page are unlocked
1326  */
1327 static int check_page_locked(struct extent_map_tree *tree,
1328 			     struct page *page)
1329 {
1330 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1331 	u64 end = start + PAGE_CACHE_SIZE - 1;
1332 	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1333 		unlock_page(page);
1334 	return 0;
1335 }
1336 
1337 /*
1338  * helper function to end page writeback if all the extents
1339  * in the tree for that page are done with writeback
1340  */
1341 static int check_page_writeback(struct extent_map_tree *tree,
1342 			     struct page *page)
1343 {
1344 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1345 	u64 end = start + PAGE_CACHE_SIZE - 1;
1346 	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1347 		end_page_writeback(page);
1348 	return 0;
1349 }
1350 
1351 /* lots and lots of room for performance fixes in the end_bio funcs */
1352 
1353 /*
1354  * after a writepage IO is done, we need to:
1355  * clear the uptodate bits on error
1356  * clear the writeback bits in the extent tree for this IO
1357  * end_page_writeback if the page has no more pending IO
1358  *
1359  * Scheduling is not allowed, so the extent state tree is expected
1360  * to have one and only one object corresponding to this IO.
1361  */
1362 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1363 static void end_bio_extent_writepage(struct bio *bio, int err)
1364 #else
1365 static int end_bio_extent_writepage(struct bio *bio,
1366 				   unsigned int bytes_done, int err)
1367 #endif
1368 {
1369 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1370 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1371 	struct extent_map_tree *tree = bio->bi_private;
1372 	u64 start;
1373 	u64 end;
1374 	int whole_page;
1375 
1376 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1377 	if (bio->bi_size)
1378 		return 1;
1379 #endif
1380 
1381 	do {
1382 		struct page *page = bvec->bv_page;
1383 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1384 			 bvec->bv_offset;
1385 		end = start + bvec->bv_len - 1;
1386 
1387 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1388 			whole_page = 1;
1389 		else
1390 			whole_page = 0;
1391 
1392 		if (--bvec >= bio->bi_io_vec)
1393 			prefetchw(&bvec->bv_page->flags);
1394 
1395 		if (!uptodate) {
1396 			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1397 			ClearPageUptodate(page);
1398 			SetPageError(page);
1399 		}
1400 		clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1401 
1402 		if (whole_page)
1403 			end_page_writeback(page);
1404 		else
1405 			check_page_writeback(tree, page);
1406 		if (tree->ops && tree->ops->writepage_end_io_hook)
1407 			tree->ops->writepage_end_io_hook(page, start, end);
1408 	} while (bvec >= bio->bi_io_vec);
1409 
1410 	bio_put(bio);
1411 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1412 	return 0;
1413 #endif
1414 }
1415 
1416 /*
1417  * after a readpage IO is done, we need to:
1418  * clear the uptodate bits on error
1419  * set the uptodate bits if things worked
1420  * set the page up to date if all extents in the tree are uptodate
1421  * clear the lock bit in the extent tree
1422  * unlock the page if there are no other extents locked for it
1423  *
1424  * Scheduling is not allowed, so the extent state tree is expected
1425  * to have one and only one object corresponding to this IO.
1426  */
1427 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1428 static void end_bio_extent_readpage(struct bio *bio, int err)
1429 #else
1430 static int end_bio_extent_readpage(struct bio *bio,
1431 				   unsigned int bytes_done, int err)
1432 #endif
1433 {
1434 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1435 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1436 	struct extent_map_tree *tree = bio->bi_private;
1437 	u64 start;
1438 	u64 end;
1439 	int whole_page;
1440 	int ret;
1441 
1442 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1443 	if (bio->bi_size)
1444 		return 1;
1445 #endif
1446 
1447 	do {
1448 		struct page *page = bvec->bv_page;
1449 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1450 			bvec->bv_offset;
1451 		end = start + bvec->bv_len - 1;
1452 
1453 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1454 			whole_page = 1;
1455 		else
1456 			whole_page = 0;
1457 
1458 		if (--bvec >= bio->bi_io_vec)
1459 			prefetchw(&bvec->bv_page->flags);
1460 
1461 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1462 			ret = tree->ops->readpage_end_io_hook(page, start, end);
1463 			if (ret)
1464 				uptodate = 0;
1465 		}
1466 		if (uptodate) {
1467 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1468 			if (whole_page)
1469 				SetPageUptodate(page);
1470 			else
1471 				check_page_uptodate(tree, page);
1472 		} else {
1473 			ClearPageUptodate(page);
1474 			SetPageError(page);
1475 		}
1476 
1477 		unlock_extent(tree, start, end, GFP_ATOMIC);
1478 
1479 		if (whole_page)
1480 			unlock_page(page);
1481 		else
1482 			check_page_locked(tree, page);
1483 	} while (bvec >= bio->bi_io_vec);
1484 
1485 	bio_put(bio);
1486 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1487 	return 0;
1488 #endif
1489 }
1490 
1491 /*
1492  * IO done from prepare_write is pretty simple, we just unlock
1493  * the structs in the extent tree when done, and set the uptodate bits
1494  * as appropriate.
1495  */
1496 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1497 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1498 #else
1499 static int end_bio_extent_preparewrite(struct bio *bio,
1500 				       unsigned int bytes_done, int err)
1501 #endif
1502 {
1503 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1504 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1505 	struct extent_map_tree *tree = bio->bi_private;
1506 	u64 start;
1507 	u64 end;
1508 
1509 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1510 	if (bio->bi_size)
1511 		return 1;
1512 #endif
1513 
1514 	do {
1515 		struct page *page = bvec->bv_page;
1516 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1517 			bvec->bv_offset;
1518 		end = start + bvec->bv_len - 1;
1519 
1520 		if (--bvec >= bio->bi_io_vec)
1521 			prefetchw(&bvec->bv_page->flags);
1522 
1523 		if (uptodate) {
1524 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1525 		} else {
1526 			ClearPageUptodate(page);
1527 			SetPageError(page);
1528 		}
1529 
1530 		unlock_extent(tree, start, end, GFP_ATOMIC);
1531 
1532 	} while (bvec >= bio->bi_io_vec);
1533 
1534 	bio_put(bio);
1535 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1536 	return 0;
1537 #endif
1538 }
1539 
1540 static struct bio *
1541 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1542 		 gfp_t gfp_flags)
1543 {
1544 	struct bio *bio;
1545 
1546 	bio = bio_alloc(gfp_flags, nr_vecs);
1547 
1548 	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1549 		while (!bio && (nr_vecs /= 2))
1550 			bio = bio_alloc(gfp_flags, nr_vecs);
1551 	}
1552 
1553 	if (bio) {
1554 		bio->bi_bdev = bdev;
1555 		bio->bi_sector = first_sector;
1556 	}
1557 	return bio;
1558 }
1559 
1560 static int submit_one_bio(int rw, struct bio *bio)
1561 {
1562 	int ret = 0;
1563 	bio_get(bio);
1564 	submit_bio(rw, bio);
1565 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
1566 		ret = -EOPNOTSUPP;
1567 	bio_put(bio);
1568 	return ret;
1569 }
1570 
1571 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1572 			      struct page *page, sector_t sector,
1573 			      size_t size, unsigned long offset,
1574 			      struct block_device *bdev,
1575 			      struct bio **bio_ret,
1576 			      unsigned long max_pages,
1577 			      bio_end_io_t end_io_func)
1578 {
1579 	int ret = 0;
1580 	struct bio *bio;
1581 	int nr;
1582 
1583 	if (bio_ret && *bio_ret) {
1584 		bio = *bio_ret;
1585 		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1586 		    bio_add_page(bio, page, size, offset) < size) {
1587 			ret = submit_one_bio(rw, bio);
1588 			bio = NULL;
1589 		} else {
1590 			return 0;
1591 		}
1592 	}
1593 	nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1594 	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1595 	if (!bio) {
1596 		printk("failed to allocate bio nr %d\n", nr);
		if (bio_ret)
			*bio_ret = NULL;
		return -ENOMEM;
1597 	}
1598 	bio_add_page(bio, page, size, offset);
1599 	bio->bi_end_io = end_io_func;
1600 	bio->bi_private = tree;
1601 	if (bio_ret) {
1602 		*bio_ret = bio;
1603 	} else {
1604 		ret = submit_one_bio(rw, bio);
1605 	}
1606 
1607 	return ret;
1608 }
1609 
1610 void set_page_extent_mapped(struct page *page)
1611 {
1612 	if (!PagePrivate(page)) {
1613 		SetPagePrivate(page);
1614 		WARN_ON(!page->mapping->a_ops->invalidatepage);
1615 		set_page_private(page, EXTENT_PAGE_PRIVATE);
1616 		page_cache_get(page);
1617 	}
1618 }
1619 
1620 /*
1621  * basic readpage implementation.  Locked extent state structs are inserted
1622  * into the tree and removed when the IO is done (by the end_io
1623  * handlers)
1624  */
1625 static int __extent_read_full_page(struct extent_map_tree *tree,
1626 				   struct page *page,
1627 				   get_extent_t *get_extent,
1628 				   struct bio **bio)
1629 {
1630 	struct inode *inode = page->mapping->host;
1631 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1632 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1633 	u64 end;
1634 	u64 cur = start;
1635 	u64 extent_offset;
1636 	u64 last_byte = i_size_read(inode);
1637 	u64 block_start;
1638 	u64 cur_end;
1639 	sector_t sector;
1640 	struct extent_map *em;
1641 	struct block_device *bdev;
1642 	int ret;
1643 	int nr = 0;
1644 	size_t page_offset = 0;
1645 	size_t iosize;
1646 	size_t blocksize = inode->i_sb->s_blocksize;
1647 
1648 	set_page_extent_mapped(page);
1649 
1650 	end = page_end;
1651 	lock_extent(tree, start, end, GFP_NOFS);
1652 
1653 	while (cur <= end) {
1654 		if (cur >= last_byte) {
1655 			iosize = PAGE_CACHE_SIZE - page_offset;
1656 			zero_user_page(page, page_offset, iosize, KM_USER0);
1657 			set_extent_uptodate(tree, cur, cur + iosize - 1,
1658 					    GFP_NOFS);
1659 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1660 			break;
1661 		}
1662 		em = get_extent(inode, page, page_offset, cur, end, 0);
1663 		if (IS_ERR(em) || !em) {
1664 			SetPageError(page);
1665 			unlock_extent(tree, cur, end, GFP_NOFS);
1666 			break;
1667 		}
1668 
1669 		extent_offset = cur - em->start;
1670 		BUG_ON(em->end < cur);
1671 		BUG_ON(end < cur);
1672 
1673 		iosize = min(em->end - cur, end - cur) + 1;
1674 		cur_end = min(em->end, end);
1675 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1676 		sector = (em->block_start + extent_offset) >> 9;
1677 		bdev = em->bdev;
1678 		block_start = em->block_start;
1679 		free_extent_map(em);
1680 		em = NULL;
1681 
1682 		/* we've found a hole, just zero and go on */
1683 		if (block_start == EXTENT_MAP_HOLE) {
1684 			zero_user_page(page, page_offset, iosize, KM_USER0);
1685 			set_extent_uptodate(tree, cur, cur + iosize - 1,
1686 					    GFP_NOFS);
1687 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1688 			cur = cur + iosize;
1689 			page_offset += iosize;
1690 			continue;
1691 		}
1692 		/* the get_extent function already copied into the page */
1693 		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1694 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1695 			cur = cur + iosize;
1696 			page_offset += iosize;
1697 			continue;
1698 		}
1699 
1700 		ret = 0;
1701 		if (tree->ops && tree->ops->readpage_io_hook) {
1702 			ret = tree->ops->readpage_io_hook(page, cur,
1703 							  cur + iosize - 1);
1704 		}
1705 		if (!ret) {
1706 			unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1707 			nr -= page->index;
1708 			ret = submit_extent_page(READ, tree, page,
1709 					 sector, iosize, page_offset,
1710 					 bdev, bio, nr,
1711 					 end_bio_extent_readpage);
1712 		}
1713 		if (ret)
1714 			SetPageError(page);
1715 		cur = cur + iosize;
1716 		page_offset += iosize;
1717 		nr++;
1718 	}
1719 	if (!nr) {
1720 		if (!PageError(page))
1721 			SetPageUptodate(page);
1722 		unlock_page(page);
1723 	}
1724 	return 0;
1725 }
1726 
1727 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1728 			    get_extent_t *get_extent)
1729 {
1730 	struct bio *bio = NULL;
1731 	int ret;
1732 
1733 	ret = __extent_read_full_page(tree, page, get_extent, &bio);
1734 	if (bio)
1735 		submit_one_bio(READ, bio);
1736 	return ret;
1737 }
1738 EXPORT_SYMBOL(extent_read_full_page);
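
/*
 * Wiring sketch for an address_space ->readpage method.  The get_extent_t
 * callback maps a file range to an extent_map; btrfs_get_extent and
 * BTRFS_I() below are assumptions borrowed from the rest of btrfs, named
 * only for illustration.
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree =
 *			&BTRFS_I(page->mapping->host)->extent_tree;
 *
 *		return extent_read_full_page(tree, page, btrfs_get_extent);
 *	}
 */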
1739 
1740 /*
1741  * the writepage semantics are similar to regular writepage.  extent
1742  * records are inserted to lock ranges in the tree, and as dirty areas
1743  * are found, they are marked writeback.  Then the lock bits are removed
1744  * and the end_io handler clears the writeback ranges
1745  */
1746 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1747 			      void *data)
1748 {
1749 	struct inode *inode = page->mapping->host;
1750 	struct extent_page_data *epd = data;
1751 	struct extent_map_tree *tree = epd->tree;
1752 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1753 	u64 delalloc_start;
1754 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1755 	u64 end;
1756 	u64 cur = start;
1757 	u64 extent_offset;
1758 	u64 last_byte = i_size_read(inode);
1759 	u64 block_start;
1760 	u64 iosize;
1761 	sector_t sector;
1762 	struct extent_map *em;
1763 	struct block_device *bdev;
1764 	int ret;
1765 	int nr = 0;
1766 	size_t page_offset = 0;
1767 	size_t blocksize;
1768 	loff_t i_size = i_size_read(inode);
1769 	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1770 	u64 nr_delalloc;
1771 	u64 delalloc_end;
1772 
1773 	WARN_ON(!PageLocked(page));
1774 	if (page->index > end_index) {
1775 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1776 		unlock_page(page);
1777 		return 0;
1778 	}
1779 
1780 	if (page->index == end_index) {
1781 		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1782 		zero_user_page(page, offset,
1783 			       PAGE_CACHE_SIZE - offset, KM_USER0);
1784 	}
1785 
1786 	set_page_extent_mapped(page);
1787 
1788 	delalloc_start = start;
1789 	delalloc_end = 0;
1790 	while(delalloc_end < page_end) {
1791 		nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1792 						       &delalloc_end,
1793 						       128 * 1024 * 1024);
1794 		if (nr_delalloc <= 0)
1795 			break;
1796 		tree->ops->fill_delalloc(inode, delalloc_start,
1797 					 delalloc_end);
1798 		clear_extent_bit(tree, delalloc_start,
1799 				 delalloc_end,
1800 				 EXTENT_LOCKED | EXTENT_DELALLOC,
1801 				 1, 0, GFP_NOFS);
1802 		delalloc_start = delalloc_end + 1;
1803 	}
1804 	lock_extent(tree, start, page_end, GFP_NOFS);
1805 
1806 	end = page_end;
1807 	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1808 		printk("found delalloc bits after lock_extent\n");
1809 	}
1810 
1811 	if (last_byte <= start) {
1812 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1813 		goto done;
1814 	}
1815 
1816 	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1817 	blocksize = inode->i_sb->s_blocksize;
1818 
1819 	while (cur <= end) {
1820 		if (cur >= last_byte) {
1821 			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1822 			break;
1823 		}
1824 		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1825 		if (IS_ERR(em) || !em) {
1826 			SetPageError(page);
1827 			break;
1828 		}
1829 
1830 		extent_offset = cur - em->start;
1831 		BUG_ON(em->end < cur);
1832 		BUG_ON(end < cur);
1833 		iosize = min(em->end - cur, end - cur) + 1;
1834 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1835 		sector = (em->block_start + extent_offset) >> 9;
1836 		bdev = em->bdev;
1837 		block_start = em->block_start;
1838 		free_extent_map(em);
1839 		em = NULL;
1840 
1841 		if (block_start == EXTENT_MAP_HOLE ||
1842 		    block_start == EXTENT_MAP_INLINE) {
1843 			clear_extent_dirty(tree, cur,
1844 					   cur + iosize - 1, GFP_NOFS);
1845 			cur = cur + iosize;
1846 			page_offset += iosize;
1847 			continue;
1848 		}
1849 
1850 		/* leave this out until we have a page_mkwrite call */
1851 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1852 				   EXTENT_DIRTY, 0)) {
1853 			cur = cur + iosize;
1854 			page_offset += iosize;
1855 			continue;
1856 		}
1857 		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1858 		if (tree->ops && tree->ops->writepage_io_hook) {
1859 			ret = tree->ops->writepage_io_hook(page, cur,
1860 						cur + iosize - 1);
1861 		} else {
1862 			ret = 0;
1863 		}
1864 		if (ret)
1865 			SetPageError(page);
1866 		else {
1867 			unsigned long max_nr = end_index + 1;
1868 			set_range_writeback(tree, cur, cur + iosize - 1);
1869 			if (!PageWriteback(page)) {
1870 				printk("warning page %lu not writeback, "
1871 				       "cur %llu end %llu\n", page->index,
1872 				       (unsigned long long)cur,
1873 				       (unsigned long long)end);
1874 			}
1875 
1876 			ret = submit_extent_page(WRITE, tree, page, sector,
1877 						 iosize, page_offset, bdev,
1878 						 &epd->bio, max_nr,
1879 						 end_bio_extent_writepage);
1880 			if (ret)
1881 				SetPageError(page);
1882 		}
1883 		cur = cur + iosize;
1884 		page_offset += iosize;
1885 		nr++;
1886 	}
1887 done:
1888 	if (nr == 0) {
1889 		/* make sure the mapping tag for page dirty gets cleared */
1890 		set_page_writeback(page);
1891 		end_page_writeback(page);
1892 	}
1893 	unlock_extent(tree, start, page_end, GFP_NOFS);
1894 	unlock_page(page);
1895 	return 0;
1896 }
1897 
1898 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1899 			  get_extent_t *get_extent,
1900 			  struct writeback_control *wbc)
1901 {
1902 	int ret;
1903 	struct address_space *mapping = page->mapping;
1904 	struct extent_page_data epd = {
1905 		.bio = NULL,
1906 		.tree = tree,
1907 		.get_extent = get_extent,
1908 	};
1909 	struct writeback_control wbc_writepages = {
1910 		.bdi		= wbc->bdi,
1911 		.sync_mode	= WB_SYNC_NONE,
1912 		.older_than_this = NULL,
1913 		.nr_to_write	= 64,
1914 		.range_start	= page_offset(page) + PAGE_CACHE_SIZE,
1915 		.range_end	= (loff_t)-1,
1916 	};
1917 
1918 
1919 	ret = __extent_writepage(page, wbc, &epd);
1920 
1921 	write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
1922 	if (epd.bio)
1923 		submit_one_bio(WRITE, epd.bio);
1924 	return ret;
1925 }
1926 EXPORT_SYMBOL(extent_write_full_page);
1927 
1928 int extent_writepages(struct extent_map_tree *tree,
1929 		      struct address_space *mapping,
1930 		      get_extent_t *get_extent,
1931 		      struct writeback_control *wbc)
1932 {
1933 	int ret;
1934 	struct extent_page_data epd = {
1935 		.bio = NULL,
1936 		.tree = tree,
1937 		.get_extent = get_extent,
1938 	};
1939 
1940 	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
1941 	if (epd.bio)
1942 		submit_one_bio(WRITE, epd.bio);
1943 	return ret;
1944 }
1945 EXPORT_SYMBOL(extent_writepages);
1946 
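/*
 * ->readpages style helper: each page on the list is added to the page
 * cache and the lru, then read through __extent_read_full_page so that
 * adjacent pages can share a single bio.
 */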
1947 int extent_readpages(struct extent_map_tree *tree,
1948 		     struct address_space *mapping,
1949 		     struct list_head *pages, unsigned nr_pages,
1950 		     get_extent_t get_extent)
1951 {
1952 	struct bio *bio = NULL;
1953 	unsigned page_idx;
1954 	struct pagevec pvec;
1955 
1956 	pagevec_init(&pvec, 0);
1957 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
1958 		struct page *page = list_entry(pages->prev, struct page, lru);
1959 
1960 		prefetchw(&page->flags);
1961 		list_del(&page->lru);
1962 		/*
1963 		 * what we want to do here is call add_to_page_cache_lru,
1964 		 * but that isn't exported, so we reproduce it here
1965 		 */
1966 		if (!add_to_page_cache(page, mapping,
1967 					page->index, GFP_KERNEL)) {
1968 
1969 			/* open coding of lru_cache_add, also not exported */
1970 			page_cache_get(page);
1971 			if (!pagevec_add(&pvec, page))
1972 				__pagevec_lru_add(&pvec);
1973 			__extent_read_full_page(tree, page, get_extent, &bio);
1974 		}
1975 		page_cache_release(page);
1976 	}
1977 	if (pagevec_count(&pvec))
1978 		__pagevec_lru_add(&pvec);
1979 	BUG_ON(!list_empty(pages));
1980 	if (bio)
1981 		submit_one_bio(READ, bio);
1982 	return 0;
1983 }
1984 EXPORT_SYMBOL(extent_readpages);
1985 
1986 /*
1987  * basic invalidatepage code: wait for any locked or writeback
1988  * ranges that overlap the page, then delete the extent state
1989  * records from the tree
1990  */
1991 int extent_invalidatepage(struct extent_map_tree *tree,
1992 			  struct page *page, unsigned long offset)
1993 {
1994 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1995 	u64 end = start + PAGE_CACHE_SIZE - 1;
1996 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1997 
1998 	start += (offset + blocksize - 1) & ~(blocksize - 1);
1999 	if (start > end)
2000 		return 0;
2001 
2002 	lock_extent(tree, start, end, GFP_NOFS);
2003 	wait_on_extent_writeback(tree, start, end);
2004 	clear_extent_bit(tree, start, end,
2005 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2006 			 1, 1, GFP_NOFS);
2007 	return 0;
2008 }
2009 EXPORT_SYMBOL(extent_invalidatepage);
2010 
2011 /*
2012  * simple commit_write call: the page is marked dirty and i_size is
2013  * updated (and the inode marked dirty) when the write extends the file
2014  */
2015 int extent_commit_write(struct extent_map_tree *tree,
2016 			struct inode *inode, struct page *page,
2017 			unsigned from, unsigned to)
2018 {
2019 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2020 
2021 	set_page_extent_mapped(page);
2022 	set_page_dirty(page);
2023 
2024 	if (pos > inode->i_size) {
2025 		i_size_write(inode, pos);
2026 		mark_inode_dirty(inode);
2027 	}
2028 	return 0;
2029 }
2030 EXPORT_SYMBOL(extent_commit_write);
2031 
2032 int extent_prepare_write(struct extent_map_tree *tree,
2033 			 struct inode *inode, struct page *page,
2034 			 unsigned from, unsigned to, get_extent_t *get_extent)
2035 {
2036 	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2037 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2038 	u64 block_start;
2039 	u64 orig_block_start;
2040 	u64 block_end;
2041 	u64 cur_end;
2042 	struct extent_map *em;
2043 	unsigned blocksize = 1 << inode->i_blkbits;
2044 	size_t page_offset = 0;
2045 	size_t block_off_start;
2046 	size_t block_off_end;
2047 	int err = 0;
2048 	int iocount = 0;
2049 	int ret = 0;
2050 	int isnew;
2051 
2052 	set_page_extent_mapped(page);
2053 
2054 	block_start = (page_start + from) & ~((u64)blocksize - 1);
2055 	block_end = (page_start + to - 1) | (blocksize - 1);
2056 	orig_block_start = block_start;
2057 
2058 	lock_extent(tree, page_start, page_end, GFP_NOFS);
2059 	while(block_start <= block_end) {
2060 		em = get_extent(inode, page, page_offset, block_start,
2061 				block_end, 1);
2062 		if (IS_ERR(em) || !em) {
			/* don't return success when the extent lookup failed */
			err = IS_ERR(em) ? PTR_ERR(em) : -EIO;
2063 			goto err;
2064 		}
2065 		cur_end = min(block_end, em->end);
2066 		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2067 		block_off_end = block_off_start + blocksize;
2068 		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2069 
2070 		if (!PageUptodate(page) && isnew &&
2071 		    (block_off_end > to || block_off_start < from)) {
2072 			void *kaddr;
2073 
2074 			kaddr = kmap_atomic(page, KM_USER0);
2075 			if (block_off_end > to)
2076 				memset(kaddr + to, 0, block_off_end - to);
2077 			if (block_off_start < from)
2078 				memset(kaddr + block_off_start, 0,
2079 				       from - block_off_start);
2080 			flush_dcache_page(page);
2081 			kunmap_atomic(kaddr, KM_USER0);
2082 		}
2083 		if (!isnew && !PageUptodate(page) &&
2084 		    (block_off_end > to || block_off_start < from) &&
2085 		    !test_range_bit(tree, block_start, cur_end,
2086 				    EXTENT_UPTODATE, 1)) {
2087 			u64 sector;
2088 			u64 extent_offset = block_start - em->start;
2089 			size_t iosize;
2090 			sector = (em->block_start + extent_offset) >> 9;
2091 			iosize = (cur_end - block_start + blocksize - 1) &
2092 				~((u64)blocksize - 1);
2093 			/*
2094 			 * we've already got the extent locked, but we
2095 			 * need to split the state such that our end_bio
2096 			 * handler can clear the lock.
2097 			 */
2098 			set_extent_bit(tree, block_start,
2099 				       block_start + iosize - 1,
2100 				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2101 			ret = submit_extent_page(READ, tree, page,
2102 					 sector, iosize, page_offset, em->bdev,
2103 					 NULL, 1,
2104 					 end_bio_extent_preparewrite);
2105 			iocount++;
2106 			block_start = block_start + iosize;
2107 		} else {
2108 			set_extent_uptodate(tree, block_start, cur_end,
2109 					    GFP_NOFS);
2110 			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2111 			block_start = cur_end + 1;
2112 		}
2113 		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2114 		free_extent_map(em);
2115 	}
2116 	if (iocount) {
2117 		wait_extent_bit(tree, orig_block_start,
2118 				block_end, EXTENT_LOCKED);
2119 	}
2120 	check_page_uptodate(tree, page);
2121 err:
2122 	/* FIXME, zero out newly allocated blocks on error */
2123 	return err;
2124 }
2125 EXPORT_SYMBOL(extent_prepare_write);
2126 
2127 /*
2128  * a helper for releasepage.  As long as there are no locked extents
2129  * in the range corresponding to the page, both state records and extent
2130  * map records are removed
2131  */
2132 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2133 {
2134 	struct extent_map *em;
2135 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2136 	u64 end = start + PAGE_CACHE_SIZE - 1;
2137 	u64 orig_start = start;
2138 	int ret = 1;
2139 
2140 	while (start <= end) {
2141 		em = lookup_extent_mapping(tree, start, end);
2142 		if (!em || IS_ERR(em))
2143 			break;
2144 		if (!test_range_bit(tree, em->start, em->end,
2145 				    EXTENT_LOCKED, 0)) {
2146 			remove_extent_mapping(tree, em);
2147 			/* once for the rb tree */
2148 			free_extent_map(em);
2149 		}
2150 		start = em->end + 1;
2151 		/* once for us */
2152 		free_extent_map(em);
2153 	}
2154 	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2155 		ret = 0;
2156 	else
2157 		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2158 				 1, 1, GFP_NOFS);
2159 	return ret;
2160 }
2161 EXPORT_SYMBOL(try_release_extent_mapping);
2162 
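/*
 * map a logical file block to a physical sector for bmap().  holes and
 * inline extents have no single physical location, so 0 is returned for
 * them.  a filesystem would typically wrap this in its ->bmap operation,
 * roughly (illustrative sketch only):
 *
 *	static sector_t btrfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return extent_bmap(mapping, block, btrfs_get_extent);
 *	}
 */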
2163 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2164 		get_extent_t *get_extent)
2165 {
2166 	struct inode *inode = mapping->host;
2167 	u64 start = iblock << inode->i_blkbits;
2168 	u64 end = start + (1 << inode->i_blkbits) - 1;
2169 	sector_t sector = 0;
2170 	struct extent_map *em;
2171 
2172 	em = get_extent(inode, NULL, 0, start, end, 0);
2173 	if (!em || IS_ERR(em))
2174 		return 0;
2175 
2176 	if (em->block_start == EXTENT_MAP_INLINE ||
2177 	    em->block_start == EXTENT_MAP_HOLE)
2178 		goto out;
2179 
2180 	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2181 out:
2182 	free_extent_map(em);
2183 	return sector;
2184 }
2185 
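/*
 * put an extent buffer at the head of the per-tree lru, taking an extra
 * reference on it.  when the lru reaches BUFFER_LRU_MAX the oldest
 * buffer is dropped.  the caller must hold tree->lru_lock.
 */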
2186 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2187 {
2188 	if (list_empty(&eb->lru)) {
2189 		extent_buffer_get(eb);
2190 		list_add(&eb->lru, &tree->buffer_lru);
2191 		tree->lru_size++;
2192 		if (tree->lru_size >= BUFFER_LRU_MAX) {
2193 			struct extent_buffer *rm;
2194 			rm = list_entry(tree->buffer_lru.prev,
2195 					struct extent_buffer, lru);
2196 			tree->lru_size--;
2197 			list_del_init(&rm->lru);
2198 			free_extent_buffer(rm);
2199 		}
2200 	} else
2201 		list_move(&eb->lru, &tree->buffer_lru);
2202 	return 0;
2203 }
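
/*
 * find a buffer with a matching start and len on the lru.  a reference
 * is taken on anything returned.  the caller must hold tree->lru_lock.
 */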
2204 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2205 				      u64 start, unsigned long len)
2206 {
2207 	struct list_head *lru = &tree->buffer_lru;
2208 	struct list_head *cur = lru->next;
2209 	struct extent_buffer *eb;
2210 
2211 	if (list_empty(lru))
2212 		return NULL;
2213 
2214 	do {
2215 		eb = list_entry(cur, struct extent_buffer, lru);
2216 		if (eb->start == start && eb->len == len) {
2217 			extent_buffer_get(eb);
2218 			return eb;
2219 		}
2220 		cur = cur->next;
2221 	} while (cur != lru);
2222 	return NULL;
2223 }
2224 
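/*
 * number of pages needed to hold 'len' bytes starting at 'start'.  with
 * 4k pages, for example, start 0 and len 16k span 4 pages, while a 4k
 * buffer that begins 2k into a page spans 2.
 */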
2225 static inline unsigned long num_extent_pages(u64 start, u64 len)
2226 {
2227 	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2228 		(start >> PAGE_CACHE_SHIFT);
2229 }
2230 
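/*
 * return the i'th page backing an extent buffer.  page 0 is cached in
 * eb->first_page, the rest are looked up in the page cache radix tree.
 */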
2231 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2232 					      unsigned long i)
2233 {
2234 	struct page *p;
2235 	struct address_space *mapping;
2236 
2237 	if (i == 0)
2238 		return eb->first_page;
2239 	i += eb->start >> PAGE_CACHE_SHIFT;
2240 	mapping = eb->first_page->mapping;
2241 	read_lock_irq(&mapping->tree_lock);
2242 	p = radix_tree_lookup(&mapping->page_tree, i);
2243 	read_unlock_irq(&mapping->tree_lock);
2244 	return p;
2245 }
2246 
2247 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2248 						   u64 start,
2249 						   unsigned long len,
2250 						   gfp_t mask)
2251 {
2252 	struct extent_buffer *eb = NULL;
2253 
2254 	spin_lock(&tree->lru_lock);
2255 	eb = find_lru(tree, start, len);
2256 	spin_unlock(&tree->lru_lock);
2257 	if (eb) {
2258 		return eb;
2259 	}
2260 
2261 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
2262 	INIT_LIST_HEAD(&eb->lru);
2263 	eb->start = start;
2264 	eb->len = len;
2265 	atomic_set(&eb->refs, 1);
2266 
2267 	return eb;
2268 }
2269 
2270 static void __free_extent_buffer(struct extent_buffer *eb)
2271 {
2272 	kmem_cache_free(extent_buffer_cache, eb);
2273 }
2274 
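/*
 * find or create the pages covering [start, start + len) and wire them
 * into an extent buffer.  page0, when supplied, is used as the first
 * page instead of being looked up again.  a tree block read would look
 * roughly like this (illustrative only):
 *
 *	eb = alloc_extent_buffer(tree, bytenr, blocksize, NULL, GFP_NOFS);
 *	read_extent_buffer_pages(tree, eb, 0, 1);
 */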
2275 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2276 					  u64 start, unsigned long len,
2277 					  struct page *page0,
2278 					  gfp_t mask)
2279 {
2280 	unsigned long num_pages = num_extent_pages(start, len);
2281 	unsigned long i;
2282 	unsigned long index = start >> PAGE_CACHE_SHIFT;
2283 	struct extent_buffer *eb;
2284 	struct page *p;
2285 	struct address_space *mapping = tree->mapping;
2286 	int uptodate = 1;
2287 
2288 	eb = __alloc_extent_buffer(tree, start, len, mask);
2289 	if (!eb || IS_ERR(eb))
2290 		return NULL;
2291 
2292 	if (eb->flags & EXTENT_BUFFER_FILLED)
2293 		goto lru_add;
2294 
2295 	if (page0) {
2296 		eb->first_page = page0;
2297 		i = 1;
2298 		index++;
2299 		page_cache_get(page0);
2300 		mark_page_accessed(page0);
2301 		set_page_extent_mapped(page0);
2302 		WARN_ON(!PageUptodate(page0));
2303 		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2304 				 len << 2);
2305 	} else {
2306 		i = 0;
2307 	}
2308 	for (; i < num_pages; i++, index++) {
2309 		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2310 		if (!p) {
2311 			WARN_ON(1);
2312 			goto fail;
2313 		}
2314 		set_page_extent_mapped(p);
2315 		mark_page_accessed(p);
2316 		if (i == 0) {
2317 			eb->first_page = p;
2318 			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2319 					 len << 2);
2320 		} else {
2321 			set_page_private(p, EXTENT_PAGE_PRIVATE);
2322 		}
2323 		if (!PageUptodate(p))
2324 			uptodate = 0;
2325 		unlock_page(p);
2326 	}
2327 	if (uptodate)
2328 		eb->flags |= EXTENT_UPTODATE;
2329 	eb->flags |= EXTENT_BUFFER_FILLED;
2330 
2331 lru_add:
2332 	spin_lock(&tree->lru_lock);
2333 	add_lru(tree, eb);
2334 	spin_unlock(&tree->lru_lock);
2335 	return eb;
2336 
2337 fail:
2338 	spin_lock(&tree->lru_lock);
2339 	list_del_init(&eb->lru);
2340 	spin_unlock(&tree->lru_lock);
2341 	if (!atomic_dec_and_test(&eb->refs))
2342 		return NULL;
2343 	for (index = 1; index < i; index++) {
2344 		page_cache_release(extent_buffer_page(eb, index));
2345 	}
2346 	if (i > 0)
2347 		page_cache_release(extent_buffer_page(eb, 0));
2348 	__free_extent_buffer(eb);
2349 	return NULL;
2350 }
2351 EXPORT_SYMBOL(alloc_extent_buffer);
2352 
2353 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2354 					 u64 start, unsigned long len,
2355 					  gfp_t mask)
2356 {
2357 	unsigned long num_pages = num_extent_pages(start, len);
2358 	unsigned long i;
2359 	unsigned long index = start >> PAGE_CACHE_SHIFT;
2360 	struct extent_buffer *eb;
2361 	struct page *p;
2362 	struct address_space *mapping = tree->mapping;
2363 	int uptodate = 1;
2364 
2365 	eb = __alloc_extent_buffer(tree, start, len, mask);
2366 	if (!eb || IS_ERR(eb))
2367 		return NULL;
2368 
2369 	if (eb->flags & EXTENT_BUFFER_FILLED)
2370 		goto lru_add;
2371 
2372 	for (i = 0; i < num_pages; i++, index++) {
2373 		p = find_lock_page(mapping, index);
2374 		if (!p) {
2375 			goto fail;
2376 		}
2377 		set_page_extent_mapped(p);
2378 		mark_page_accessed(p);
2379 
2380 		if (i == 0) {
2381 			eb->first_page = p;
2382 			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2383 					 len << 2);
2384 		} else {
2385 			set_page_private(p, EXTENT_PAGE_PRIVATE);
2386 		}
2387 
2388 		if (!PageUptodate(p))
2389 			uptodate = 0;
2390 		unlock_page(p);
2391 	}
2392 	if (uptodate)
2393 		eb->flags |= EXTENT_UPTODATE;
2394 	eb->flags |= EXTENT_BUFFER_FILLED;
2395 
2396 lru_add:
2397 	spin_lock(&tree->lru_lock);
2398 	add_lru(tree, eb);
2399 	spin_unlock(&tree->lru_lock);
2400 	return eb;
2401 fail:
2402 	spin_lock(&tree->lru_lock);
2403 	list_del_init(&eb->lru);
2404 	spin_unlock(&tree->lru_lock);
2405 	if (!atomic_dec_and_test(&eb->refs))
2406 		return NULL;
2407 	for (index = 1; index < i; index++) {
2408 		page_cache_release(extent_buffer_page(eb, index));
2409 	}
2410 	if (i > 0)
2411 		page_cache_release(extent_buffer_page(eb, 0));
2412 	__free_extent_buffer(eb);
2413 	return NULL;
2414 }
2415 EXPORT_SYMBOL(find_extent_buffer);
2416 
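/*
 * drop a reference on an extent buffer.  when the last reference goes
 * away the backing pages are released and the buffer itself is freed.
 */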
2417 void free_extent_buffer(struct extent_buffer *eb)
2418 {
2419 	unsigned long i;
2420 	unsigned long num_pages;
2421 
2422 	if (!eb)
2423 		return;
2424 
2425 	if (!atomic_dec_and_test(&eb->refs))
2426 		return;
2427 
2428 	WARN_ON(!list_empty(&eb->lru));
2429 	num_pages = num_extent_pages(eb->start, eb->len);
2430 
2431 	for (i = 1; i < num_pages; i++) {
2432 		page_cache_release(extent_buffer_page(eb, i));
2433 	}
2434 	page_cache_release(extent_buffer_page(eb, 0));
2435 	__free_extent_buffer(eb);
2436 }
2437 EXPORT_SYMBOL(free_extent_buffer);
2438 
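/*
 * clear the dirty bits for an extent buffer in both the extent tree and
 * the page cache.  a first or last page that isn't fully covered by the
 * buffer is left alone if some other dirty extent still covers it.
 */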
2439 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2440 			      struct extent_buffer *eb)
2441 {
2442 	int set;
2443 	unsigned long i;
2444 	unsigned long num_pages;
2445 	struct page *page;
2446 
2447 	u64 start = eb->start;
2448 	u64 end = start + eb->len - 1;
2449 
2450 	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2451 	num_pages = num_extent_pages(eb->start, eb->len);
2452 
2453 	for (i = 0; i < num_pages; i++) {
2454 		page = extent_buffer_page(eb, i);
2455 		lock_page(page);
2456 		/*
2457 		 * if this is the first or last page and the buffer isn't
2458 		 * aligned to a page boundary, do extra checks to make sure
2459 		 * we don't clean a page that is still partially dirty
2460 		 */
2461 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2462 		    ((i == num_pages - 1) &&
2463 		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2464 			start = (u64)page->index << PAGE_CACHE_SHIFT;
2465 			end  = start + PAGE_CACHE_SIZE - 1;
2466 			if (test_range_bit(tree, start, end,
2467 					   EXTENT_DIRTY, 0)) {
2468 				unlock_page(page);
2469 				continue;
2470 			}
2471 		}
2472 		clear_page_dirty_for_io(page);
2473 		write_lock_irq(&page->mapping->tree_lock);
2474 		if (!PageDirty(page)) {
2475 			radix_tree_tag_clear(&page->mapping->page_tree,
2476 						page_index(page),
2477 						PAGECACHE_TAG_DIRTY);
2478 		}
2479 		write_unlock_irq(&page->mapping->tree_lock);
2480 		unlock_page(page);
2481 	}
2482 	return 0;
2483 }
2484 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2485 
2486 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2487 				    struct extent_buffer *eb)
2488 {
2489 	return wait_on_extent_writeback(tree, eb->start,
2490 					eb->start + eb->len - 1);
2491 }
2492 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2493 
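/*
 * mark every page backing an extent buffer dirty, along with the
 * matching range in the extent tree.  page->private is re-set on the
 * first page because releasepage may have dropped it while the page
 * was clean.
 */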
2494 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2495 			     struct extent_buffer *eb)
2496 {
2497 	unsigned long i;
2498 	unsigned long num_pages;
2499 
2500 	num_pages = num_extent_pages(eb->start, eb->len);
2501 	for (i = 0; i < num_pages; i++) {
2502 		struct page *page = extent_buffer_page(eb, i);
2503 		/* writepage may need to do something special for the
2504 		 * first page, so we have to make sure page->private is
2505 		 * properly set.  releasepage may drop page->private
2506 		 * on us if the page isn't already dirty.
2507 		 */
2508 		if (i == 0) {
2509 			lock_page(page);
2510 			set_page_private(page,
2511 					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2512 					 eb->len << 2);
2513 		}
2514 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2515 		if (i == 0)
2516 			unlock_page(page);
2517 	}
2518 	return set_extent_dirty(tree, eb->start,
2519 				eb->start + eb->len - 1, GFP_NOFS);
2520 }
2521 EXPORT_SYMBOL(set_extent_buffer_dirty);
2522 
2523 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2524 				struct extent_buffer *eb)
2525 {
2526 	unsigned long i;
2527 	struct page *page;
2528 	unsigned long num_pages;
2529 
2530 	num_pages = num_extent_pages(eb->start, eb->len);
2531 
2532 	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2533 			    GFP_NOFS);
2534 	for (i = 0; i < num_pages; i++) {
2535 		page = extent_buffer_page(eb, i);
2536 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2537 		    ((i == num_pages - 1) &&
2538 		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2539 			check_page_uptodate(tree, page);
2540 			continue;
2541 		}
2542 		SetPageUptodate(page);
2543 	}
2544 	return 0;
2545 }
2546 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2547 
2548 int extent_buffer_uptodate(struct extent_map_tree *tree,
2549 			     struct extent_buffer *eb)
2550 {
2551 	if (eb->flags & EXTENT_UPTODATE)
2552 		return 1;
2553 	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2554 			   EXTENT_UPTODATE, 1);
2555 }
2556 EXPORT_SYMBOL(extent_buffer_uptodate);
2557 
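/*
 * read the pages backing an extent buffer, skipping any that are
 * already uptodate.  when 'wait' is set we block until the io finishes
 * and return -EIO if any page failed; otherwise the reads are only
 * started.
 */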
2558 int read_extent_buffer_pages(struct extent_map_tree *tree,
2559 			     struct extent_buffer *eb,
2560 			     u64 start,
2561 			     int wait)
2562 {
2563 	unsigned long i;
2564 	unsigned long start_i;
2565 	struct page *page;
2566 	int err;
2567 	int ret = 0;
2568 	unsigned long num_pages;
2569 
2570 	if (eb->flags & EXTENT_UPTODATE)
2571 		return 0;
2572 
2573 	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2574 			   EXTENT_UPTODATE, 1)) {
2575 		return 0;
2576 	}
2577 
2578 	if (start) {
2579 		WARN_ON(start < eb->start);
2580 		start_i = (start >> PAGE_CACHE_SHIFT) -
2581 			(eb->start >> PAGE_CACHE_SHIFT);
2582 	} else {
2583 		start_i = 0;
2584 	}
2585 
2586 	num_pages = num_extent_pages(eb->start, eb->len);
2587 	for (i = start_i; i < num_pages; i++) {
2588 		page = extent_buffer_page(eb, i);
2589 		if (PageUptodate(page)) {
2590 			continue;
2591 		}
2592 		if (!wait) {
2593 			if (TestSetPageLocked(page)) {
2594 				continue;
2595 			}
2596 		} else {
2597 			lock_page(page);
2598 		}
2599 		if (!PageUptodate(page)) {
2600 			err = page->mapping->a_ops->readpage(NULL, page);
2601 			if (err) {
2602 				ret = err;
2603 			}
2604 		} else {
2605 			unlock_page(page);
2606 		}
2607 	}
2608 
2609 	if (ret || !wait) {
2610 		return ret;
2611 	}
2612 
2613 	for (i = start_i; i < num_pages; i++) {
2614 		page = extent_buffer_page(eb, i);
2615 		wait_on_page_locked(page);
2616 		if (!PageUptodate(page)) {
2617 			ret = -EIO;
2618 		}
2619 	}
2620 	if (!ret)
2621 		eb->flags |= EXTENT_UPTODATE;
2622 	return ret;
2623 }
2624 EXPORT_SYMBOL(read_extent_buffer_pages);
2625 
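/*
 * copy 'len' bytes starting at byte offset 'start' of the extent buffer
 * into dstv, one page at a time.  for example (illustrative only):
 *
 *	u64 val;
 *	read_extent_buffer(eb, &val, offset, sizeof(val));
 */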
2626 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2627 			unsigned long start,
2628 			unsigned long len)
2629 {
2630 	size_t cur;
2631 	size_t offset;
2632 	struct page *page;
2633 	char *kaddr;
2634 	char *dst = (char *)dstv;
2635 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2636 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2637 	unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2638 
2639 	WARN_ON(start > eb->len);
2640 	WARN_ON(start + len > eb->start + eb->len);
2641 
2642 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2643 
2644 	while(len > 0) {
2645 		page = extent_buffer_page(eb, i);
2646 		if (!PageUptodate(page)) {
2647 			printk("page %lu not up to date i %lu, total %lu, "
			       "len %lu\n", page->index, i, num_pages,
			       eb->len);
2648 			WARN_ON(1);
2649 		}
2650 		WARN_ON(!PageUptodate(page));
2651 
2652 		cur = min(len, (PAGE_CACHE_SIZE - offset));
2653 		kaddr = kmap_atomic(page, KM_USER1);
2654 		memcpy(dst, kaddr + offset, cur);
2655 		kunmap_atomic(kaddr, KM_USER1);
2656 
2657 		dst += cur;
2658 		len -= cur;
2659 		offset = 0;
2660 		i++;
2661 	}
2662 }
2663 EXPORT_SYMBOL(read_extent_buffer);
2664 
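/*
 * kmap the page holding [start, start + min_len) and return a pointer
 * to it.  the mapping never crosses a page, so -EINVAL is returned when
 * the range does; *map_start and *map_len describe what the mapping
 * actually covers.  the caller unmaps with unmap_extent_buffer().
 */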
2665 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2666 			       unsigned long min_len, char **token, char **map,
2667 			       unsigned long *map_start,
2668 			       unsigned long *map_len, int km)
2669 {
2670 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
2671 	char *kaddr;
2672 	struct page *p;
2673 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2674 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2675 	unsigned long end_i = (start_offset + start + min_len - 1) >>
2676 		PAGE_CACHE_SHIFT;
2677 
2678 	if (i != end_i)
2679 		return -EINVAL;
2680 
2681 	if (i == 0) {
2682 		offset = start_offset;
2683 		*map_start = 0;
2684 	} else {
2685 		offset = 0;
2686 		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2687 	}
2688 	if (start + min_len > eb->len) {
2689 		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
2690 		WARN_ON(1);
2691 	}
2692 
2693 	p = extent_buffer_page(eb, i);
2694 	WARN_ON(!PageUptodate(p));
2695 	kaddr = kmap_atomic(p, km);
2696 	*token = kaddr;
2697 	*map = kaddr + offset;
2698 	*map_len = PAGE_CACHE_SIZE - offset;
2699 	return 0;
2700 }
2701 EXPORT_SYMBOL(map_private_extent_buffer);
2702 
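/*
 * like map_private_extent_buffer, but any mapping already cached in the
 * buffer is unmapped first and the new mapping is cached in its place
 * for later reuse.
 */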
2703 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2704 		      unsigned long min_len,
2705 		      char **token, char **map,
2706 		      unsigned long *map_start,
2707 		      unsigned long *map_len, int km)
2708 {
2709 	int err;
2710 	int save = 0;
2711 	if (eb->map_token) {
2712 		unmap_extent_buffer(eb, eb->map_token, km);
2713 		eb->map_token = NULL;
2714 		save = 1;
2715 	}
2716 	err = map_private_extent_buffer(eb, start, min_len, token, map,
2717 				       map_start, map_len, km);
2718 	if (!err && save) {
2719 		eb->map_token = *token;
2720 		eb->kaddr = *map;
2721 		eb->map_start = *map_start;
2722 		eb->map_len = *map_len;
2723 	}
2724 	return err;
2725 }
2726 EXPORT_SYMBOL(map_extent_buffer);
2727 
2728 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2729 {
2730 	kunmap_atomic(token, km);
2731 }
2732 EXPORT_SYMBOL(unmap_extent_buffer);
2733 
2734 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2735 			  unsigned long start,
2736 			  unsigned long len)
2737 {
2738 	size_t cur;
2739 	size_t offset;
2740 	struct page *page;
2741 	char *kaddr;
2742 	char *ptr = (char *)ptrv;
2743 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2744 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2745 	int ret = 0;
2746 
2747 	WARN_ON(start > eb->len);
2748 	WARN_ON(start + len > eb->start + eb->len);
2749 
2750 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2751 
2752 	while(len > 0) {
2753 		page = extent_buffer_page(eb, i);
2754 		WARN_ON(!PageUptodate(page));
2755 
2756 		cur = min(len, (PAGE_CACHE_SIZE - offset));
2757 
2758 		kaddr = kmap_atomic(page, KM_USER0);
2759 		ret = memcmp(ptr, kaddr + offset, cur);
2760 		kunmap_atomic(kaddr, KM_USER0);
2761 		if (ret)
2762 			break;
2763 
2764 		ptr += cur;
2765 		len -= cur;
2766 		offset = 0;
2767 		i++;
2768 	}
2769 	return ret;
2770 }
2771 EXPORT_SYMBOL(memcmp_extent_buffer);
2772 
2773 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2774 			 unsigned long start, unsigned long len)
2775 {
2776 	size_t cur;
2777 	size_t offset;
2778 	struct page *page;
2779 	char *kaddr;
2780 	char *src = (char *)srcv;
2781 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2782 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2783 
2784 	WARN_ON(start > eb->len);
2785 	WARN_ON(start + len > eb->start + eb->len);
2786 
2787 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2788 
2789 	while(len > 0) {
2790 		page = extent_buffer_page(eb, i);
2791 		WARN_ON(!PageUptodate(page));
2792 
2793 		cur = min(len, PAGE_CACHE_SIZE - offset);
2794 		kaddr = kmap_atomic(page, KM_USER1);
2795 		memcpy(kaddr + offset, src, cur);
2796 		kunmap_atomic(kaddr, KM_USER1);
2797 
2798 		src += cur;
2799 		len -= cur;
2800 		offset = 0;
2801 		i++;
2802 	}
2803 }
2804 EXPORT_SYMBOL(write_extent_buffer);
2805 
2806 void memset_extent_buffer(struct extent_buffer *eb, char c,
2807 			  unsigned long start, unsigned long len)
2808 {
2809 	size_t cur;
2810 	size_t offset;
2811 	struct page *page;
2812 	char *kaddr;
2813 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2814 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2815 
2816 	WARN_ON(start > eb->len);
2817 	WARN_ON(start + len > eb->start + eb->len);
2818 
2819 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2820 
2821 	while(len > 0) {
2822 		page = extent_buffer_page(eb, i);
2823 		WARN_ON(!PageUptodate(page));
2824 
2825 		cur = min(len, PAGE_CACHE_SIZE - offset);
2826 		kaddr = kmap_atomic(page, KM_USER0);
2827 		memset(kaddr + offset, c, cur);
2828 		kunmap_atomic(kaddr, KM_USER0);
2829 
2830 		len -= cur;
2831 		offset = 0;
2832 		i++;
2833 	}
2834 }
2835 EXPORT_SYMBOL(memset_extent_buffer);
2836 
2837 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2838 			unsigned long dst_offset, unsigned long src_offset,
2839 			unsigned long len)
2840 {
2841 	u64 dst_len = dst->len;
2842 	size_t cur;
2843 	size_t offset;
2844 	struct page *page;
2845 	char *kaddr;
2846 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2847 	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2848 
2849 	WARN_ON(src->len != dst_len);
2850 
2851 	offset = (start_offset + dst_offset) &
2852 		((unsigned long)PAGE_CACHE_SIZE - 1);
2853 
2854 	while(len > 0) {
2855 		page = extent_buffer_page(dst, i);
2856 		WARN_ON(!PageUptodate(page));
2857 
2858 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2859 
2860 		kaddr = kmap_atomic(page, KM_USER0);
2861 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
2862 		kunmap_atomic(kaddr, KM_USER0);
2863 
2864 		src_offset += cur;
2865 		len -= cur;
2866 		offset = 0;
2867 		i++;
2868 	}
2869 }
2870 EXPORT_SYMBOL(copy_extent_buffer);
2871 
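/*
 * memmove-style copy of a range that lives in one or two pages.  within
 * a single page plain memmove is used; across pages the bytes are
 * copied back to front so memmove_extent_buffer can safely walk
 * overlapping ranges from the tail.
 */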
2872 static void move_pages(struct page *dst_page, struct page *src_page,
2873 		       unsigned long dst_off, unsigned long src_off,
2874 		       unsigned long len)
2875 {
2876 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2877 	if (dst_page == src_page) {
2878 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2879 	} else {
2880 		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2881 		char *p = dst_kaddr + dst_off + len;
2882 		char *s = src_kaddr + src_off + len;
2883 
2884 		while (len--)
2885 			*--p = *--s;
2886 
2887 		kunmap_atomic(src_kaddr, KM_USER1);
2888 	}
2889 	kunmap_atomic(dst_kaddr, KM_USER0);
2890 }
2891 
2892 static void copy_pages(struct page *dst_page, struct page *src_page,
2893 		       unsigned long dst_off, unsigned long src_off,
2894 		       unsigned long len)
2895 {
2896 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2897 	char *src_kaddr;
2898 
2899 	if (dst_page != src_page)
2900 		src_kaddr = kmap_atomic(src_page, KM_USER1);
2901 	else
2902 		src_kaddr = dst_kaddr;
2903 
2904 	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
2905 	kunmap_atomic(dst_kaddr, KM_USER0);
2906 	if (dst_page != src_page)
2907 		kunmap_atomic(src_kaddr, KM_USER1);
2908 }
2909 
2910 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2911 			   unsigned long src_offset, unsigned long len)
2912 {
2913 	size_t cur;
2914 	size_t dst_off_in_page;
2915 	size_t src_off_in_page;
2916 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2917 	unsigned long dst_i;
2918 	unsigned long src_i;
2919 
2920 	if (src_offset + len > dst->len) {
2921 		printk("memcpy bogus src_offset %lu move len %lu dst len %lu\n",
2922 		       src_offset, len, dst->len);
2923 		BUG_ON(1);
2924 	}
2925 	if (dst_offset + len > dst->len) {
2926 		printk("memcpy bogus dst_offset %lu move len %lu dst len %lu\n",
2927 		       dst_offset, len, dst->len);
2928 		BUG_ON(1);
2929 	}
2930 
2931 	while(len > 0) {
2932 		dst_off_in_page = (start_offset + dst_offset) &
2933 			((unsigned long)PAGE_CACHE_SIZE - 1);
2934 		src_off_in_page = (start_offset + src_offset) &
2935 			((unsigned long)PAGE_CACHE_SIZE - 1);
2936 
2937 		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2938 		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2939 
2940 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2941 					       src_off_in_page));
2942 		cur = min_t(unsigned long, cur,
2943 			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
2944 
2945 		copy_pages(extent_buffer_page(dst, dst_i),
2946 			   extent_buffer_page(dst, src_i),
2947 			   dst_off_in_page, src_off_in_page, cur);
2948 
2949 		src_offset += cur;
2950 		dst_offset += cur;
2951 		len -= cur;
2952 	}
2953 }
2954 EXPORT_SYMBOL(memcpy_extent_buffer);
2955 
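/*
 * memmove within a single extent buffer.  when dst is before src the
 * copy is handed to memcpy_extent_buffer; otherwise it is done back to
 * front with move_pages so overlapping ranges stay intact.
 */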
2956 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2957 			   unsigned long src_offset, unsigned long len)
2958 {
2959 	size_t cur;
2960 	size_t dst_off_in_page;
2961 	size_t src_off_in_page;
2962 	unsigned long dst_end = dst_offset + len - 1;
2963 	unsigned long src_end = src_offset + len - 1;
2964 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2965 	unsigned long dst_i;
2966 	unsigned long src_i;
2967 
2968 	if (src_offset + len > dst->len) {
2969 		printk("memmove bogus src_offset %lu move len %lu dst len %lu\n",
2970 		       src_offset, len, dst->len);
2971 		BUG_ON(1);
2972 	}
2973 	if (dst_offset + len > dst->len) {
2974 		printk("memmove bogus dst_offset %lu move len %lu dst len %lu\n",
2975 		       dst_offset, len, dst->len);
2976 		BUG_ON(1);
2977 	}
2978 	if (dst_offset < src_offset) {
2979 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2980 		return;
2981 	}
2982 	while(len > 0) {
2983 		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2984 		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2985 
2986 		dst_off_in_page = (start_offset + dst_end) &
2987 			((unsigned long)PAGE_CACHE_SIZE - 1);
2988 		src_off_in_page = (start_offset + src_end) &
2989 			((unsigned long)PAGE_CACHE_SIZE - 1);
2990 
2991 		cur = min_t(unsigned long, len, src_off_in_page + 1);
2992 		cur = min(cur, dst_off_in_page + 1);
2993 		move_pages(extent_buffer_page(dst, dst_i),
2994 			   extent_buffer_page(dst, src_i),
2995 			   dst_off_in_page - cur + 1,
2996 			   src_off_in_page - cur + 1, cur);
2997 
2998 		dst_end -= cur;
2999 		src_end -= cur;
3000 		len -= cur;
3001 	}
3002 }
3003 EXPORT_SYMBOL(memmove_extent_buffer);
3004