xref: /openbmc/linux/fs/btrfs/extent_map.c (revision 96b5179d)
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				       unsigned long extra_flags,
				       void (*ctor)(void *, struct kmem_cache *,
						    unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(extent_buffers);
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t extent_buffers_lock;
static DEFINE_SPINLOCK(state_lock);
static int nr_extent_buffers;
#define MAX_EXTENT_BUFFER_CACHE 128

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

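/*
 * set up the slab caches used by this file.  allocation failures here
 * are not checked; everything below relies on the caches existing
 * before any of the alloc_* helpers run.
 */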
void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					    sizeof(struct extent_map), 0,
					    NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
					    sizeof(struct extent_state), 0,
					    NULL);
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
					    sizeof(struct extent_buffer), 0,
					    NULL);
	spin_lock_init(&extent_buffers_lock);
}

void __exit extent_map_exit(void)
{
	struct extent_buffer *eb;
	struct extent_state *state;

	while (!list_empty(&extent_buffers)) {
		eb = list_entry(extent_buffers.next,
				struct extent_buffer, list);
		list_del(&eb->list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       state->in_tree, atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}
	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next,
				struct extent_buffer, leak_list);
		printk("buffer leak start %Lu len %lu return %lX\n",
		       eb->start, eb->len, eb->alloc_addr);
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_map_tree_init);

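/*
 * extent_map structs are reference counted.  alloc_extent_map returns
 * a struct with one reference held; free_extent_map drops a reference
 * and frees the struct once the count hits zero.
 */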
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

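/*
 * insert a node into one of the rb trees.  'offset' is the end of the
 * range being inserted, since both the map and state trees are indexed
 * by range end.  Returns the conflicting node if the range overlaps an
 * existing one, or NULL on success.
 */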
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

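/*
 * search for a node covering 'offset'.  If none covers it and prev_ret
 * is non-NULL, *prev_ret is set to the first node that ends at or after
 * 'offset' (which may be NULL if 'offset' is past the last range).
 */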
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				   struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static int clear_state_bit(struct extent_map_tree *tree,
			    struct extent_state *state, int bits, int wake,
			    int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

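/*
 * drop the read lock and sleep until this state struct is woken up,
 * then retake the lock.  The caller must hold a reference on 'state'
 * so it can't be freed while we sleep.
 */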
static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

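/*
 * a minimal sketch of the locking pattern callers are expected to
 * follow (the range here is made up for illustration):
 *
 *	u64 start = 0;
 *	u64 end = 4095;
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... operate on [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 *
 * lock_extent sleeps until any conflicting EXTENT_LOCKED ranges clear,
 * so with a blocking mask it must not be called from atomic context.
 */
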
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

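/*
 * find the first extent state at or after 'start' with any of 'bits'
 * set, returning its range in *start_ret and *end_ret.  Returns 1 if
 * nothing was found, 0 on success.
 */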
int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node))
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->state & bits) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

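/*
 * find a contiguous run of EXTENT_DELALLOC state starting at 'start',
 * locking the portion at or past 'lock_start' as it goes.  *end is set
 * to the end of the last delalloc extent found, and the number of
 * extents found is returned.  The run is capped at roughly max_bytes.
 */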
u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node))
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start)
			goto out;
		if (!(state->state & EXTENT_DELALLOC))
			goto out;
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				/* queue ourselves on the waitqueue before
				 * dropping the lock, so the wakeup from
				 * unlock_extent can't be missed
				 */
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		/* accumulate across all the extents we've walked */
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

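/*
 * set_state_private/get_state_private stash a caller-provided u64 in
 * the extent_state struct whose range starts exactly at 'start'.
 * -ENOENT is returned if no such state exists.
 */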
int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
static int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
			  int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_writepage(struct bio *bio,
				   unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

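/*
 * build and submit a single-vector bio for one chunk of a page.  The
 * extent tree is passed through bi_private so the end_io handlers
 * above can find it.
 */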
static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      bio_end_io_t end_io_func)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_io_vec[0].bv_page = page;
	bio->bi_io_vec[0].bv_len = size;
	bio->bi_io_vec[0].bv_offset = offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = size;

	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}

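/*
 * mark a page as backed by the extent map code.  page->private is set
 * to a constant rather than a buffer_head list, and an extra page
 * reference is taken, as the VM expects for pages with private data.
 */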
void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, 1);
		page_cache_get(page);
	}
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(extent_read_full_page);

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent,
			  struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	lock_extent(tree, start, page_end, GFP_NOFS);
	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
					       &delalloc_end,
					       128 * 1024 * 1024);
	if (nr_delalloc) {
		tree->ops->fill_delalloc(inode, start, delalloc_end);
		if (delalloc_end >= page_end + 1) {
			clear_extent_bit(tree, page_end + 1, delalloc_end,
					 EXTENT_LOCKED | EXTENT_DELALLOC,
					 1, 0, GFP_NOFS);
		}
		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
				 0, 0, GFP_NOFS);
		if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
			printk("found delalloc bits after clear extent_bit\n");
		}
	} else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after find_delalloc_range returns 0\n");
	}

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
				   EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
						cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			set_range_writeback(tree, cur, cur + iosize - 1);
			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}
EXPORT_SYMBOL(extent_write_full_page);

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);

/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);

int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em) {
			/* report the mapping failure to the caller */
			err = -EIO;
			goto err;
		}
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (!isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize - 1) &
				~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
					 sector, iosize, page_offset, em->bdev,
					 end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);

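/*
 * bmap implementation on top of get_extent.  Returns 0 for holes and
 * inline extents, since they have no sensible block number.
 */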
1898 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
1899 		get_extent_t *get_extent)
1900 {
1901 	struct inode *inode = mapping->host;
1902 	u64 start = iblock << inode->i_blkbits;
1903 	u64 end = start + (1 << inode->i_blkbits) - 1;
	struct extent_map *em;
	sector_t sector = 0;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start != EXTENT_MAP_INLINE &&
	    em->block_start != EXTENT_MAP_HOLE)
		sector = (em->block_start + start - em->start) >>
			 inode->i_blkbits;

	/* drop the reference get_extent handed us */
	free_extent_map(em);
	return sector;
1915 }
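
/*
 * likewise for ->bmap, where the filesystem passes in its get_extent
 * callback (hypothetical wrapper):
 *
 *	static sector_t btrfs_bmap(struct address_space *as, sector_t block)
 *	{
 *		return extent_bmap(as, block, btrfs_get_extent);
 *	}
 */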
1916 
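/*
 * extent buffers are recycled through a small private freelist
 * (capped at MAX_EXTENT_BUFFER_CACHE) before we fall back to the slab
 * cache.  Every live buffer also sits on a global leak list so
 * shutdown can report anything that was never freed.
 */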
1917 static struct extent_buffer *__alloc_extent_buffer(gfp_t mask)
1918 {
1919 	struct extent_buffer *eb = NULL;
1920 
1921 	spin_lock(&extent_buffers_lock);
1922 	if (!list_empty(&extent_buffers)) {
1923 		eb = list_entry(extent_buffers.next, struct extent_buffer,
1924 				list);
1925 		list_del(&eb->list);
1926 		WARN_ON(nr_extent_buffers == 0);
1927 		nr_extent_buffers--;
1928 	}
1929 	spin_unlock(&extent_buffers_lock);
1930 
	if (eb) {
		memset(eb, 0, sizeof(*eb));
	} else {
		eb = kmem_cache_zalloc(extent_buffer_cache, mask);
		if (!eb)
			return NULL;
	}
1936 	spin_lock(&extent_buffers_lock);
1937 	list_add(&eb->leak_list, &buffers);
1938 	spin_unlock(&extent_buffers_lock);
1939 
1940 	return eb;
1941 }
1942 
static void __free_extent_buffer(struct extent_buffer *eb)
{
	spin_lock(&extent_buffers_lock);
	list_del_init(&eb->leak_list);

	/*
	 * check the cache size and recycle while still holding the lock
	 * so nr_extent_buffers can't change underneath us
	 */
	if (nr_extent_buffers >= MAX_EXTENT_BUFFER_CACHE) {
		spin_unlock(&extent_buffers_lock);
		kmem_cache_free(extent_buffer_cache, eb);
	} else {
		list_add(&eb->list, &extent_buffers);
		nr_extent_buffers++;
		spin_unlock(&extent_buffers_lock);
	}
}
1959 
1960 static inline struct page *extent_buffer_page(struct extent_buffer *eb, int i)
1961 {
1962 	struct page *p;
1963 	if (i == 0)
1964 		return eb->first_page;
1965 	i += eb->start >> PAGE_CACHE_SHIFT;
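	/*
	 * the buffer holds its own reference on every page (taken at
	 * alloc/find time), so the extra reference from find_get_page
	 * can be dropped right away
	 */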
1966 	p = find_get_page(eb->first_page->mapping, i);
1967 	page_cache_release(p);
1968 	return p;
1969 }
1970 
1971 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
1972 					  u64 start, unsigned long len,
1973 					  gfp_t mask)
1974 {
1975 	unsigned long num_pages = ((start + len - 1) >> PAGE_CACHE_SHIFT) -
1976 				  (start >> PAGE_CACHE_SHIFT) + 1;
1977 	unsigned long i;
1978 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1979 	struct extent_buffer *eb;
1980 	struct page *p;
1981 	struct address_space *mapping = tree->mapping;
	int uptodate = 1;
1983 
1984 	eb = __alloc_extent_buffer(mask);
	if (!eb)
		return NULL;
1987 
1988 	eb->alloc_addr = __builtin_return_address(0);
1989 	eb->start = start;
1990 	eb->len = len;
1991 	atomic_set(&eb->refs, 1);
1992 
1993 	for (i = 0; i < num_pages; i++, index++) {
1994 		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
1995 		if (!p) {
1996 			/* make sure the free only frees the pages we've
1997 			 * grabbed a reference on
1998 			 */
1999 			eb->len = i << PAGE_CACHE_SHIFT;
2000 			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
2001 			goto fail;
2002 		}
2003 		set_page_extent_mapped(p);
2004 		if (i == 0)
2005 			eb->first_page = p;
2006 		if (!PageUptodate(p))
2007 			uptodate = 0;
2008 		unlock_page(p);
2009 	}
2010 	if (uptodate)
2011 		eb->flags |= EXTENT_UPTODATE;
2012 	return eb;
2013 fail:
2014 	free_extent_buffer(eb);
2015 	return NULL;
2016 }
2017 EXPORT_SYMBOL(alloc_extent_buffer);
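
/*
 * typical call sequence (sketch, error handling trimmed):
 *
 *	eb = alloc_extent_buffer(tree, blocknr, blocksize, GFP_NOFS);
 *	if (eb) {
 *		read_extent_buffer_pages(tree, eb, 1);
 *		... read_extent_buffer() / write_extent_buffer() ...
 *		free_extent_buffer(eb);
 *	}
 */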
2018 
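/*
 * like alloc_extent_buffer, but only picks up pages already present
 * in the page cache (find_get_page instead of find_or_create_page)
 * and never starts IO.  Returns NULL if any page in the range is
 * missing.
 */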
2019 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2020 					 u64 start, unsigned long len,
2021 					  gfp_t mask)
2022 {
2023 	unsigned long num_pages = ((start + len - 1) >> PAGE_CACHE_SHIFT) -
2024 				  (start >> PAGE_CACHE_SHIFT) + 1;
2025 	unsigned long i;
2026 	unsigned long index = start >> PAGE_CACHE_SHIFT;
2027 	struct extent_buffer *eb;
2028 	struct page *p;
2029 	struct address_space *mapping = tree->mapping;
2030 
2031 	eb = __alloc_extent_buffer(mask);
	if (!eb)
		return NULL;
2034 
2035 	eb->alloc_addr = __builtin_return_address(0);
2036 	eb->start = start;
2037 	eb->len = len;
2038 	atomic_set(&eb->refs, 1);
2039 
2040 	for (i = 0; i < num_pages; i++, index++) {
2041 		p = find_get_page(mapping, index);
2042 		if (!p) {
2043 			/* make sure the free only frees the pages we've
2044 			 * grabbed a reference on
2045 			 */
2046 			eb->len = i << PAGE_CACHE_SHIFT;
2047 			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
2048 			goto fail;
2049 		}
2050 		set_page_extent_mapped(p);
2051 		if (i == 0)
2052 			eb->first_page = p;
2053 	}
2054 	return eb;
2055 fail:
2056 	free_extent_buffer(eb);
2057 	return NULL;
2058 }
2059 EXPORT_SYMBOL(find_extent_buffer);
2060 
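/*
 * drop one reference on a buffer returned by alloc_extent_buffer or
 * find_extent_buffer.  The final put releases the page references the
 * buffer was holding and recycles the struct.
 */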
2061 void free_extent_buffer(struct extent_buffer *eb)
2062 {
2063 	unsigned long i;
2064 	unsigned long num_pages;
2065 
2066 	if (!eb)
2067 		return;
2068 
2069 	if (!atomic_dec_and_test(&eb->refs))
2070 		return;
2071 
2072 	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
2073 		(eb->start >> PAGE_CACHE_SHIFT) + 1;
2074 
2075 	if (eb->first_page)
2076 		page_cache_release(eb->first_page);
2077 	for (i = 1; i < num_pages; i++) {
2078 		page_cache_release(extent_buffer_page(eb, i));
2079 	}
2080 	__free_extent_buffer(eb);
2081 }
2082 EXPORT_SYMBOL(free_extent_buffer);
2083 
2084 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2085 			      struct extent_buffer *eb)
2086 {
2087 	int set;
2088 	unsigned long i;
2089 	unsigned long num_pages;
2090 	struct page *page;
2091 
2092 	u64 start = eb->start;
2093 	u64 end = start + eb->len - 1;
2094 
2095 	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2096 	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
2097 		(eb->start >> PAGE_CACHE_SHIFT) + 1;
2098 
2099 	for (i = 0; i < num_pages; i++) {
2100 		page = extent_buffer_page(eb, i);
2101 		lock_page(page);
		/*
		 * if we're on the first or last page and the block isn't
		 * aligned on a page boundary, do extra checks to make sure
		 * we don't clean a page that is partially dirty
		 */
2107 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2108 		    ((i == num_pages - 1) &&
2109 		     ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
2111 			end  = start + PAGE_CACHE_SIZE - 1;
2112 			if (test_range_bit(tree, start, end,
2113 					   EXTENT_DIRTY, 0)) {
2114 				unlock_page(page);
2115 				continue;
2116 			}
2117 		}
2118 		clear_page_dirty_for_io(page);
2119 		unlock_page(page);
2120 	}
2121 	return 0;
2122 }
2123 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2124 
2125 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2126 				    struct extent_buffer *eb)
2127 {
2128 	return wait_on_extent_writeback(tree, eb->start,
2129 					eb->start + eb->len - 1);
2130 }
2131 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2132 
2133 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2134 			     struct extent_buffer *eb)
2135 {
2136 	return set_range_dirty(tree, eb->start, eb->start + eb->len - 1);
2137 }
2138 EXPORT_SYMBOL(set_extent_buffer_dirty);
2139 
2140 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2141 				struct extent_buffer *eb)
2142 {
2143 	unsigned long i;
2144 	struct page *page;
2145 	unsigned long num_pages;
2146 
2147 	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
2148 		(eb->start >> PAGE_CACHE_SHIFT) + 1;
2149 
2150 	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2151 			    GFP_NOFS);
2152 	for (i = 0; i < num_pages; i++) {
2153 		page = extent_buffer_page(eb, i);
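		/*
		 * partial pages at either end may carry data from
		 * neighbouring buffers, so only mark them uptodate when
		 * the whole page is covered by uptodate extent state
		 */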
2154 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2155 		    ((i == num_pages - 1) &&
2156 		     ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) {
2157 			check_page_uptodate(tree, page);
2158 			continue;
2159 		}
2160 		SetPageUptodate(page);
2161 	}
2162 	return 0;
2163 }
2164 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2165 
2166 int extent_buffer_uptodate(struct extent_map_tree *tree,
2167 			     struct extent_buffer *eb)
2168 {
2169 	if (eb->flags & EXTENT_UPTODATE)
2170 		return 1;
2171 	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2172 			   EXTENT_UPTODATE, 1);
2173 }
2174 EXPORT_SYMBOL(extent_buffer_uptodate);
2175 
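/*
 * read in any pages of the buffer that aren't uptodate yet.  With
 * wait == 0 this only starts IO on pages it can lock without blocking
 * and returns right away; with wait == 1 it waits for every page and
 * returns -EIO if any of them failed to read.
 */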
2176 int read_extent_buffer_pages(struct extent_map_tree *tree,
2177 			     struct extent_buffer *eb, int wait)
2178 {
2179 	unsigned long i;
2180 	struct page *page;
2181 	int err;
2182 	int ret = 0;
2183 	unsigned long num_pages;
2184 
2185 	if (eb->flags & EXTENT_UPTODATE)
2186 		return 0;
2187 
2188 	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2189 			   EXTENT_UPTODATE, 1)) {
2190 		return 0;
2191 	}
2192 
2193 	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
2194 		(eb->start >> PAGE_CACHE_SHIFT) + 1;
2195 	for (i = 0; i < num_pages; i++) {
2196 		page = extent_buffer_page(eb, i);
		if (PageUptodate(page))
			continue;
		if (!wait) {
			if (TestSetPageLocked(page))
				continue;
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
2215 	}
2216 
	if (ret || !wait)
		return ret;
2220 
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
2230 }
2231 EXPORT_SYMBOL(read_extent_buffer_pages);
2232 
2233 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2234 			unsigned long start,
2235 			unsigned long len)
2236 {
2237 	size_t cur;
2238 	size_t offset;
2239 	struct page *page;
2240 	char *kaddr;
2241 	char *dst = (char *)dstv;
2242 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2243 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2244 
2245 	WARN_ON(start > eb->len);
2246 	WARN_ON(start + len > eb->start + eb->len);
2247 
2248 	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
2249 	if (i == 0)
2250 		offset += start_offset;
2251 
	while (len > 0) {
2253 		page = extent_buffer_page(eb, i);
2254 		WARN_ON(!PageUptodate(page));
2255 
2256 		cur = min(len, (PAGE_CACHE_SIZE - offset));
2257 		kaddr = kmap_atomic(page, KM_USER0);
2258 		memcpy(dst, kaddr + offset, cur);
2259 		kunmap_atomic(kaddr, KM_USER0);
2260 
2261 		dst += cur;
2262 		len -= cur;
2263 		offset = 0;
2264 		i++;
2265 	}
2266 }
2267 EXPORT_SYMBOL(read_extent_buffer);
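
/*
 * example (sketch): pull a small on-disk structure out of a buffer
 * that has already been read with read_extent_buffer_pages():
 *
 *	struct btrfs_header header;
 *
 *	read_extent_buffer(eb, &header, 0, sizeof(header));
 */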
2268 
2269 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2270 		      unsigned long min_len,
2271 		      char **token, char **map,
2272 		      unsigned long *map_start,
2273 		      unsigned long *map_len, int km)
2274 {
2275 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
2276 	char *kaddr;
2277 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2278 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2279 	unsigned long end_i = (start_offset + start + min_len) >>
2280 				PAGE_CACHE_SHIFT;
2281 
2282 	if (i != end_i)
2283 		return -EINVAL;
2284 
2285 	WARN_ON(start > eb->len);
2286 
2287 	if (i == 0) {
2288 		offset = start_offset;
2289 		*map_start = 0;
2290 	} else {
2291 		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
2292 	}
2293 
2294 	kaddr = kmap_atomic(extent_buffer_page(eb, i), km);
2295 	*token = kaddr;
2296 	*map = kaddr + offset;
2297 	*map_len = PAGE_CACHE_SIZE - offset;
2298 	return 0;
2299 }
2300 EXPORT_SYMBOL(map_extent_buffer);
2301 
2302 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2303 {
2304 	kunmap_atomic(token, km);
2305 }
2306 EXPORT_SYMBOL(unmap_extent_buffer);
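
/*
 * example map/unmap pair (sketch): look at bytes in place instead of
 * copying them out.  This only works while the range stays inside a
 * single page; otherwise map_extent_buffer returns -EINVAL and the
 * caller must fall back to read_extent_buffer():
 *
 *	char *token, *map;
 *	unsigned long map_start, map_len;
 *
 *	if (!map_extent_buffer(eb, off, 4, &token, &map,
 *			       &map_start, &map_len, KM_USER0)) {
 *		... use map[0 .. map_len - 1] ...
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 */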
2307 
2308 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2309 			  unsigned long start,
2310 			  unsigned long len)
2311 {
2312 	size_t cur;
2313 	size_t offset;
2314 	struct page *page;
2315 	char *kaddr;
2316 	char *ptr = (char *)ptrv;
2317 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2318 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2319 	int ret = 0;
2320 
2321 	WARN_ON(start > eb->len);
2322 	WARN_ON(start + len > eb->start + eb->len);
2323 
2324 	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
2325 	if (i == 0)
2326 		offset += start_offset;
2327 
	while (len > 0) {
2329 		page = extent_buffer_page(eb, i);
2330 		WARN_ON(!PageUptodate(page));
2331 
2332 		cur = min(len, (PAGE_CACHE_SIZE - offset));
2333 
2334 		kaddr = kmap_atomic(page, KM_USER0);
2335 		ret = memcmp(ptr, kaddr + offset, cur);
2336 		kunmap_atomic(kaddr, KM_USER0);
2337 		if (ret)
2338 			break;
2339 
2340 		ptr += cur;
2341 		len -= cur;
2342 		offset = 0;
2343 		i++;
2344 	}
2345 	return ret;
2346 }
2347 EXPORT_SYMBOL(memcmp_extent_buffer);
2348 
2349 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2350 			 unsigned long start, unsigned long len)
2351 {
2352 	size_t cur;
2353 	size_t offset;
2354 	struct page *page;
2355 	char *kaddr;
2356 	char *src = (char *)srcv;
2357 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2358 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2359 
2360 	WARN_ON(start > eb->len);
2361 	WARN_ON(start + len > eb->start + eb->len);
2362 
2363 	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
2364 	if (i == 0)
2365 		offset += start_offset;
2366 
	while (len > 0) {
2368 		page = extent_buffer_page(eb, i);
2369 		WARN_ON(!PageUptodate(page));
2370 
2371 		cur = min(len, PAGE_CACHE_SIZE - offset);
2372 		kaddr = kmap_atomic(page, KM_USER0);
2373 		memcpy(kaddr + offset, src, cur);
2374 		kunmap_atomic(kaddr, KM_USER0);
2375 
2376 		src += cur;
2377 		len -= cur;
2378 		offset = 0;
2379 		i++;
2380 	}
2381 }
2382 EXPORT_SYMBOL(write_extent_buffer);
2383 
2384 void memset_extent_buffer(struct extent_buffer *eb, char c,
2385 			  unsigned long start, unsigned long len)
2386 {
2387 	size_t cur;
2388 	size_t offset;
2389 	struct page *page;
2390 	char *kaddr;
2391 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2392 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2393 
2394 	WARN_ON(start > eb->len);
2395 	WARN_ON(start + len > eb->start + eb->len);
2396 
2397 	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
2398 	if (i == 0)
2399 		offset += start_offset;
2400 
	while (len > 0) {
2402 		page = extent_buffer_page(eb, i);
2403 		WARN_ON(!PageUptodate(page));
2404 
2405 		cur = min(len, PAGE_CACHE_SIZE - offset);
2406 		kaddr = kmap_atomic(page, KM_USER0);
2407 		memset(kaddr + offset, c, cur);
2408 		kunmap_atomic(kaddr, KM_USER0);
2409 
2410 		len -= cur;
2411 		offset = 0;
2412 		i++;
2413 	}
2414 }
2415 EXPORT_SYMBOL(memset_extent_buffer);
2416 
2417 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2418 			unsigned long dst_offset, unsigned long src_offset,
2419 			unsigned long len)
2420 {
2421 	u64 dst_len = dst->len;
2422 	size_t cur;
2423 	size_t offset;
2424 	struct page *page;
2425 	char *kaddr;
2426 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2427 	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2428 
2429 	WARN_ON(src->len != dst_len);
2430 
2431 	offset = dst_offset & ((unsigned long)PAGE_CACHE_SIZE - 1);
2432 	if (i == 0)
2433 		offset += start_offset;
2434 
	while (len > 0) {
2436 		page = extent_buffer_page(dst, i);
2437 		WARN_ON(!PageUptodate(page));
2438 
2439 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2440 
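		/*
		 * KM_USER1 here because read_extent_buffer() maps the
		 * source page with KM_USER0 while this mapping is live
		 */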
2441 		kaddr = kmap_atomic(page, KM_USER1);
2442 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
2443 		kunmap_atomic(kaddr, KM_USER1);
2444 
2445 		src_offset += cur;
2446 		len -= cur;
2447 		offset = 0;
2448 		i++;
2449 	}
2450 }
2451 EXPORT_SYMBOL(copy_extent_buffer);
2452 
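/*
 * copy len bytes back to front so overlapping ranges inside the same
 * extent buffer survive even when source and destination sit on
 * different pages
 */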
2453 static void move_pages(struct page *dst_page, struct page *src_page,
2454 		       unsigned long dst_off, unsigned long src_off,
2455 		       unsigned long len)
2456 {
2457 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2458 	if (dst_page == src_page) {
2459 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2460 	} else {
2461 		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2462 		char *p = dst_kaddr + dst_off + len;
2463 		char *s = src_kaddr + src_off + len;
2464 
2465 		while (len--)
2466 			*--p = *--s;
2467 
2468 		kunmap_atomic(src_kaddr, KM_USER1);
2469 	}
2470 	kunmap_atomic(dst_kaddr, KM_USER0);
2471 }
2472 
2473 static void copy_pages(struct page *dst_page, struct page *src_page,
2474 		       unsigned long dst_off, unsigned long src_off,
2475 		       unsigned long len)
2476 {
2477 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2478 	char *src_kaddr;
2479 
	if (dst_page != src_page) {
		src_kaddr = kmap_atomic(src_page, KM_USER1);
		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
		kunmap_atomic(src_kaddr, KM_USER1);
	} else {
		/* ranges within one page may overlap, memcpy is not safe */
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
2489 }
2490 
2491 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2492 			   unsigned long src_offset, unsigned long len)
2493 {
2494 	size_t cur;
2495 	size_t dst_off_in_page;
2496 	size_t src_off_in_page;
2497 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2498 	unsigned long dst_i;
2499 	unsigned long src_i;
2500 
2501 	if (src_offset + len > dst->len) {
2502 		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2503 		       src_offset, len, dst->len);
2504 		BUG_ON(1);
2505 	}
2506 	if (dst_offset + len > dst->len) {
2507 		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2508 		       dst_offset, len, dst->len);
2509 		BUG_ON(1);
2510 	}
2511 
	while (len > 0) {
2513 		dst_off_in_page = dst_offset &
2514 			((unsigned long)PAGE_CACHE_SIZE - 1);
2515 		src_off_in_page = src_offset &
2516 			((unsigned long)PAGE_CACHE_SIZE - 1);
2517 
2518 		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2519 		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2520 
2521 		if (src_i == 0)
2522 			src_off_in_page += start_offset;
2523 		if (dst_i == 0)
2524 			dst_off_in_page += start_offset;
2525 
2526 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2527 					       src_off_in_page));
2528 		cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE -
2529 					       dst_off_in_page));
2530 
2531 		copy_pages(extent_buffer_page(dst, dst_i),
2532 			   extent_buffer_page(dst, src_i),
2533 			   dst_off_in_page, src_off_in_page, cur);
2534 
2535 		src_offset += cur;
2536 		dst_offset += cur;
2537 		len -= cur;
2538 	}
2539 }
2540 EXPORT_SYMBOL(memcpy_extent_buffer);
2541 
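/*
 * overlap-safe move within a single extent buffer.  Moves toward
 * lower offsets go front to back through memcpy_extent_buffer();
 * everything else is walked backwards with move_pages().
 */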
2542 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2543 			   unsigned long src_offset, unsigned long len)
2544 {
2545 	size_t cur;
2546 	size_t dst_off_in_page;
2547 	size_t src_off_in_page;
2548 	unsigned long dst_end = dst_offset + len - 1;
2549 	unsigned long src_end = src_offset + len - 1;
2550 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2551 	unsigned long dst_i;
2552 	unsigned long src_i;
2553 
2554 	if (src_offset + len > dst->len) {
2555 		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2556 		       src_offset, len, dst->len);
2557 		BUG_ON(1);
2558 	}
2559 	if (dst_offset + len > dst->len) {
2560 		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2561 		       dst_offset, len, dst->len);
2562 		BUG_ON(1);
2563 	}
2564 	if (dst_offset < src_offset) {
2565 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2566 		return;
2567 	}
	while (len > 0) {
2569 		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2570 		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2571 
2572 		dst_off_in_page = dst_end &
2573 			((unsigned long)PAGE_CACHE_SIZE - 1);
2574 		src_off_in_page = src_end &
2575 			((unsigned long)PAGE_CACHE_SIZE - 1);
2576 
2577 		if (src_i == 0)
2578 			src_off_in_page += start_offset;
2579 		if (dst_i == 0)
2580 			dst_off_in_page += start_offset;
2581 
2582 		cur = min(len, src_off_in_page + 1);
2583 		cur = min(cur, dst_off_in_page + 1);
2584 
2585 		move_pages(extent_buffer_page(dst, dst_i),
2586 			   extent_buffer_page(dst, src_i),
2587 			   dst_off_in_page - cur + 1,
2588 			   src_off_in_page - cur + 1, cur);
2589 
2590 		dst_end -= cur - 1;
2591 		src_end -= cur - 1;
2592 		len -= cur;
2593 	}
2594 }
2595 EXPORT_SYMBOL(memmove_extent_buffer);
2596