xref: /openbmc/linux/fs/btrfs/extent_io.c (revision 63dc02bd)
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/module.h>
8 #include <linux/spinlock.h>
9 #include <linux/blkdev.h>
10 #include <linux/swap.h>
11 #include <linux/writeback.h>
12 #include <linux/pagevec.h>
13 #include <linux/prefetch.h>
14 #include <linux/cleancache.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
17 #include "compat.h"
18 #include "ctree.h"
19 #include "btrfs_inode.h"
20 #include "volumes.h"
21 #include "check-integrity.h"
22 #include "locking.h"
23 
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
26 
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
29 
30 #define LEAK_DEBUG 0
31 #if LEAK_DEBUG
32 static DEFINE_SPINLOCK(leak_lock);
33 #endif
34 
35 #define BUFFER_LRU_MAX 64
36 
37 struct tree_entry {
38 	u64 start;
39 	u64 end;
40 	struct rb_node rb_node;
41 };
42 
43 struct extent_page_data {
44 	struct bio *bio;
45 	struct extent_io_tree *tree;
46 	get_extent_t *get_extent;
47 
48 	/* tells writepage not to lock the state bits for this range;
49 	 * it still does the unlocking
50 	 */
51 	unsigned int extent_locked:1;
52 
53 	/* tells the submit_bio code to use a WRITE_SYNC */
54 	unsigned int sync_io:1;
55 };
56 
57 static noinline void flush_write_bio(void *data);
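/* map an io tree back to the btrfs_fs_info that owns it */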
58 static inline struct btrfs_fs_info *
59 tree_fs_info(struct extent_io_tree *tree)
60 {
61 	return btrfs_sb(tree->mapping->host->i_sb);
62 }
63 
64 int __init extent_io_init(void)
65 {
66 	extent_state_cache = kmem_cache_create("extent_state",
67 			sizeof(struct extent_state), 0,
68 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
69 	if (!extent_state_cache)
70 		return -ENOMEM;
71 
72 	extent_buffer_cache = kmem_cache_create("extent_buffers",
73 			sizeof(struct extent_buffer), 0,
74 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
75 	if (!extent_buffer_cache)
76 		goto free_state_cache;
77 	return 0;
78 
79 free_state_cache:
80 	kmem_cache_destroy(extent_state_cache);
81 	return -ENOMEM;
82 }
83 
84 void extent_io_exit(void)
85 {
86 	struct extent_state *state;
87 	struct extent_buffer *eb;
88 
89 	while (!list_empty(&states)) {
90 		state = list_entry(states.next, struct extent_state, leak_list);
91 		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
92 		       "state %lu in tree %p refs %d\n",
93 		       (unsigned long long)state->start,
94 		       (unsigned long long)state->end,
95 		       state->state, state->tree, atomic_read(&state->refs));
96 		list_del(&state->leak_list);
97 		kmem_cache_free(extent_state_cache, state);
98 
99 	}
100 
101 	while (!list_empty(&buffers)) {
102 		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
103 		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
104 		       "refs %d\n", (unsigned long long)eb->start,
105 		       eb->len, atomic_read(&eb->refs));
106 		list_del(&eb->leak_list);
107 		kmem_cache_free(extent_buffer_cache, eb);
108 	}
109 	if (extent_state_cache)
110 		kmem_cache_destroy(extent_state_cache);
111 	if (extent_buffer_cache)
112 		kmem_cache_destroy(extent_buffer_cache);
113 }
114 
115 void extent_io_tree_init(struct extent_io_tree *tree,
116 			 struct address_space *mapping)
117 {
118 	tree->state = RB_ROOT;
119 	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
120 	tree->ops = NULL;
121 	tree->dirty_bytes = 0;
122 	spin_lock_init(&tree->lock);
123 	spin_lock_init(&tree->buffer_lock);
124 	tree->mapping = mapping;
125 }
126 
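/*
 * allocate a new extent_state struct with no bits set, no private data and a
 * single reference; returns NULL if the allocation fails
 */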
127 static struct extent_state *alloc_extent_state(gfp_t mask)
128 {
129 	struct extent_state *state;
130 #if LEAK_DEBUG
131 	unsigned long flags;
132 #endif
133 
134 	state = kmem_cache_alloc(extent_state_cache, mask);
135 	if (!state)
136 		return state;
137 	state->state = 0;
138 	state->private = 0;
139 	state->tree = NULL;
140 #if LEAK_DEBUG
141 	spin_lock_irqsave(&leak_lock, flags);
142 	list_add(&state->leak_list, &states);
143 	spin_unlock_irqrestore(&leak_lock, flags);
144 #endif
145 	atomic_set(&state->refs, 1);
146 	init_waitqueue_head(&state->wq);
147 	trace_alloc_extent_state(state, mask, _RET_IP_);
148 	return state;
149 }
150 
151 void free_extent_state(struct extent_state *state)
152 {
153 	if (!state)
154 		return;
155 	if (atomic_dec_and_test(&state->refs)) {
156 #if LEAK_DEBUG
157 		unsigned long flags;
158 #endif
159 		WARN_ON(state->tree);
160 #if LEAK_DEBUG
161 		spin_lock_irqsave(&leak_lock, flags);
162 		list_del(&state->leak_list);
163 		spin_unlock_irqrestore(&leak_lock, flags);
164 #endif
165 		trace_free_extent_state(state, _RET_IP_);
166 		kmem_cache_free(extent_state_cache, state);
167 	}
168 }
169 
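/*
 * link 'node' into the rb-tree, ordered by the [start, end] range of each
 * tree_entry.  If an existing entry already covers 'offset', nothing is
 * inserted and that entry's rb_node is returned; otherwise NULL is returned.
 */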
170 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
171 				   struct rb_node *node)
172 {
173 	struct rb_node **p = &root->rb_node;
174 	struct rb_node *parent = NULL;
175 	struct tree_entry *entry;
176 
177 	while (*p) {
178 		parent = *p;
179 		entry = rb_entry(parent, struct tree_entry, rb_node);
180 
181 		if (offset < entry->start)
182 			p = &(*p)->rb_left;
183 		else if (offset > entry->end)
184 			p = &(*p)->rb_right;
185 		else
186 			return parent;
187 	}
188 
189 	entry = rb_entry(node, struct tree_entry, rb_node);
190 	rb_link_node(node, parent, p);
191 	rb_insert_color(node, root);
192 	return NULL;
193 }
194 
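/*
 * search for an entry that contains 'offset' and return it.  If there is no
 * such entry, return NULL and, when requested, fill *prev_ret with the nearest
 * entry ending at or after 'offset' and *next_ret with the nearest entry
 * starting at or before it (note that the two names read backwards here).
 */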
195 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
196 				     struct rb_node **prev_ret,
197 				     struct rb_node **next_ret)
198 {
199 	struct rb_root *root = &tree->state;
200 	struct rb_node *n = root->rb_node;
201 	struct rb_node *prev = NULL;
202 	struct rb_node *orig_prev = NULL;
203 	struct tree_entry *entry;
204 	struct tree_entry *prev_entry = NULL;
205 
206 	while (n) {
207 		entry = rb_entry(n, struct tree_entry, rb_node);
208 		prev = n;
209 		prev_entry = entry;
210 
211 		if (offset < entry->start)
212 			n = n->rb_left;
213 		else if (offset > entry->end)
214 			n = n->rb_right;
215 		else
216 			return n;
217 	}
218 
219 	if (prev_ret) {
220 		orig_prev = prev;
221 		while (prev && offset > prev_entry->end) {
222 			prev = rb_next(prev);
223 			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
224 		}
225 		*prev_ret = prev;
226 		prev = orig_prev;
227 	}
228 
229 	if (next_ret) {
230 		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
231 		while (prev && offset < prev_entry->start) {
232 			prev = rb_prev(prev);
233 			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
234 		}
235 		*next_ret = prev;
236 	}
237 	return NULL;
238 }
239 
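/*
 * return the entry containing 'offset', or failing that the first entry that
 * ends after 'offset'; NULL if neither exists
 */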
240 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
241 					  u64 offset)
242 {
243 	struct rb_node *prev = NULL;
244 	struct rb_node *ret;
245 
246 	ret = __etree_search(tree, offset, &prev, NULL);
247 	if (!ret)
248 		return prev;
249 	return ret;
250 }
251 
252 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
253 		     struct extent_state *other)
254 {
255 	if (tree->ops && tree->ops->merge_extent_hook)
256 		tree->ops->merge_extent_hook(tree->mapping->host, new,
257 					     other);
258 }
259 
260 /*
261  * utility function to look for merge candidates inside a given range.
262  * Any extents with matching state are merged together into a single
263  * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
264  * are not merged because the end_io handlers need to be able to do
265  * operations on them without sleeping (or doing allocations/splits).
266  *
267  * This should be called with the tree lock held.
268  */
269 static void merge_state(struct extent_io_tree *tree,
270 		        struct extent_state *state)
271 {
272 	struct extent_state *other;
273 	struct rb_node *other_node;
274 
275 	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
276 		return;
277 
278 	other_node = rb_prev(&state->rb_node);
279 	if (other_node) {
280 		other = rb_entry(other_node, struct extent_state, rb_node);
281 		if (other->end == state->start - 1 &&
282 		    other->state == state->state) {
283 			merge_cb(tree, state, other);
284 			state->start = other->start;
285 			other->tree = NULL;
286 			rb_erase(&other->rb_node, &tree->state);
287 			free_extent_state(other);
288 		}
289 	}
290 	other_node = rb_next(&state->rb_node);
291 	if (other_node) {
292 		other = rb_entry(other_node, struct extent_state, rb_node);
293 		if (other->start == state->end + 1 &&
294 		    other->state == state->state) {
295 			merge_cb(tree, state, other);
296 			state->end = other->end;
297 			other->tree = NULL;
298 			rb_erase(&other->rb_node, &tree->state);
299 			free_extent_state(other);
300 		}
301 	}
302 }
303 
304 static void set_state_cb(struct extent_io_tree *tree,
305 			 struct extent_state *state, int *bits)
306 {
307 	if (tree->ops && tree->ops->set_bit_hook)
308 		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
309 }
310 
311 static void clear_state_cb(struct extent_io_tree *tree,
312 			   struct extent_state *state, int *bits)
313 {
314 	if (tree->ops && tree->ops->clear_bit_hook)
315 		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
316 }
317 
318 static void set_state_bits(struct extent_io_tree *tree,
319 			   struct extent_state *state, int *bits);
320 
321 /*
322  * insert an extent_state struct into the tree.  'bits' are set on the
323  * struct before it is inserted.
324  *
325  * This may return -EEXIST if the extent is already there, in which case the
326  * state struct is freed.
327  *
328  * The tree lock is not taken internally.  This is a utility function and
329  * probably isn't what you want to call (see set/clear_extent_bit).
330  */
331 static int insert_state(struct extent_io_tree *tree,
332 			struct extent_state *state, u64 start, u64 end,
333 			int *bits)
334 {
335 	struct rb_node *node;
336 
337 	if (end < start) {
338 		printk(KERN_ERR "btrfs end < start %llu %llu\n",
339 		       (unsigned long long)end,
340 		       (unsigned long long)start);
341 		WARN_ON(1);
342 	}
343 	state->start = start;
344 	state->end = end;
345 
346 	set_state_bits(tree, state, bits);
347 
348 	node = tree_insert(&tree->state, end, &state->rb_node);
349 	if (node) {
350 		struct extent_state *found;
351 		found = rb_entry(node, struct extent_state, rb_node);
352 		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
353 		       "%llu %llu\n", (unsigned long long)found->start,
354 		       (unsigned long long)found->end,
355 		       (unsigned long long)start, (unsigned long long)end);
356 		return -EEXIST;
357 	}
358 	state->tree = tree;
359 	merge_state(tree, state);
360 	return 0;
361 }
362 
363 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
364 		     u64 split)
365 {
366 	if (tree->ops && tree->ops->split_extent_hook)
367 		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
368 }
369 
370 /*
371  * split a given extent state struct in two, inserting the preallocated
372  * struct 'prealloc' as the newly created second half.  'split' indicates an
373  * offset inside 'orig' where it should be split.
374  *
375  * Before calling,
376  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
377  * are two extent state structs in the tree:
378  * prealloc: [orig->start, split - 1]
379  * orig: [ split, orig->end ]
380  *
381  * The tree locks are not taken by this function. They need to be held
382  * by the caller.
383  */
384 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
385 		       struct extent_state *prealloc, u64 split)
386 {
387 	struct rb_node *node;
388 
389 	split_cb(tree, orig, split);
390 
391 	prealloc->start = orig->start;
392 	prealloc->end = split - 1;
393 	prealloc->state = orig->state;
394 	orig->start = split;
395 
396 	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
397 	if (node) {
398 		free_extent_state(prealloc);
399 		return -EEXIST;
400 	}
401 	prealloc->tree = tree;
402 	return 0;
403 }
404 
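/*
 * return the extent_state following 'state' in the tree, or NULL if 'state'
 * is the last one
 */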
405 static struct extent_state *next_state(struct extent_state *state)
406 {
407 	struct rb_node *next = rb_next(&state->rb_node);
408 	if (next)
409 		return rb_entry(next, struct extent_state, rb_node);
410 	else
411 		return NULL;
412 }
413 
414 /*
415  * utility function to clear some bits in an extent state struct.
416  * it will optionally wake up anyone waiting on this state (wake == 1)
417  *
418  * If no bits are set on the state struct after clearing things, the
419  * struct is freed and removed from the tree
420  */
421 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
422 					    struct extent_state *state,
423 					    int *bits, int wake)
424 {
425 	struct extent_state *next;
426 	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
427 
428 	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
429 		u64 range = state->end - state->start + 1;
430 		WARN_ON(range > tree->dirty_bytes);
431 		tree->dirty_bytes -= range;
432 	}
433 	clear_state_cb(tree, state, bits);
434 	state->state &= ~bits_to_clear;
435 	if (wake)
436 		wake_up(&state->wq);
437 	if (state->state == 0) {
438 		next = next_state(state);
439 		if (state->tree) {
440 			rb_erase(&state->rb_node, &tree->state);
441 			state->tree = NULL;
442 			free_extent_state(state);
443 		} else {
444 			WARN_ON(1);
445 		}
446 	} else {
447 		merge_state(tree, state);
448 		next = next_state(state);
449 	}
450 	return next;
451 }
452 
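/*
 * make sure we have a preallocated extent_state to work with; falls back to a
 * GFP_ATOMIC allocation (which may still return NULL) if none was passed in
 */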
453 static struct extent_state *
454 alloc_extent_state_atomic(struct extent_state *prealloc)
455 {
456 	if (!prealloc)
457 		prealloc = alloc_extent_state(GFP_ATOMIC);
458 
459 	return prealloc;
460 }
461 
462 void extent_io_tree_panic(struct extent_io_tree *tree, int err)
463 {
464 	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
465 		    "Extent tree was modified by another "
466 		    "thread while locked.");
467 }
468 
469 /*
470  * clear some bits on a range in the tree.  This may require splitting
471  * or inserting elements in the tree, so the gfp mask is used to
472  * indicate which allocations or sleeping are allowed.
473  *
474  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
475  * the given range from the tree regardless of state (ie for truncate).
476  *
477  * the range [start, end] is inclusive.
478  *
479  * This takes the tree lock, and returns 0 on success and < 0 on error.
480  */
481 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
482 		     int bits, int wake, int delete,
483 		     struct extent_state **cached_state,
484 		     gfp_t mask)
485 {
486 	struct extent_state *state;
487 	struct extent_state *cached;
488 	struct extent_state *prealloc = NULL;
489 	struct rb_node *node;
490 	u64 last_end;
491 	int err;
492 	int clear = 0;
493 
494 	if (delete)
495 		bits |= ~EXTENT_CTLBITS;
496 	bits |= EXTENT_FIRST_DELALLOC;
497 
498 	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
499 		clear = 1;
500 again:
501 	if (!prealloc && (mask & __GFP_WAIT)) {
502 		prealloc = alloc_extent_state(mask);
503 		if (!prealloc)
504 			return -ENOMEM;
505 	}
506 
507 	spin_lock(&tree->lock);
508 	if (cached_state) {
509 		cached = *cached_state;
510 
511 		if (clear) {
512 			*cached_state = NULL;
513 			cached_state = NULL;
514 		}
515 
516 		if (cached && cached->tree && cached->start <= start &&
517 		    cached->end > start) {
518 			if (clear)
519 				atomic_dec(&cached->refs);
520 			state = cached;
521 			goto hit_next;
522 		}
523 		if (clear)
524 			free_extent_state(cached);
525 	}
526 	/*
527 	 * this search will find the extents that end after
528 	 * our range starts
529 	 */
530 	node = tree_search(tree, start);
531 	if (!node)
532 		goto out;
533 	state = rb_entry(node, struct extent_state, rb_node);
534 hit_next:
535 	if (state->start > end)
536 		goto out;
537 	WARN_ON(state->end < start);
538 	last_end = state->end;
539 
540 	/* the state doesn't have the wanted bits, go ahead */
541 	if (!(state->state & bits)) {
542 		state = next_state(state);
543 		goto next;
544 	}
545 
546 	/*
547 	 *     | ---- desired range ---- |
548 	 *  | state | or
549 	 *  | ------------- state -------------- |
550 	 *
551 	 * We need to split the extent we found, and may flip
552 	 * bits on second half.
553 	 *
554 	 * If the extent we found extends past our range, we
555 	 * just split and search again.  It'll get split again
556 	 * the next time though.
557 	 *
558 	 * If the extent we found is inside our range, we clear
559 	 * the desired bit on it.
560 	 */
561 
562 	if (state->start < start) {
563 		prealloc = alloc_extent_state_atomic(prealloc);
564 		BUG_ON(!prealloc);
565 		err = split_state(tree, state, prealloc, start);
566 		if (err)
567 			extent_io_tree_panic(tree, err);
568 
569 		prealloc = NULL;
570 		if (err)
571 			goto out;
572 		if (state->end <= end) {
573 			clear_state_bit(tree, state, &bits, wake);
574 			if (last_end == (u64)-1)
575 				goto out;
576 			start = last_end + 1;
577 		}
578 		goto search_again;
579 	}
580 	/*
581 	 * | ---- desired range ---- |
582 	 *                        | state |
583 	 * We need to split the extent, and clear the bit
584 	 * on the first half
585 	 */
586 	if (state->start <= end && state->end > end) {
587 		prealloc = alloc_extent_state_atomic(prealloc);
588 		BUG_ON(!prealloc);
589 		err = split_state(tree, state, prealloc, end + 1);
590 		if (err)
591 			extent_io_tree_panic(tree, err);
592 
593 		if (wake)
594 			wake_up(&state->wq);
595 
596 		clear_state_bit(tree, prealloc, &bits, wake);
597 
598 		prealloc = NULL;
599 		goto out;
600 	}
601 
602 	state = clear_state_bit(tree, state, &bits, wake);
603 next:
604 	if (last_end == (u64)-1)
605 		goto out;
606 	start = last_end + 1;
607 	if (start <= end && state && !need_resched())
608 		goto hit_next;
609 	goto search_again;
610 
611 out:
612 	spin_unlock(&tree->lock);
613 	if (prealloc)
614 		free_extent_state(prealloc);
615 
616 	return 0;
617 
618 search_again:
619 	if (start > end)
620 		goto out;
621 	spin_unlock(&tree->lock);
622 	if (mask & __GFP_WAIT)
623 		cond_resched();
624 	goto again;
625 }
626 
627 static void wait_on_state(struct extent_io_tree *tree,
628 			  struct extent_state *state)
629 		__releases(tree->lock)
630 		__acquires(tree->lock)
631 {
632 	DEFINE_WAIT(wait);
633 	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
634 	spin_unlock(&tree->lock);
635 	schedule();
636 	spin_lock(&tree->lock);
637 	finish_wait(&state->wq, &wait);
638 }
639 
640 /*
641  * waits for one or more bits to clear on a range in the state tree.
642  * The range [start, end] is inclusive.
643  * The tree lock is taken by this function
644  */
645 void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
646 {
647 	struct extent_state *state;
648 	struct rb_node *node;
649 
650 	spin_lock(&tree->lock);
651 again:
652 	while (1) {
653 		/*
654 		 * this search will find all the extents that end after
655 		 * our range starts
656 		 */
657 		node = tree_search(tree, start);
658 		if (!node)
659 			break;
660 
661 		state = rb_entry(node, struct extent_state, rb_node);
662 
663 		if (state->start > end)
664 			goto out;
665 
666 		if (state->state & bits) {
667 			start = state->start;
668 			atomic_inc(&state->refs);
669 			wait_on_state(tree, state);
670 			free_extent_state(state);
671 			goto again;
672 		}
673 		start = state->end + 1;
674 
675 		if (start > end)
676 			break;
677 
678 		cond_resched_lock(&tree->lock);
679 	}
680 out:
681 	spin_unlock(&tree->lock);
682 }
683 
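/*
 * set some bits on an extent_state struct.  The control bits are masked out,
 * the set_bit hook is called, and the tree's dirty_bytes accounting is updated
 * when EXTENT_DIRTY is newly set.
 */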
684 static void set_state_bits(struct extent_io_tree *tree,
685 			   struct extent_state *state,
686 			   int *bits)
687 {
688 	int bits_to_set = *bits & ~EXTENT_CTLBITS;
689 
690 	set_state_cb(tree, state, bits);
691 	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
692 		u64 range = state->end - state->start + 1;
693 		tree->dirty_bytes += range;
694 	}
695 	state->state |= bits_to_set;
696 }
697 
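/*
 * stash a referenced pointer to 'state' in *cached_ptr so callers can skip the
 * tree search next time; only states carrying lock/writeback/boundary bits are
 * worth caching
 */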
698 static void cache_state(struct extent_state *state,
699 			struct extent_state **cached_ptr)
700 {
701 	if (cached_ptr && !(*cached_ptr)) {
702 		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
703 			*cached_ptr = state;
704 			atomic_inc(&state->refs);
705 		}
706 	}
707 }
708 
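/* drop the reference held by a cached state pointer, if there is one */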
709 static void uncache_state(struct extent_state **cached_ptr)
710 {
711 	if (cached_ptr && (*cached_ptr)) {
712 		struct extent_state *state = *cached_ptr;
713 		*cached_ptr = NULL;
714 		free_extent_state(state);
715 	}
716 }
717 
718 /*
719  * set some bits on a range in the tree.  This may require allocations or
720  * sleeping, so the gfp mask is used to indicate what is allowed.
721  *
722  * If any of the exclusive bits are set, this will fail with -EEXIST if some
723  * part of the range already has the desired bits set.  The start of the
724  * existing range is returned in failed_start in this case.
725  *
726  * [start, end] is inclusive.  This takes the tree lock.
727  */
728 
729 static int __must_check
730 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
731 		 int bits, int exclusive_bits, u64 *failed_start,
732 		 struct extent_state **cached_state, gfp_t mask)
733 {
734 	struct extent_state *state;
735 	struct extent_state *prealloc = NULL;
736 	struct rb_node *node;
737 	int err = 0;
738 	u64 last_start;
739 	u64 last_end;
740 
741 	bits |= EXTENT_FIRST_DELALLOC;
742 again:
743 	if (!prealloc && (mask & __GFP_WAIT)) {
744 		prealloc = alloc_extent_state(mask);
745 		BUG_ON(!prealloc);
746 	}
747 
748 	spin_lock(&tree->lock);
749 	if (cached_state && *cached_state) {
750 		state = *cached_state;
751 		if (state->start <= start && state->end > start &&
752 		    state->tree) {
753 			node = &state->rb_node;
754 			goto hit_next;
755 		}
756 	}
757 	/*
758 	 * this search will find all the extents that end after
759 	 * our range starts.
760 	 */
761 	node = tree_search(tree, start);
762 	if (!node) {
763 		prealloc = alloc_extent_state_atomic(prealloc);
764 		BUG_ON(!prealloc);
765 		err = insert_state(tree, prealloc, start, end, &bits);
766 		if (err)
767 			extent_io_tree_panic(tree, err);
768 
769 		prealloc = NULL;
770 		goto out;
771 	}
772 	state = rb_entry(node, struct extent_state, rb_node);
773 hit_next:
774 	last_start = state->start;
775 	last_end = state->end;
776 
777 	/*
778 	 * | ---- desired range ---- |
779 	 * | state |
780 	 *
781 	 * Just lock what we found and keep going
782 	 */
783 	if (state->start == start && state->end <= end) {
784 		struct rb_node *next_node;
785 		if (state->state & exclusive_bits) {
786 			*failed_start = state->start;
787 			err = -EEXIST;
788 			goto out;
789 		}
790 
791 		set_state_bits(tree, state, &bits);
792 
793 		cache_state(state, cached_state);
794 		merge_state(tree, state);
795 		if (last_end == (u64)-1)
796 			goto out;
797 
798 		start = last_end + 1;
799 		next_node = rb_next(&state->rb_node);
800 		if (next_node && start < end && prealloc && !need_resched()) {
801 			state = rb_entry(next_node, struct extent_state,
802 					 rb_node);
803 			if (state->start == start)
804 				goto hit_next;
805 		}
806 		goto search_again;
807 	}
808 
809 	/*
810 	 *     | ---- desired range ---- |
811 	 * | state |
812 	 *   or
813 	 * | ------------- state -------------- |
814 	 *
815 	 * We need to split the extent we found, and may flip bits on
816 	 * second half.
817 	 *
818 	 * If the extent we found extends past our
819 	 * range, we just split and search again.  It'll get split
820 	 * again the next time though.
821 	 *
822 	 * If the extent we found is inside our range, we set the
823 	 * desired bit on it.
824 	 */
825 	if (state->start < start) {
826 		if (state->state & exclusive_bits) {
827 			*failed_start = start;
828 			err = -EEXIST;
829 			goto out;
830 		}
831 
832 		prealloc = alloc_extent_state_atomic(prealloc);
833 		BUG_ON(!prealloc);
834 		err = split_state(tree, state, prealloc, start);
835 		if (err)
836 			extent_io_tree_panic(tree, err);
837 
838 		prealloc = NULL;
839 		if (err)
840 			goto out;
841 		if (state->end <= end) {
842 			set_state_bits(tree, state, &bits);
843 			cache_state(state, cached_state);
844 			merge_state(tree, state);
845 			if (last_end == (u64)-1)
846 				goto out;
847 			start = last_end + 1;
848 		}
849 		goto search_again;
850 	}
851 	/*
852 	 * | ---- desired range ---- |
853 	 *     | state | or               | state |
854 	 *
855 	 * There's a hole, we need to insert something in it and
856 	 * ignore the extent we found.
857 	 */
858 	if (state->start > start) {
859 		u64 this_end;
860 		if (end < last_start)
861 			this_end = end;
862 		else
863 			this_end = last_start - 1;
864 
865 		prealloc = alloc_extent_state_atomic(prealloc);
866 		BUG_ON(!prealloc);
867 
868 		/*
869 		 * Avoid freeing 'prealloc' if it can be merged with
870 		 * the later extent.
871 		 */
872 		err = insert_state(tree, prealloc, start, this_end,
873 				   &bits);
874 		if (err)
875 			extent_io_tree_panic(tree, err);
876 
877 		cache_state(prealloc, cached_state);
878 		prealloc = NULL;
879 		start = this_end + 1;
880 		goto search_again;
881 	}
882 	/*
883 	 * | ---- desired range ---- |
884 	 *                        | state |
885 	 * We need to split the extent, and set the bit
886 	 * on the first half
887 	 */
888 	if (state->start <= end && state->end > end) {
889 		if (state->state & exclusive_bits) {
890 			*failed_start = start;
891 			err = -EEXIST;
892 			goto out;
893 		}
894 
895 		prealloc = alloc_extent_state_atomic(prealloc);
896 		BUG_ON(!prealloc);
897 		err = split_state(tree, state, prealloc, end + 1);
898 		if (err)
899 			extent_io_tree_panic(tree, err);
900 
901 		set_state_bits(tree, prealloc, &bits);
902 		cache_state(prealloc, cached_state);
903 		merge_state(tree, prealloc);
904 		prealloc = NULL;
905 		goto out;
906 	}
907 
908 	goto search_again;
909 
910 out:
911 	spin_unlock(&tree->lock);
912 	if (prealloc)
913 		free_extent_state(prealloc);
914 
915 	return err;
916 
917 search_again:
918 	if (start > end)
919 		goto out;
920 	spin_unlock(&tree->lock);
921 	if (mask & __GFP_WAIT)
922 		cond_resched();
923 	goto again;
924 }
925 
926 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
927 		   u64 *failed_start, struct extent_state **cached_state,
928 		   gfp_t mask)
929 {
930 	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
931 				cached_state, mask);
932 }
933 
934 
935 /**
936  * convert_extent_bit - convert all bits in a given range from one bit to another
937  * @tree:	the io tree to search
938  * @start:	the start offset in bytes
939  * @end:	the end offset in bytes (inclusive)
940  * @bits:	the bits to set in this range
941  * @clear_bits:	the bits to clear in this range
942  * @mask:	the allocation mask
943  *
944  * This will go through and set bits for the given range.  If any states exist
945  * already in this range, they are set with the given bits and cleared of the
946  * clear_bits.  This is only meant to be used by things that are mergeable, i.e.
947  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
948  * boundary bits like LOCK.
949  */
950 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
951 		       int bits, int clear_bits, gfp_t mask)
952 {
953 	struct extent_state *state;
954 	struct extent_state *prealloc = NULL;
955 	struct rb_node *node;
956 	int err = 0;
957 	u64 last_start;
958 	u64 last_end;
959 
960 again:
961 	if (!prealloc && (mask & __GFP_WAIT)) {
962 		prealloc = alloc_extent_state(mask);
963 		if (!prealloc)
964 			return -ENOMEM;
965 	}
966 
967 	spin_lock(&tree->lock);
968 	/*
969 	 * this search will find all the extents that end after
970 	 * our range starts.
971 	 */
972 	node = tree_search(tree, start);
973 	if (!node) {
974 		prealloc = alloc_extent_state_atomic(prealloc);
975 		if (!prealloc) {
976 			err = -ENOMEM;
977 			goto out;
978 		}
979 		err = insert_state(tree, prealloc, start, end, &bits);
980 		prealloc = NULL;
981 		if (err)
982 			extent_io_tree_panic(tree, err);
983 		goto out;
984 	}
985 	state = rb_entry(node, struct extent_state, rb_node);
986 hit_next:
987 	last_start = state->start;
988 	last_end = state->end;
989 
990 	/*
991 	 * | ---- desired range ---- |
992 	 * | state |
993 	 *
994 	 * Just lock what we found and keep going
995 	 */
996 	if (state->start == start && state->end <= end) {
997 		struct rb_node *next_node;
998 
999 		set_state_bits(tree, state, &bits);
1000 		clear_state_bit(tree, state, &clear_bits, 0);
1001 		if (last_end == (u64)-1)
1002 			goto out;
1003 
1004 		start = last_end + 1;
1005 		next_node = rb_next(&state->rb_node);
1006 		if (next_node && start < end && prealloc && !need_resched()) {
1007 			state = rb_entry(next_node, struct extent_state,
1008 					 rb_node);
1009 			if (state->start == start)
1010 				goto hit_next;
1011 		}
1012 		goto search_again;
1013 	}
1014 
1015 	/*
1016 	 *     | ---- desired range ---- |
1017 	 * | state |
1018 	 *   or
1019 	 * | ------------- state -------------- |
1020 	 *
1021 	 * We need to split the extent we found, and may flip bits on
1022 	 * second half.
1023 	 *
1024 	 * If the extent we found extends past our
1025 	 * range, we just split and search again.  It'll get split
1026 	 * again the next time though.
1027 	 *
1028 	 * If the extent we found is inside our range, we set the
1029 	 * desired bit on it.
1030 	 */
1031 	if (state->start < start) {
1032 		prealloc = alloc_extent_state_atomic(prealloc);
1033 		if (!prealloc) {
1034 			err = -ENOMEM;
1035 			goto out;
1036 		}
1037 		err = split_state(tree, state, prealloc, start);
1038 		if (err)
1039 			extent_io_tree_panic(tree, err);
1040 		prealloc = NULL;
1041 		if (err)
1042 			goto out;
1043 		if (state->end <= end) {
1044 			set_state_bits(tree, state, &bits);
1045 			clear_state_bit(tree, state, &clear_bits, 0);
1046 			if (last_end == (u64)-1)
1047 				goto out;
1048 			start = last_end + 1;
1049 		}
1050 		goto search_again;
1051 	}
1052 	/*
1053 	 * | ---- desired range ---- |
1054 	 *     | state | or               | state |
1055 	 *
1056 	 * There's a hole, we need to insert something in it and
1057 	 * ignore the extent we found.
1058 	 */
1059 	if (state->start > start) {
1060 		u64 this_end;
1061 		if (end < last_start)
1062 			this_end = end;
1063 		else
1064 			this_end = last_start - 1;
1065 
1066 		prealloc = alloc_extent_state_atomic(prealloc);
1067 		if (!prealloc) {
1068 			err = -ENOMEM;
1069 			goto out;
1070 		}
1071 
1072 		/*
1073 		 * Avoid freeing 'prealloc' if it can be merged with
1074 		 * the later extent.
1075 		 */
1076 		err = insert_state(tree, prealloc, start, this_end,
1077 				   &bits);
1078 		if (err)
1079 			extent_io_tree_panic(tree, err);
1080 		prealloc = NULL;
1081 		start = this_end + 1;
1082 		goto search_again;
1083 	}
1084 	/*
1085 	 * | ---- desired range ---- |
1086 	 *                        | state |
1087 	 * We need to split the extent, and set the bit
1088 	 * on the first half
1089 	 */
1090 	if (state->start <= end && state->end > end) {
1091 		prealloc = alloc_extent_state_atomic(prealloc);
1092 		if (!prealloc) {
1093 			err = -ENOMEM;
1094 			goto out;
1095 		}
1096 
1097 		err = split_state(tree, state, prealloc, end + 1);
1098 		if (err)
1099 			extent_io_tree_panic(tree, err);
1100 
1101 		set_state_bits(tree, prealloc, &bits);
1102 		clear_state_bit(tree, prealloc, &clear_bits, 0);
1103 		prealloc = NULL;
1104 		goto out;
1105 	}
1106 
1107 	goto search_again;
1108 
1109 out:
1110 	spin_unlock(&tree->lock);
1111 	if (prealloc)
1112 		free_extent_state(prealloc);
1113 
1114 	return err;
1115 
1116 search_again:
1117 	if (start > end)
1118 		goto out;
1119 	spin_unlock(&tree->lock);
1120 	if (mask & __GFP_WAIT)
1121 		cond_resched();
1122 	goto again;
1123 }
1124 
1125 /* wrappers around set/clear extent bit */
1126 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1127 		     gfp_t mask)
1128 {
1129 	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1130 			      NULL, mask);
1131 }
1132 
1133 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1134 		    int bits, gfp_t mask)
1135 {
1136 	return set_extent_bit(tree, start, end, bits, NULL,
1137 			      NULL, mask);
1138 }
1139 
1140 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1141 		      int bits, gfp_t mask)
1142 {
1143 	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1144 }
1145 
1146 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1147 			struct extent_state **cached_state, gfp_t mask)
1148 {
1149 	return set_extent_bit(tree, start, end,
1150 			      EXTENT_DELALLOC | EXTENT_UPTODATE,
1151 			      NULL, cached_state, mask);
1152 }
1153 
1154 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1155 		       gfp_t mask)
1156 {
1157 	return clear_extent_bit(tree, start, end,
1158 				EXTENT_DIRTY | EXTENT_DELALLOC |
1159 				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1160 }
1161 
1162 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1163 		     gfp_t mask)
1164 {
1165 	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1166 			      NULL, mask);
1167 }
1168 
1169 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1170 			struct extent_state **cached_state, gfp_t mask)
1171 {
1172 	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
1173 			      cached_state, mask);
1174 }
1175 
1176 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
1177 				 u64 end, struct extent_state **cached_state,
1178 				 gfp_t mask)
1179 {
1180 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1181 				cached_state, mask);
1182 }
1183 
1184 /*
1185  * either insert or lock state struct between start and end.  This always uses
1186  * GFP_NOFS allocations and waits until any conflicting locked range clears.
1187  */
1188 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1189 		     int bits, struct extent_state **cached_state)
1190 {
1191 	int err;
1192 	u64 failed_start;
1193 	while (1) {
1194 		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1195 				       EXTENT_LOCKED, &failed_start,
1196 				       cached_state, GFP_NOFS);
1197 		if (err == -EEXIST) {
1198 			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1199 			start = failed_start;
1200 		} else
1201 			break;
1202 		WARN_ON(start > end);
1203 	}
1204 	return err;
1205 }
1206 
1207 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1208 {
1209 	return lock_extent_bits(tree, start, end, 0, NULL);
1210 }
1211 
1212 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1213 {
1214 	int err;
1215 	u64 failed_start;
1216 
1217 	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1218 			       &failed_start, NULL, GFP_NOFS);
1219 	if (err == -EEXIST) {
1220 		if (failed_start > start)
1221 			clear_extent_bit(tree, start, failed_start - 1,
1222 					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1223 		return 0;
1224 	}
1225 	return 1;
1226 }
1227 
1228 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1229 			 struct extent_state **cached, gfp_t mask)
1230 {
1231 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1232 				mask);
1233 }
1234 
1235 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1236 {
1237 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1238 				GFP_NOFS);
1239 }
1240 
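/*
 * Typical use of the locking helpers above (an illustrative sketch only; the
 * range below is made up):
 *
 *	lock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1);
 *	... operate on [start, start + len - 1] ...
 *	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1);
 *
 * try_lock_extent() returns 1 when the range was locked and 0 when it was not.
 */
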
1241 /*
1242  * helper function to set writeback on all the pages for a given range in the tree
1243  */
1244 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1245 {
1246 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1247 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1248 	struct page *page;
1249 
1250 	while (index <= end_index) {
1251 		page = find_get_page(tree->mapping, index);
1252 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1253 		set_page_writeback(page);
1254 		page_cache_release(page);
1255 		index++;
1256 	}
1257 	return 0;
1258 }
1259 
1260 /* find the first state struct with 'bits' set after 'start', and
1261  * return it.  tree->lock must be held.  NULL will be returned if
1262  * nothing was found after 'start'
1263  */
1264 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1265 						 u64 start, int bits)
1266 {
1267 	struct rb_node *node;
1268 	struct extent_state *state;
1269 
1270 	/*
1271 	 * this search will find all the extents that end after
1272 	 * our range starts.
1273 	 */
1274 	node = tree_search(tree, start);
1275 	if (!node)
1276 		goto out;
1277 
1278 	while (1) {
1279 		state = rb_entry(node, struct extent_state, rb_node);
1280 		if (state->end >= start && (state->state & bits))
1281 			return state;
1282 
1283 		node = rb_next(node);
1284 		if (!node)
1285 			break;
1286 	}
1287 out:
1288 	return NULL;
1289 }
1290 
1291 /*
1292  * find the first offset in the io tree with 'bits' set. zero is
1293  * returned if we find something, and *start_ret and *end_ret are
1294  * set to reflect the state struct that was found.
1295  *
1296  * If nothing was found, 1 is returned.
1297  */
1298 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1299 			  u64 *start_ret, u64 *end_ret, int bits)
1300 {
1301 	struct extent_state *state;
1302 	int ret = 1;
1303 
1304 	spin_lock(&tree->lock);
1305 	state = find_first_extent_bit_state(tree, start, bits);
1306 	if (state) {
1307 		*start_ret = state->start;
1308 		*end_ret = state->end;
1309 		ret = 0;
1310 	}
1311 	spin_unlock(&tree->lock);
1312 	return ret;
1313 }
1314 
1315 /*
1316  * find a contiguous range of bytes in the file marked as delalloc, not
1317  * more than 'max_bytes'.  *start and *end are used to return the range.
1318  *
1319  * 1 is returned if we find something, 0 if nothing was in the tree
1320  */
1321 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1322 					u64 *start, u64 *end, u64 max_bytes,
1323 					struct extent_state **cached_state)
1324 {
1325 	struct rb_node *node;
1326 	struct extent_state *state;
1327 	u64 cur_start = *start;
1328 	u64 found = 0;
1329 	u64 total_bytes = 0;
1330 
1331 	spin_lock(&tree->lock);
1332 
1333 	/*
1334 	 * this search will find all the extents that end after
1335 	 * our range starts.
1336 	 */
1337 	node = tree_search(tree, cur_start);
1338 	if (!node) {
1339 		if (!found)
1340 			*end = (u64)-1;
1341 		goto out;
1342 	}
1343 
1344 	while (1) {
1345 		state = rb_entry(node, struct extent_state, rb_node);
1346 		if (found && (state->start != cur_start ||
1347 			      (state->state & EXTENT_BOUNDARY))) {
1348 			goto out;
1349 		}
1350 		if (!(state->state & EXTENT_DELALLOC)) {
1351 			if (!found)
1352 				*end = state->end;
1353 			goto out;
1354 		}
1355 		if (!found) {
1356 			*start = state->start;
1357 			*cached_state = state;
1358 			atomic_inc(&state->refs);
1359 		}
1360 		found++;
1361 		*end = state->end;
1362 		cur_start = state->end + 1;
1363 		node = rb_next(node);
1364 		if (!node)
1365 			break;
1366 		total_bytes += state->end - state->start + 1;
1367 		if (total_bytes >= max_bytes)
1368 			break;
1369 	}
1370 out:
1371 	spin_unlock(&tree->lock);
1372 	return found;
1373 }
1374 
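/*
 * unlock every page in [start, end] except for locked_page, which the caller
 * is still responsible for
 */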
1375 static noinline void __unlock_for_delalloc(struct inode *inode,
1376 					   struct page *locked_page,
1377 					   u64 start, u64 end)
1378 {
1379 	int ret;
1380 	struct page *pages[16];
1381 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1382 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1383 	unsigned long nr_pages = end_index - index + 1;
1384 	int i;
1385 
1386 	if (index == locked_page->index && end_index == index)
1387 		return;
1388 
1389 	while (nr_pages > 0) {
1390 		ret = find_get_pages_contig(inode->i_mapping, index,
1391 				     min_t(unsigned long, nr_pages,
1392 				     ARRAY_SIZE(pages)), pages);
1393 		for (i = 0; i < ret; i++) {
1394 			if (pages[i] != locked_page)
1395 				unlock_page(pages[i]);
1396 			page_cache_release(pages[i]);
1397 		}
1398 		nr_pages -= ret;
1399 		index += ret;
1400 		cond_resched();
1401 	}
1402 }
1403 
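/*
 * lock every page in [delalloc_start, delalloc_end] except locked_page, which
 * the caller already holds.  Returns 0 on success, or -EAGAIN (after unlocking
 * everything locked so far) if a page went away or is no longer dirty.
 */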
1404 static noinline int lock_delalloc_pages(struct inode *inode,
1405 					struct page *locked_page,
1406 					u64 delalloc_start,
1407 					u64 delalloc_end)
1408 {
1409 	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1410 	unsigned long start_index = index;
1411 	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1412 	unsigned long pages_locked = 0;
1413 	struct page *pages[16];
1414 	unsigned long nrpages;
1415 	int ret;
1416 	int i;
1417 
1418 	/* the caller is responsible for locking the start index */
1419 	if (index == locked_page->index && index == end_index)
1420 		return 0;
1421 
1422 	/* skip the page at the start index */
1423 	nrpages = end_index - index + 1;
1424 	while (nrpages > 0) {
1425 		ret = find_get_pages_contig(inode->i_mapping, index,
1426 				     min_t(unsigned long,
1427 				     nrpages, ARRAY_SIZE(pages)), pages);
1428 		if (ret == 0) {
1429 			ret = -EAGAIN;
1430 			goto done;
1431 		}
1432 		/* now we have an array of pages, lock them all */
1433 		for (i = 0; i < ret; i++) {
1434 			/*
1435 			 * the caller is taking responsibility for
1436 			 * locked_page
1437 			 */
1438 			if (pages[i] != locked_page) {
1439 				lock_page(pages[i]);
1440 				if (!PageDirty(pages[i]) ||
1441 				    pages[i]->mapping != inode->i_mapping) {
1442 					ret = -EAGAIN;
1443 					unlock_page(pages[i]);
1444 					page_cache_release(pages[i]);
1445 					goto done;
1446 				}
1447 			}
1448 			page_cache_release(pages[i]);
1449 			pages_locked++;
1450 		}
1451 		nrpages -= ret;
1452 		index += ret;
1453 		cond_resched();
1454 	}
1455 	ret = 0;
1456 done:
1457 	if (ret && pages_locked) {
1458 		__unlock_for_delalloc(inode, locked_page,
1459 			      delalloc_start,
1460 			      ((u64)(start_index + pages_locked - 1)) <<
1461 			      PAGE_CACHE_SHIFT);
1462 	}
1463 	return ret;
1464 }
1465 
1466 /*
1467  * find a contiguous range of bytes in the file marked as delalloc, not
1468  * more than 'max_bytes'.  *start and *end are used to return the range.
1469  *
1470  * 1 is returned if we find something, 0 if nothing was in the tree
1471  */
1472 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1473 					     struct extent_io_tree *tree,
1474 					     struct page *locked_page,
1475 					     u64 *start, u64 *end,
1476 					     u64 max_bytes)
1477 {
1478 	u64 delalloc_start;
1479 	u64 delalloc_end;
1480 	u64 found;
1481 	struct extent_state *cached_state = NULL;
1482 	int ret;
1483 	int loops = 0;
1484 
1485 again:
1486 	/* step one, find a bunch of delalloc bytes starting at start */
1487 	delalloc_start = *start;
1488 	delalloc_end = 0;
1489 	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1490 				    max_bytes, &cached_state);
1491 	if (!found || delalloc_end <= *start) {
1492 		*start = delalloc_start;
1493 		*end = delalloc_end;
1494 		free_extent_state(cached_state);
1495 		return found;
1496 	}
1497 
1498 	/*
1499 	 * start comes from the offset of locked_page.  We have to lock
1500 	 * pages in order, so we can't process delalloc bytes before
1501 	 * locked_page
1502 	 */
1503 	if (delalloc_start < *start)
1504 		delalloc_start = *start;
1505 
1506 	/*
1507 	 * make sure to limit the number of pages we try to lock down
1508 	 * if we're looping.
1509 	 */
1510 	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1511 		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1512 
1513 	/* step two, lock all the pages after the page that has start */
1514 	ret = lock_delalloc_pages(inode, locked_page,
1515 				  delalloc_start, delalloc_end);
1516 	if (ret == -EAGAIN) {
1517 		/* some of the pages are gone, let's avoid looping by
1518 		 * shortening the size of the delalloc range we're searching
1519 		 */
1520 		free_extent_state(cached_state);
1521 		if (!loops) {
1522 			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1523 			max_bytes = PAGE_CACHE_SIZE - offset;
1524 			loops = 1;
1525 			goto again;
1526 		} else {
1527 			found = 0;
1528 			goto out_failed;
1529 		}
1530 	}
1531 	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1532 
1533 	/* step three, lock the state bits for the whole range */
1534 	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1535 
1536 	/* then test to make sure it is all still delalloc */
1537 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
1538 			     EXTENT_DELALLOC, 1, cached_state);
1539 	if (!ret) {
1540 		unlock_extent_cached(tree, delalloc_start, delalloc_end,
1541 				     &cached_state, GFP_NOFS);
1542 		__unlock_for_delalloc(inode, locked_page,
1543 			      delalloc_start, delalloc_end);
1544 		cond_resched();
1545 		goto again;
1546 	}
1547 	free_extent_state(cached_state);
1548 	*start = delalloc_start;
1549 	*end = delalloc_end;
1550 out_failed:
1551 	return found;
1552 }
1553 
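/*
 * clear the bits selected by 'op' on [start, end] in the io tree, then apply
 * the requested page operations (private2, dirty, writeback, unlock) to every
 * page in the range; locked_page only has Private2 set and stays locked.
 */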
1554 int extent_clear_unlock_delalloc(struct inode *inode,
1555 				struct extent_io_tree *tree,
1556 				u64 start, u64 end, struct page *locked_page,
1557 				unsigned long op)
1558 {
1559 	int ret;
1560 	struct page *pages[16];
1561 	unsigned long index = start >> PAGE_CACHE_SHIFT;
1562 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1563 	unsigned long nr_pages = end_index - index + 1;
1564 	int i;
1565 	int clear_bits = 0;
1566 
1567 	if (op & EXTENT_CLEAR_UNLOCK)
1568 		clear_bits |= EXTENT_LOCKED;
1569 	if (op & EXTENT_CLEAR_DIRTY)
1570 		clear_bits |= EXTENT_DIRTY;
1571 
1572 	if (op & EXTENT_CLEAR_DELALLOC)
1573 		clear_bits |= EXTENT_DELALLOC;
1574 
1575 	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1576 	if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1577 		    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1578 		    EXTENT_SET_PRIVATE2)))
1579 		return 0;
1580 
1581 	while (nr_pages > 0) {
1582 		ret = find_get_pages_contig(inode->i_mapping, index,
1583 				     min_t(unsigned long,
1584 				     nr_pages, ARRAY_SIZE(pages)), pages);
1585 		for (i = 0; i < ret; i++) {
1586 
1587 			if (op & EXTENT_SET_PRIVATE2)
1588 				SetPagePrivate2(pages[i]);
1589 
1590 			if (pages[i] == locked_page) {
1591 				page_cache_release(pages[i]);
1592 				continue;
1593 			}
1594 			if (op & EXTENT_CLEAR_DIRTY)
1595 				clear_page_dirty_for_io(pages[i]);
1596 			if (op & EXTENT_SET_WRITEBACK)
1597 				set_page_writeback(pages[i]);
1598 			if (op & EXTENT_END_WRITEBACK)
1599 				end_page_writeback(pages[i]);
1600 			if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1601 				unlock_page(pages[i]);
1602 			page_cache_release(pages[i]);
1603 		}
1604 		nr_pages -= ret;
1605 		index += ret;
1606 		cond_resched();
1607 	}
1608 	return 0;
1609 }
1610 
1611 /*
1612  * count the number of bytes in the tree that have a given bit(s)
1613  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1614  * cached.  The total number found is returned.
1615  */
1616 u64 count_range_bits(struct extent_io_tree *tree,
1617 		     u64 *start, u64 search_end, u64 max_bytes,
1618 		     unsigned long bits, int contig)
1619 {
1620 	struct rb_node *node;
1621 	struct extent_state *state;
1622 	u64 cur_start = *start;
1623 	u64 total_bytes = 0;
1624 	u64 last = 0;
1625 	int found = 0;
1626 
1627 	if (search_end <= cur_start) {
1628 		WARN_ON(1);
1629 		return 0;
1630 	}
1631 
1632 	spin_lock(&tree->lock);
1633 	if (cur_start == 0 && bits == EXTENT_DIRTY) {
1634 		total_bytes = tree->dirty_bytes;
1635 		goto out;
1636 	}
1637 	/*
1638 	 * this search will find all the extents that end after
1639 	 * our range starts.
1640 	 */
1641 	node = tree_search(tree, cur_start);
1642 	if (!node)
1643 		goto out;
1644 
1645 	while (1) {
1646 		state = rb_entry(node, struct extent_state, rb_node);
1647 		if (state->start > search_end)
1648 			break;
1649 		if (contig && found && state->start > last + 1)
1650 			break;
1651 		if (state->end >= cur_start && (state->state & bits) == bits) {
1652 			total_bytes += min(search_end, state->end) + 1 -
1653 				       max(cur_start, state->start);
1654 			if (total_bytes >= max_bytes)
1655 				break;
1656 			if (!found) {
1657 				*start = max(cur_start, state->start);
1658 				found = 1;
1659 			}
1660 			last = state->end;
1661 		} else if (contig && found) {
1662 			break;
1663 		}
1664 		node = rb_next(node);
1665 		if (!node)
1666 			break;
1667 	}
1668 out:
1669 	spin_unlock(&tree->lock);
1670 	return total_bytes;
1671 }
1672 
1673 /*
1674  * set the private field for a given byte offset in the tree.  If there isn't
1675  * an extent_state there already, this does nothing.
1676  */
1677 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1678 {
1679 	struct rb_node *node;
1680 	struct extent_state *state;
1681 	int ret = 0;
1682 
1683 	spin_lock(&tree->lock);
1684 	/*
1685 	 * this search will find all the extents that end after
1686 	 * our range starts.
1687 	 */
1688 	node = tree_search(tree, start);
1689 	if (!node) {
1690 		ret = -ENOENT;
1691 		goto out;
1692 	}
1693 	state = rb_entry(node, struct extent_state, rb_node);
1694 	if (state->start != start) {
1695 		ret = -ENOENT;
1696 		goto out;
1697 	}
1698 	state->private = private;
1699 out:
1700 	spin_unlock(&tree->lock);
1701 	return ret;
1702 }
1703 
1704 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1705 {
1706 	struct rb_node *node;
1707 	struct extent_state *state;
1708 	int ret = 0;
1709 
1710 	spin_lock(&tree->lock);
1711 	/*
1712 	 * this search will find all the extents that end after
1713 	 * our range starts.
1714 	 */
1715 	node = tree_search(tree, start);
1716 	if (!node) {
1717 		ret = -ENOENT;
1718 		goto out;
1719 	}
1720 	state = rb_entry(node, struct extent_state, rb_node);
1721 	if (state->start != start) {
1722 		ret = -ENOENT;
1723 		goto out;
1724 	}
1725 	*private = state->private;
1726 out:
1727 	spin_unlock(&tree->lock);
1728 	return ret;
1729 }
1730 
1731 /*
1732  * searches a range in the state tree for the given bits.
1733  * If 'filled' == 1, this returns 1 only if every extent in the range
1734  * has the bits set.  Otherwise, 1 is returned if any bit in the
1735  * range is found set.
1736  */
1737 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1738 		   int bits, int filled, struct extent_state *cached)
1739 {
1740 	struct extent_state *state = NULL;
1741 	struct rb_node *node;
1742 	int bitset = 0;
1743 
1744 	spin_lock(&tree->lock);
1745 	if (cached && cached->tree && cached->start <= start &&
1746 	    cached->end > start)
1747 		node = &cached->rb_node;
1748 	else
1749 		node = tree_search(tree, start);
1750 	while (node && start <= end) {
1751 		state = rb_entry(node, struct extent_state, rb_node);
1752 
1753 		if (filled && state->start > start) {
1754 			bitset = 0;
1755 			break;
1756 		}
1757 
1758 		if (state->start > end)
1759 			break;
1760 
1761 		if (state->state & bits) {
1762 			bitset = 1;
1763 			if (!filled)
1764 				break;
1765 		} else if (filled) {
1766 			bitset = 0;
1767 			break;
1768 		}
1769 
1770 		if (state->end == (u64)-1)
1771 			break;
1772 
1773 		start = state->end + 1;
1774 		if (start > end)
1775 			break;
1776 		node = rb_next(node);
1777 		if (!node) {
1778 			if (filled)
1779 				bitset = 0;
1780 			break;
1781 		}
1782 	}
1783 	spin_unlock(&tree->lock);
1784 	return bitset;
1785 }
1786 
1787 /*
1788  * helper function to set a given page up to date if all the
1789  * extents in the tree for that page are up to date
1790  */
1791 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1792 {
1793 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1794 	u64 end = start + PAGE_CACHE_SIZE - 1;
1795 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1796 		SetPageUptodate(page);
1797 }
1798 
1799 /*
1800  * helper function to unlock a page if all the extents in the tree
1801  * for that page are unlocked
1802  */
1803 static void check_page_locked(struct extent_io_tree *tree, struct page *page)
1804 {
1805 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1806 	u64 end = start + PAGE_CACHE_SIZE - 1;
1807 	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1808 		unlock_page(page);
1809 }
1810 
1811 /*
1812  * helper function to end page writeback if all the extents
1813  * in the tree for that page are done with writeback
1814  */
1815 static void check_page_writeback(struct extent_io_tree *tree,
1816 				 struct page *page)
1817 {
1818 	end_page_writeback(page);
1819 }
1820 
1821 /*
1822  * When IO fails, either with EIO or csum verification fails, we
1823  * try other mirrors that might have a good copy of the data.  This
1824  * io_failure_record is used to record state as we go through all the
1825  * mirrors.  If another mirror has good data, the page is set up to date
1826  * and things continue.  If a good mirror can't be found, the original
1827  * bio end_io callback is called to indicate things have failed.
1828  */
1829 struct io_failure_record {
1830 	struct page *page;
1831 	u64 start;
1832 	u64 len;
1833 	u64 logical;
1834 	unsigned long bio_flags;
1835 	int this_mirror;
1836 	int failed_mirror;
1837 	int in_validation;
1838 };
1839 
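/*
 * drop the io_failure_record and clear its bookkeeping bits from the failure
 * tree; if the bad copy was actually rewritten, also clear EXTENT_DAMAGED
 * from the io tree
 */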
1840 static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1841 				int did_repair)
1842 {
1843 	int ret;
1844 	int err = 0;
1845 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1846 
1847 	set_state_private(failure_tree, rec->start, 0);
1848 	ret = clear_extent_bits(failure_tree, rec->start,
1849 				rec->start + rec->len - 1,
1850 				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1851 	if (ret)
1852 		err = ret;
1853 
1854 	if (did_repair) {
1855 		ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1856 					rec->start + rec->len - 1,
1857 					EXTENT_DAMAGED, GFP_NOFS);
1858 		if (ret && !err)
1859 			err = ret;
1860 	}
1861 
1862 	kfree(rec);
1863 	return err;
1864 }
1865 
1866 static void repair_io_failure_callback(struct bio *bio, int err)
1867 {
1868 	complete(bio->bi_private);
1869 }
1870 
1871 /*
1872  * this bypasses the standard btrfs submit functions deliberately, as
1873  * the standard behavior is to write all copies in a raid setup. here we only
1874  * want to write the one bad copy. so we do the mapping for ourselves and issue
1875  * submit_bio directly.
1876  * to avoid any synchronization issues, wait for the data after writing, which
1877  * actually prevents the read that triggered the error from finishing.
1878  * currently, there can be no more than two copies of every data bit. thus,
1879  * exactly one rewrite is required.
1880  */
1881 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1882 			u64 length, u64 logical, struct page *page,
1883 			int mirror_num)
1884 {
1885 	struct bio *bio;
1886 	struct btrfs_device *dev;
1887 	DECLARE_COMPLETION_ONSTACK(compl);
1888 	u64 map_length = 0;
1889 	u64 sector;
1890 	struct btrfs_bio *bbio = NULL;
1891 	int ret;
1892 
1893 	BUG_ON(!mirror_num);
1894 
1895 	bio = bio_alloc(GFP_NOFS, 1);
1896 	if (!bio)
1897 		return -EIO;
1898 	bio->bi_private = &compl;
1899 	bio->bi_end_io = repair_io_failure_callback;
1900 	bio->bi_size = 0;
1901 	map_length = length;
1902 
1903 	ret = btrfs_map_block(map_tree, WRITE, logical,
1904 			      &map_length, &bbio, mirror_num);
1905 	if (ret) {
1906 		bio_put(bio);
1907 		return -EIO;
1908 	}
1909 	BUG_ON(mirror_num != bbio->mirror_num);
1910 	sector = bbio->stripes[mirror_num-1].physical >> 9;
1911 	bio->bi_sector = sector;
1912 	dev = bbio->stripes[mirror_num-1].dev;
1913 	kfree(bbio);
1914 	if (!dev || !dev->bdev || !dev->writeable) {
1915 		bio_put(bio);
1916 		return -EIO;
1917 	}
1918 	bio->bi_bdev = dev->bdev;
1919 	bio_add_page(bio, page, length, start - page_offset(page));
1920 	btrfsic_submit_bio(WRITE_SYNC, bio);
1921 	wait_for_completion(&compl);
1922 
1923 	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1924 		/* try to remap that extent elsewhere? */
1925 		bio_put(bio);
1926 		return -EIO;
1927 	}
1928 
1929 	printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
1930 			"sector %llu)\n", page->mapping->host->i_ino, start,
1931 			dev->name, sector);
1932 
1933 	bio_put(bio);
1934 	return 0;
1935 }
1936 
1937 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1938 			 int mirror_num)
1939 {
1940 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1941 	u64 start = eb->start;
1942 	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
1943 	int ret = 0;
1944 
1945 	for (i = 0; i < num_pages; i++) {
1946 		struct page *p = extent_buffer_page(eb, i);
1947 		ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
1948 					start, p, mirror_num);
1949 		if (ret)
1950 			break;
1951 		start += PAGE_CACHE_SIZE;
1952 	}
1953 
1954 	return ret;
1955 }
1956 
1957 /*
1958  * each time an IO finishes, we do a fast check in the IO failure tree
1959  * to see if we need to process or clean up an io_failure_record
1960  */
1961 static int clean_io_failure(u64 start, struct page *page)
1962 {
1963 	u64 private;
1964 	u64 private_failure;
1965 	struct io_failure_record *failrec;
1966 	struct btrfs_mapping_tree *map_tree;
1967 	struct extent_state *state;
1968 	int num_copies;
1969 	int did_repair = 0;
1970 	int ret;
1971 	struct inode *inode = page->mapping->host;
1972 
1973 	private = 0;
1974 	ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1975 				(u64)-1, 1, EXTENT_DIRTY, 0);
1976 	if (!ret)
1977 		return 0;
1978 
1979 	ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1980 				&private_failure);
1981 	if (ret)
1982 		return 0;
1983 
1984 	failrec = (struct io_failure_record *)(unsigned long) private_failure;
1985 	BUG_ON(!failrec->this_mirror);
1986 
1987 	if (failrec->in_validation) {
1988 		/* there was no real error, just free the record */
1989 		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1990 			 failrec->start);
1991 		did_repair = 1;
1992 		goto out;
1993 	}
1994 
1995 	spin_lock(&BTRFS_I(inode)->io_tree.lock);
1996 	state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1997 					    failrec->start,
1998 					    EXTENT_LOCKED);
1999 	spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2000 
2001 	if (state && state->start == failrec->start) {
2002 		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
2003 		num_copies = btrfs_num_copies(map_tree, failrec->logical,
2004 						failrec->len);
2005 		if (num_copies > 1)  {
2006 			ret = repair_io_failure(map_tree, start, failrec->len,
2007 						failrec->logical, page,
2008 						failrec->failed_mirror);
2009 			did_repair = !ret;
2010 		}
2011 	}
2012 
2013 out:
2014 	if (!ret)
2015 		ret = free_io_failure(inode, failrec, did_repair);
2016 
2017 	return ret;
2018 }
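
/*
 * Editorial illustration, not part of the original file: the failure tree
 * stores a pointer to the io_failure_record as the u64 'private' of the
 * extent state covering 'start'.  A hypothetical lookup helper (the name is
 * invented for this sketch) would round-trip the pointer like this:
 */
static struct io_failure_record *example_lookup_failrec(struct inode *inode,
							u64 start)
{
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	u64 private;

	if (get_state_private(failure_tree, start, &private))
		return NULL;
	return (struct io_failure_record *)(unsigned long)private;
}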
2019 
2020 /*
2021  * this is a generic handler for readpage errors (default
2022  * readpage_io_failed_hook). if other copies exist, read those and write back
2023  * good data to the failed position. does not attempt to remap the failed
2024  * extent elsewhere, hoping the device will be smart enough to do this as
2025  * needed
2026  */
2027 
2028 static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2029 				u64 start, u64 end, int failed_mirror,
2030 				struct extent_state *state)
2031 {
2032 	struct io_failure_record *failrec = NULL;
2033 	u64 private;
2034 	struct extent_map *em;
2035 	struct inode *inode = page->mapping->host;
2036 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2037 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2038 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2039 	struct bio *bio;
2040 	int num_copies;
2041 	int ret;
2042 	int read_mode;
2043 	u64 logical;
2044 
2045 	BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2046 
2047 	ret = get_state_private(failure_tree, start, &private);
2048 	if (ret) {
2049 		failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2050 		if (!failrec)
2051 			return -ENOMEM;
2052 		failrec->start = start;
2053 		failrec->len = end - start + 1;
2054 		failrec->this_mirror = 0;
2055 		failrec->bio_flags = 0;
2056 		failrec->in_validation = 0;
2057 
2058 		read_lock(&em_tree->lock);
2059 		em = lookup_extent_mapping(em_tree, start, failrec->len);
2060 		if (!em) {
2061 			read_unlock(&em_tree->lock);
2062 			kfree(failrec);
2063 			return -EIO;
2064 		}
2065 
2066 		if (em->start > start || em->start + em->len < start) {
2067 			free_extent_map(em);
2068 			em = NULL;
2069 		}
2070 		read_unlock(&em_tree->lock);
2071 
2072 		if (!em || IS_ERR(em)) {
2073 			kfree(failrec);
2074 			return -EIO;
2075 		}
2076 		logical = start - em->start;
2077 		logical = em->block_start + logical;
2078 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2079 			logical = em->block_start;
2080 			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2081 			extent_set_compress_type(&failrec->bio_flags,
2082 						 em->compress_type);
2083 		}
2084 		pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2085 			 "len=%llu\n", logical, start, failrec->len);
2086 		failrec->logical = logical;
2087 		free_extent_map(em);
2088 
2089 		/* set the bits in the private failure tree */
2090 		ret = set_extent_bits(failure_tree, start, end,
2091 					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2092 		if (ret >= 0)
2093 			ret = set_state_private(failure_tree, start,
2094 						(u64)(unsigned long)failrec);
2095 		/* set the bits in the inode's tree */
2096 		if (ret >= 0)
2097 			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2098 						GFP_NOFS);
2099 		if (ret < 0) {
2100 			kfree(failrec);
2101 			return ret;
2102 		}
2103 	} else {
2104 		failrec = (struct io_failure_record *)(unsigned long)private;
2105 		pr_debug("bio_readpage_error: (found) logical=%llu, "
2106 			 "start=%llu, len=%llu, validation=%d\n",
2107 			 failrec->logical, failrec->start, failrec->len,
2108 			 failrec->in_validation);
2109 		/*
2110 		 * when data can be on disk more than twice, add to failrec here
2111 		 * (e.g. with a list for failed_mirror) to make
2112 		 * clean_io_failure() clean all those errors at once.
2113 		 */
2114 	}
2115 	num_copies = btrfs_num_copies(
2116 			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
2117 			      failrec->logical, failrec->len);
2118 	if (num_copies == 1) {
2119 		/*
2120 		 * we only have a single copy of the data, so don't bother with
2121 		 * all the retry and error correction code that follows. no
2122 		 * matter what the error is, it is very likely to persist.
2123 		 */
2124 		pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2125 			 "state=%p, num_copies=%d, next_mirror %d, "
2126 			 "failed_mirror %d\n", state, num_copies,
2127 			 failrec->this_mirror, failed_mirror);
2128 		free_io_failure(inode, failrec, 0);
2129 		return -EIO;
2130 	}
2131 
2132 	if (!state) {
2133 		spin_lock(&tree->lock);
2134 		state = find_first_extent_bit_state(tree, failrec->start,
2135 						    EXTENT_LOCKED);
2136 		if (state && state->start != failrec->start)
2137 			state = NULL;
2138 		spin_unlock(&tree->lock);
2139 	}
2140 
2141 	/*
2142 	 * there are two goals:
2143 	 *	a) deliver good data to the caller
2144 	 *	b) correct the bad sectors on disk
2145 	 */
2146 	if (failed_bio->bi_vcnt > 1) {
2147 		/*
2148 		 * to fulfill b), we need to know the exact failing sectors, as
2149 		 * we don't want to rewrite any more than the failed ones. thus,
2150 		 * we need separate read requests for the failed bio
2151 		 *
2152 		 * if the following BUG_ON triggers, our validation request got
2153 		 * merged. we need separate requests for our algorithm to work.
2154 		 */
2155 		BUG_ON(failrec->in_validation);
2156 		failrec->in_validation = 1;
2157 		failrec->this_mirror = failed_mirror;
2158 		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2159 	} else {
2160 		/*
2161 		 * we're ready to fulfill a) and b) at the same time. get a good
2162 		 * copy of the failed sector and if we succeed, we have set up
2163 		 * everything for repair_io_failure to do the rest for us.
2164 		 */
2165 		if (failrec->in_validation) {
2166 			BUG_ON(failrec->this_mirror != failed_mirror);
2167 			failrec->in_validation = 0;
2168 			failrec->this_mirror = 0;
2169 		}
2170 		failrec->failed_mirror = failed_mirror;
2171 		failrec->this_mirror++;
2172 		if (failrec->this_mirror == failed_mirror)
2173 			failrec->this_mirror++;
2174 		read_mode = READ_SYNC;
2175 	}
2176 
2177 	if (!state || failrec->this_mirror > num_copies) {
2178 		pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2179 			 "next_mirror %d, failed_mirror %d\n", state,
2180 			 num_copies, failrec->this_mirror, failed_mirror);
2181 		free_io_failure(inode, failrec, 0);
2182 		return -EIO;
2183 	}
2184 
2185 	bio = bio_alloc(GFP_NOFS, 1);
2186 	if (!bio) {
2187 		free_io_failure(inode, failrec, 0);
2188 		return -EIO;
2189 	}
2190 	bio->bi_private = state;
2191 	bio->bi_end_io = failed_bio->bi_end_io;
2192 	bio->bi_sector = failrec->logical >> 9;
2193 	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2194 	bio->bi_size = 0;
2195 
2196 	bio_add_page(bio, page, failrec->len, start - page_offset(page));
2197 
2198 	pr_debug("bio_readpage_error: submitting new read[%#x] to "
2199 		 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2200 		 failrec->this_mirror, num_copies, failrec->in_validation);
2201 
2202 	ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2203 					 failrec->this_mirror,
2204 					 failrec->bio_flags, 0);
2205 	return ret;
2206 }
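
/*
 * Editorial illustration, not part of the original file: the mirror
 * selection above simply walks mirrors 1..num_copies, skipping the one that
 * already failed.  Distilled into a standalone sketch (invented name): with
 * num_copies == 2 and failed_mirror == 1 the sequence is mirror 2, then give
 * up; with num_copies == 3 and failed_mirror == 2 it is 1, then 3, then give
 * up.
 */
static int example_next_mirror(int this_mirror, int failed_mirror,
			       int num_copies)
{
	this_mirror++;
	if (this_mirror == failed_mirror)
		this_mirror++;
	if (this_mirror > num_copies)
		return 0;	/* no untried copy left */
	return this_mirror;
}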
2207 
2208 /* lots and lots of room for performance fixes in the end_bio funcs */
2209 
2210 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2211 {
2212 	int uptodate = (err == 0);
2213 	struct extent_io_tree *tree;
2214 	int ret;
2215 
2216 	tree = &BTRFS_I(page->mapping->host)->io_tree;
2217 
2218 	if (tree->ops && tree->ops->writepage_end_io_hook) {
2219 		ret = tree->ops->writepage_end_io_hook(page, start,
2220 					       end, NULL, uptodate);
2221 		if (ret)
2222 			uptodate = 0;
2223 	}
2224 
2225 	if (!uptodate && tree->ops &&
2226 	    tree->ops->writepage_io_failed_hook) {
2227 		ret = tree->ops->writepage_io_failed_hook(NULL, page,
2228 						 start, end, NULL);
2229 		/* Writeback already completed */
2230 		if (ret == 0)
2231 			return 1;
2232 	}
2233 
2234 	if (!uptodate) {
2235 		clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
2236 		ClearPageUptodate(page);
2237 		SetPageError(page);
2238 	}
2239 	return 0;
2240 }
2241 
2242 /*
2243  * after a writepage IO is done, we need to:
2244  * clear the uptodate bits on error
2245  * clear the writeback bits in the extent tree for this IO
2246  * end_page_writeback if the page has no more pending IO
2247  *
2248  * Scheduling is not allowed, so the extent state tree is expected
2249  * to have one and only one object corresponding to this IO.
2250  */
2251 static void end_bio_extent_writepage(struct bio *bio, int err)
2252 {
2253 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2254 	struct extent_io_tree *tree;
2255 	u64 start;
2256 	u64 end;
2257 	int whole_page;
2258 
2259 	do {
2260 		struct page *page = bvec->bv_page;
2261 		tree = &BTRFS_I(page->mapping->host)->io_tree;
2262 
2263 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2264 			 bvec->bv_offset;
2265 		end = start + bvec->bv_len - 1;
2266 
2267 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2268 			whole_page = 1;
2269 		else
2270 			whole_page = 0;
2271 
2272 		if (--bvec >= bio->bi_io_vec)
2273 			prefetchw(&bvec->bv_page->flags);
2274 
2275 		if (end_extent_writepage(page, err, start, end))
2276 			continue;
2277 
2278 		if (whole_page)
2279 			end_page_writeback(page);
2280 		else
2281 			check_page_writeback(tree, page);
2282 	} while (bvec >= bio->bi_io_vec);
2283 
2284 	bio_put(bio);
2285 }
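
/*
 * Editorial worked example, not part of the original file: for a bio_vec
 * with page->index == 3, bv_offset == 0 and bv_len == PAGE_CACHE_SIZE
 * (4096 bytes on 4k-page systems), the loop above computes
 *
 *	start = (3 << 12) + 0    = 12288
 *	end   = 12288 + 4096 - 1 = 16383
 *
 * and whole_page is set, so end_page_writeback() is called directly instead
 * of check_page_writeback().
 */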
2286 
2287 /*
2288  * after a readpage IO is done, we need to:
2289  * clear the uptodate bits on error
2290  * set the uptodate bits if things worked
2291  * set the page up to date if all extents in the tree are uptodate
2292  * clear the lock bit in the extent tree
2293  * unlock the page if there are no other extents locked for it
2294  *
2295  * Scheduling is not allowed, so the extent state tree is expected
2296  * to have one and only one object corresponding to this IO.
2297  */
2298 static void end_bio_extent_readpage(struct bio *bio, int err)
2299 {
2300 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2301 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2302 	struct bio_vec *bvec = bio->bi_io_vec;
2303 	struct extent_io_tree *tree;
2304 	u64 start;
2305 	u64 end;
2306 	int whole_page;
2307 	int mirror;
2308 	int ret;
2309 
2310 	if (err)
2311 		uptodate = 0;
2312 
2313 	do {
2314 		struct page *page = bvec->bv_page;
2315 		struct extent_state *cached = NULL;
2316 		struct extent_state *state;
2317 
2318 		pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2319 			 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2320 			 (long int)bio->bi_bdev);
2321 		tree = &BTRFS_I(page->mapping->host)->io_tree;
2322 
2323 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2324 			bvec->bv_offset;
2325 		end = start + bvec->bv_len - 1;
2326 
2327 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2328 			whole_page = 1;
2329 		else
2330 			whole_page = 0;
2331 
2332 		if (++bvec <= bvec_end)
2333 			prefetchw(&bvec->bv_page->flags);
2334 
2335 		spin_lock(&tree->lock);
2336 		state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
2337 		if (state && state->start == start) {
2338 			/*
2339 			 * take a reference on the state, unlock will drop
2340 			 * the ref
2341 			 */
2342 			cache_state(state, &cached);
2343 		}
2344 		spin_unlock(&tree->lock);
2345 
2346 		mirror = (int)(unsigned long)bio->bi_bdev;
2347 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2348 			ret = tree->ops->readpage_end_io_hook(page, start, end,
2349 							      state, mirror);
2350 			if (ret)
2351 				uptodate = 0;
2352 			else
2353 				clean_io_failure(start, page);
2354 		}
2355 
2356 		if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
2357 			ret = tree->ops->readpage_io_failed_hook(page, mirror);
2358 			if (!ret && !err &&
2359 			    test_bit(BIO_UPTODATE, &bio->bi_flags))
2360 				uptodate = 1;
2361 		} else if (!uptodate) {
2362 			/*
2363 			 * The generic bio_readpage_error handles errors the
2364 			 * following way: If possible, new read requests are
2365 			 * created and submitted and will end up in
2366 			 * end_bio_extent_readpage as well (if we're lucky, not
2367 			 * in the !uptodate case). In that case it returns 0 and
2368 			 * we just go on with the next page in our bio. If it
2369 			 * can't handle the error it will return -EIO and we
2370 			 * remain responsible for that page.
2371 			 */
2372 			ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
2373 			if (ret == 0) {
2374 				uptodate =
2375 					test_bit(BIO_UPTODATE, &bio->bi_flags);
2376 				if (err)
2377 					uptodate = 0;
2378 				uncache_state(&cached);
2379 				continue;
2380 			}
2381 		}
2382 
2383 		if (uptodate && tree->track_uptodate) {
2384 			set_extent_uptodate(tree, start, end, &cached,
2385 					    GFP_ATOMIC);
2386 		}
2387 		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2388 
2389 		if (whole_page) {
2390 			if (uptodate) {
2391 				SetPageUptodate(page);
2392 			} else {
2393 				ClearPageUptodate(page);
2394 				SetPageError(page);
2395 			}
2396 			unlock_page(page);
2397 		} else {
2398 			if (uptodate) {
2399 				check_page_uptodate(tree, page);
2400 			} else {
2401 				ClearPageUptodate(page);
2402 				SetPageError(page);
2403 			}
2404 			check_page_locked(tree, page);
2405 		}
2406 	} while (bvec <= bvec_end);
2407 
2408 	bio_put(bio);
2409 }
2410 
2411 struct bio *
2412 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2413 		gfp_t gfp_flags)
2414 {
2415 	struct bio *bio;
2416 
2417 	bio = bio_alloc(gfp_flags, nr_vecs);
2418 
2419 	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2420 		while (!bio && (nr_vecs /= 2))
2421 			bio = bio_alloc(gfp_flags, nr_vecs);
2422 	}
2423 
2424 	if (bio) {
2425 		bio->bi_size = 0;
2426 		bio->bi_bdev = bdev;
2427 		bio->bi_sector = first_sector;
2428 	}
2429 	return bio;
2430 }
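
/*
 * Editorial illustration, not part of the original file: a minimal,
 * hypothetical caller of btrfs_bio_alloc() above (the name and vec count are
 * invented).  If the initial allocation fails while the task is in memory
 * reclaim (PF_MEMALLOC), the helper retries with successively halved vec
 * counts (8, 4, 2, 1) before giving up.
 */
static struct bio *example_alloc_bio(struct block_device *bdev, u64 sector)
{
	return btrfs_bio_alloc(bdev, sector, 16, GFP_NOFS | __GFP_HIGH);
}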
2431 
2432 /*
2433  * Since writes are async, they will only return -ENOMEM.
2434  * Reads can return the full range of I/O error conditions.
2435  */
2436 static int __must_check submit_one_bio(int rw, struct bio *bio,
2437 				       int mirror_num, unsigned long bio_flags)
2438 {
2439 	int ret = 0;
2440 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2441 	struct page *page = bvec->bv_page;
2442 	struct extent_io_tree *tree = bio->bi_private;
2443 	u64 start;
2444 
2445 	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
2446 
2447 	bio->bi_private = NULL;
2448 
2449 	bio_get(bio);
2450 
2451 	if (tree->ops && tree->ops->submit_bio_hook)
2452 		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2453 					   mirror_num, bio_flags, start);
2454 	else
2455 		btrfsic_submit_bio(rw, bio);
2456 
2457 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2458 		ret = -EOPNOTSUPP;
2459 	bio_put(bio);
2460 	return ret;
2461 }
2462 
2463 static int merge_bio(struct extent_io_tree *tree, struct page *page,
2464 		     unsigned long offset, size_t size, struct bio *bio,
2465 		     unsigned long bio_flags)
2466 {
2467 	int ret = 0;
2468 	if (tree->ops && tree->ops->merge_bio_hook)
2469 		ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2470 						bio_flags);
2471 	BUG_ON(ret < 0);
2472 	return ret;
2473 
2474 }
2475 
2476 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2477 			      struct page *page, sector_t sector,
2478 			      size_t size, unsigned long offset,
2479 			      struct block_device *bdev,
2480 			      struct bio **bio_ret,
2481 			      unsigned long max_pages,
2482 			      bio_end_io_t end_io_func,
2483 			      int mirror_num,
2484 			      unsigned long prev_bio_flags,
2485 			      unsigned long bio_flags)
2486 {
2487 	int ret = 0;
2488 	struct bio *bio;
2489 	int nr;
2490 	int contig = 0;
2491 	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2492 	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2493 	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2494 
2495 	if (bio_ret && *bio_ret) {
2496 		bio = *bio_ret;
2497 		if (old_compressed)
2498 			contig = bio->bi_sector == sector;
2499 		else
2500 			contig = bio->bi_sector + (bio->bi_size >> 9) ==
2501 				sector;
2502 
2503 		if (prev_bio_flags != bio_flags || !contig ||
2504 		    merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
2505 		    bio_add_page(bio, page, page_size, offset) < page_size) {
2506 			ret = submit_one_bio(rw, bio, mirror_num,
2507 					     prev_bio_flags);
2508 			if (ret < 0)
2509 				return ret;
2510 			bio = NULL;
2511 		} else {
2512 			return 0;
2513 		}
2514 	}
2515 	if (this_compressed)
2516 		nr = BIO_MAX_PAGES;
2517 	else
2518 		nr = bio_get_nr_vecs(bdev);
2519 
2520 	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2521 	if (!bio)
2522 		return -ENOMEM;
2523 
2524 	bio_add_page(bio, page, page_size, offset);
2525 	bio->bi_end_io = end_io_func;
2526 	bio->bi_private = tree;
2527 
2528 	if (bio_ret)
2529 		*bio_ret = bio;
2530 	else
2531 		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2532 
2533 	return ret;
2534 }
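
/*
 * Editorial worked example, not part of the original file: for uncompressed
 * bios, the contiguity test above treats a new page as mergeable only when
 * it starts exactly where the pending bio ends.  With bio->bi_sector == 2048
 * and bio->bi_size == 8192 (16 sectors), the pending bio ends at sector
 * 2048 + (8192 >> 9) == 2064, so only a page destined for sector 2064 can be
 * merged; anything else forces the pending bio to be submitted first.
 */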
2535 
2536 void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
2537 {
2538 	if (!PagePrivate(page)) {
2539 		SetPagePrivate(page);
2540 		page_cache_get(page);
2541 		set_page_private(page, (unsigned long)eb);
2542 	} else {
2543 		WARN_ON(page->private != (unsigned long)eb);
2544 	}
2545 }
2546 
2547 void set_page_extent_mapped(struct page *page)
2548 {
2549 	if (!PagePrivate(page)) {
2550 		SetPagePrivate(page);
2551 		page_cache_get(page);
2552 		set_page_private(page, EXTENT_PAGE_PRIVATE);
2553 	}
2554 }
2555 
2556 /*
2557  * basic readpage implementation.  Locked extent state structs are inserted
2558  * into the tree and removed again when the IO is done (by the end_io
2559  * handlers)
2560  * XXX JDM: This needs looking at to ensure proper page locking
2561  */
2562 static int __extent_read_full_page(struct extent_io_tree *tree,
2563 				   struct page *page,
2564 				   get_extent_t *get_extent,
2565 				   struct bio **bio, int mirror_num,
2566 				   unsigned long *bio_flags)
2567 {
2568 	struct inode *inode = page->mapping->host;
2569 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2570 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
2571 	u64 end;
2572 	u64 cur = start;
2573 	u64 extent_offset;
2574 	u64 last_byte = i_size_read(inode);
2575 	u64 block_start;
2576 	u64 cur_end;
2577 	sector_t sector;
2578 	struct extent_map *em;
2579 	struct block_device *bdev;
2580 	struct btrfs_ordered_extent *ordered;
2581 	int ret;
2582 	int nr = 0;
2583 	size_t pg_offset = 0;
2584 	size_t iosize;
2585 	size_t disk_io_size;
2586 	size_t blocksize = inode->i_sb->s_blocksize;
2587 	unsigned long this_bio_flag = 0;
2588 
2589 	set_page_extent_mapped(page);
2590 
2591 	if (!PageUptodate(page)) {
2592 		if (cleancache_get_page(page) == 0) {
2593 			BUG_ON(blocksize != PAGE_SIZE);
2594 			goto out;
2595 		}
2596 	}
2597 
2598 	end = page_end;
2599 	while (1) {
2600 		lock_extent(tree, start, end);
2601 		ordered = btrfs_lookup_ordered_extent(inode, start);
2602 		if (!ordered)
2603 			break;
2604 		unlock_extent(tree, start, end);
2605 		btrfs_start_ordered_extent(inode, ordered, 1);
2606 		btrfs_put_ordered_extent(ordered);
2607 	}
2608 
2609 	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2610 		char *userpage;
2611 		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2612 
2613 		if (zero_offset) {
2614 			iosize = PAGE_CACHE_SIZE - zero_offset;
2615 			userpage = kmap_atomic(page);
2616 			memset(userpage + zero_offset, 0, iosize);
2617 			flush_dcache_page(page);
2618 			kunmap_atomic(userpage);
2619 		}
2620 	}
2621 	while (cur <= end) {
2622 		if (cur >= last_byte) {
2623 			char *userpage;
2624 			struct extent_state *cached = NULL;
2625 
2626 			iosize = PAGE_CACHE_SIZE - pg_offset;
2627 			userpage = kmap_atomic(page);
2628 			memset(userpage + pg_offset, 0, iosize);
2629 			flush_dcache_page(page);
2630 			kunmap_atomic(userpage);
2631 			set_extent_uptodate(tree, cur, cur + iosize - 1,
2632 					    &cached, GFP_NOFS);
2633 			unlock_extent_cached(tree, cur, cur + iosize - 1,
2634 					     &cached, GFP_NOFS);
2635 			break;
2636 		}
2637 		em = get_extent(inode, page, pg_offset, cur,
2638 				end - cur + 1, 0);
2639 		if (IS_ERR_OR_NULL(em)) {
2640 			SetPageError(page);
2641 			unlock_extent(tree, cur, end);
2642 			break;
2643 		}
2644 		extent_offset = cur - em->start;
2645 		BUG_ON(extent_map_end(em) <= cur);
2646 		BUG_ON(end < cur);
2647 
2648 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2649 			this_bio_flag = EXTENT_BIO_COMPRESSED;
2650 			extent_set_compress_type(&this_bio_flag,
2651 						 em->compress_type);
2652 		}
2653 
2654 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2655 		cur_end = min(extent_map_end(em) - 1, end);
2656 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2657 		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2658 			disk_io_size = em->block_len;
2659 			sector = em->block_start >> 9;
2660 		} else {
2661 			sector = (em->block_start + extent_offset) >> 9;
2662 			disk_io_size = iosize;
2663 		}
2664 		bdev = em->bdev;
2665 		block_start = em->block_start;
2666 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2667 			block_start = EXTENT_MAP_HOLE;
2668 		free_extent_map(em);
2669 		em = NULL;
2670 
2671 		/* we've found a hole, just zero and go on */
2672 		if (block_start == EXTENT_MAP_HOLE) {
2673 			char *userpage;
2674 			struct extent_state *cached = NULL;
2675 
2676 			userpage = kmap_atomic(page);
2677 			memset(userpage + pg_offset, 0, iosize);
2678 			flush_dcache_page(page);
2679 			kunmap_atomic(userpage);
2680 
2681 			set_extent_uptodate(tree, cur, cur + iosize - 1,
2682 					    &cached, GFP_NOFS);
2683 			unlock_extent_cached(tree, cur, cur + iosize - 1,
2684 			                     &cached, GFP_NOFS);
2685 			cur = cur + iosize;
2686 			pg_offset += iosize;
2687 			continue;
2688 		}
2689 		/* the get_extent function already copied into the page */
2690 		if (test_range_bit(tree, cur, cur_end,
2691 				   EXTENT_UPTODATE, 1, NULL)) {
2692 			check_page_uptodate(tree, page);
2693 			unlock_extent(tree, cur, cur + iosize - 1);
2694 			cur = cur + iosize;
2695 			pg_offset += iosize;
2696 			continue;
2697 		}
2698 		/* we have an inline extent but it didn't get marked up
2699 		 * to date.  Error out
2700 		 */
2701 		if (block_start == EXTENT_MAP_INLINE) {
2702 			SetPageError(page);
2703 			unlock_extent(tree, cur, cur + iosize - 1);
2704 			cur = cur + iosize;
2705 			pg_offset += iosize;
2706 			continue;
2707 		}
2708 
2709 		ret = 0;
2710 		if (tree->ops && tree->ops->readpage_io_hook) {
2711 			ret = tree->ops->readpage_io_hook(page, cur,
2712 							  cur + iosize - 1);
2713 		}
2714 		if (!ret) {
2715 			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2716 			pnr -= page->index;
2717 			ret = submit_extent_page(READ, tree, page,
2718 					 sector, disk_io_size, pg_offset,
2719 					 bdev, bio, pnr,
2720 					 end_bio_extent_readpage, mirror_num,
2721 					 *bio_flags,
2722 					 this_bio_flag);
2723 			BUG_ON(ret == -ENOMEM);
2724 			nr++;
2725 			*bio_flags = this_bio_flag;
2726 		}
2727 		if (ret)
2728 			SetPageError(page);
2729 		cur = cur + iosize;
2730 		pg_offset += iosize;
2731 	}
2732 out:
2733 	if (!nr) {
2734 		if (!PageError(page))
2735 			SetPageUptodate(page);
2736 		unlock_page(page);
2737 	}
2738 	return 0;
2739 }
2740 
2741 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2742 			    get_extent_t *get_extent, int mirror_num)
2743 {
2744 	struct bio *bio = NULL;
2745 	unsigned long bio_flags = 0;
2746 	int ret;
2747 
2748 	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
2749 				      &bio_flags);
2750 	if (bio)
2751 		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
2752 	return ret;
2753 }
2754 
2755 static noinline void update_nr_written(struct page *page,
2756 				      struct writeback_control *wbc,
2757 				      unsigned long nr_written)
2758 {
2759 	wbc->nr_to_write -= nr_written;
2760 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2761 	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2762 		page->mapping->writeback_index = page->index + nr_written;
2763 }
2764 
2765 /*
2766  * the writepage semantics are similar to regular writepage.  extent
2767  * records are inserted to lock ranges in the tree, and as dirty areas
2768  * are found, they are marked writeback.  Then the lock bits are removed
2769  * and the end_io handler clears the writeback ranges
2770  */
2771 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2772 			      void *data)
2773 {
2774 	struct inode *inode = page->mapping->host;
2775 	struct extent_page_data *epd = data;
2776 	struct extent_io_tree *tree = epd->tree;
2777 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2778 	u64 delalloc_start;
2779 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
2780 	u64 end;
2781 	u64 cur = start;
2782 	u64 extent_offset;
2783 	u64 last_byte = i_size_read(inode);
2784 	u64 block_start;
2785 	u64 iosize;
2786 	sector_t sector;
2787 	struct extent_state *cached_state = NULL;
2788 	struct extent_map *em;
2789 	struct block_device *bdev;
2790 	int ret;
2791 	int nr = 0;
2792 	size_t pg_offset = 0;
2793 	size_t blocksize;
2794 	loff_t i_size = i_size_read(inode);
2795 	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2796 	u64 nr_delalloc;
2797 	u64 delalloc_end;
2798 	int page_started;
2799 	int compressed;
2800 	int write_flags;
2801 	unsigned long nr_written = 0;
2802 	bool fill_delalloc = true;
2803 
2804 	if (wbc->sync_mode == WB_SYNC_ALL)
2805 		write_flags = WRITE_SYNC;
2806 	else
2807 		write_flags = WRITE;
2808 
2809 	trace___extent_writepage(page, inode, wbc);
2810 
2811 	WARN_ON(!PageLocked(page));
2812 
2813 	ClearPageError(page);
2814 
2815 	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2816 	if (page->index > end_index ||
2817 	   (page->index == end_index && !pg_offset)) {
2818 		page->mapping->a_ops->invalidatepage(page, 0);
2819 		unlock_page(page);
2820 		return 0;
2821 	}
2822 
2823 	if (page->index == end_index) {
2824 		char *userpage;
2825 
2826 		userpage = kmap_atomic(page);
2827 		memset(userpage + pg_offset, 0,
2828 		       PAGE_CACHE_SIZE - pg_offset);
2829 		kunmap_atomic(userpage);
2830 		flush_dcache_page(page);
2831 	}
2832 	pg_offset = 0;
2833 
2834 	set_page_extent_mapped(page);
2835 
2836 	if (!tree->ops || !tree->ops->fill_delalloc)
2837 		fill_delalloc = false;
2838 
2839 	delalloc_start = start;
2840 	delalloc_end = 0;
2841 	page_started = 0;
2842 	if (!epd->extent_locked && fill_delalloc) {
2843 		u64 delalloc_to_write = 0;
2844 		/*
2845 		 * make sure the wbc mapping index is at least updated
2846 		 * to this page.
2847 		 */
2848 		update_nr_written(page, wbc, 0);
2849 
2850 		while (delalloc_end < page_end) {
2851 			nr_delalloc = find_lock_delalloc_range(inode, tree,
2852 						       page,
2853 						       &delalloc_start,
2854 						       &delalloc_end,
2855 						       128 * 1024 * 1024);
2856 			if (nr_delalloc == 0) {
2857 				delalloc_start = delalloc_end + 1;
2858 				continue;
2859 			}
2860 			ret = tree->ops->fill_delalloc(inode, page,
2861 						       delalloc_start,
2862 						       delalloc_end,
2863 						       &page_started,
2864 						       &nr_written);
2865 			/* File system has been set read-only */
2866 			if (ret) {
2867 				SetPageError(page);
2868 				goto done;
2869 			}
2870 			/*
2871 			 * delalloc_end is already one less than the total
2872 			 * length, so we don't subtract one from
2873 			 * PAGE_CACHE_SIZE
2874 			 */
2875 			delalloc_to_write += (delalloc_end - delalloc_start +
2876 					      PAGE_CACHE_SIZE) >>
2877 					      PAGE_CACHE_SHIFT;
2878 			delalloc_start = delalloc_end + 1;
2879 		}
2880 		if (wbc->nr_to_write < delalloc_to_write) {
2881 			int thresh = 8192;
2882 
2883 			if (delalloc_to_write < thresh * 2)
2884 				thresh = delalloc_to_write;
2885 			wbc->nr_to_write = min_t(u64, delalloc_to_write,
2886 						 thresh);
2887 		}
2888 
2889 		/* did the fill delalloc function already unlock and start
2890 		 * the IO?
2891 		 */
2892 		if (page_started) {
2893 			ret = 0;
2894 			/*
2895 			 * we've unlocked the page, so we can't update
2896 			 * the mapping's writeback index, just update
2897 			 * nr_to_write.
2898 			 */
2899 			wbc->nr_to_write -= nr_written;
2900 			goto done_unlocked;
2901 		}
2902 	}
2903 	if (tree->ops && tree->ops->writepage_start_hook) {
2904 		ret = tree->ops->writepage_start_hook(page, start,
2905 						      page_end);
2906 		if (ret) {
2907 			/* Fixup worker will requeue */
2908 			if (ret == -EBUSY)
2909 				wbc->pages_skipped++;
2910 			else
2911 				redirty_page_for_writepage(wbc, page);
2912 			update_nr_written(page, wbc, nr_written);
2913 			unlock_page(page);
2914 			ret = 0;
2915 			goto done_unlocked;
2916 		}
2917 	}
2918 
2919 	/*
2920 	 * we don't want to touch the inode after unlocking the page,
2921 	 * so we update the mapping writeback index now
2922 	 */
2923 	update_nr_written(page, wbc, nr_written + 1);
2924 
2925 	end = page_end;
2926 	if (last_byte <= start) {
2927 		if (tree->ops && tree->ops->writepage_end_io_hook)
2928 			tree->ops->writepage_end_io_hook(page, start,
2929 							 page_end, NULL, 1);
2930 		goto done;
2931 	}
2932 
2933 	blocksize = inode->i_sb->s_blocksize;
2934 
2935 	while (cur <= end) {
2936 		if (cur >= last_byte) {
2937 			if (tree->ops && tree->ops->writepage_end_io_hook)
2938 				tree->ops->writepage_end_io_hook(page, cur,
2939 							 page_end, NULL, 1);
2940 			break;
2941 		}
2942 		em = epd->get_extent(inode, page, pg_offset, cur,
2943 				     end - cur + 1, 1);
2944 		if (IS_ERR_OR_NULL(em)) {
2945 			SetPageError(page);
2946 			break;
2947 		}
2948 
2949 		extent_offset = cur - em->start;
2950 		BUG_ON(extent_map_end(em) <= cur);
2951 		BUG_ON(end < cur);
2952 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2953 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2954 		sector = (em->block_start + extent_offset) >> 9;
2955 		bdev = em->bdev;
2956 		block_start = em->block_start;
2957 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2958 		free_extent_map(em);
2959 		em = NULL;
2960 
2961 		/*
2962 		 * compressed and inline extents are written through other
2963 		 * paths in the FS
2964 		 */
2965 		if (compressed || block_start == EXTENT_MAP_HOLE ||
2966 		    block_start == EXTENT_MAP_INLINE) {
2967 			/*
2968 			 * end_io notification does not happen here for
2969 			 * compressed extents
2970 			 */
2971 			if (!compressed && tree->ops &&
2972 			    tree->ops->writepage_end_io_hook)
2973 				tree->ops->writepage_end_io_hook(page, cur,
2974 							 cur + iosize - 1,
2975 							 NULL, 1);
2976 			else if (compressed) {
2977 				/* we don't want to end_page_writeback on
2978 				 * a compressed extent.  this happens
2979 				 * elsewhere
2980 				 */
2981 				nr++;
2982 			}
2983 
2984 			cur += iosize;
2985 			pg_offset += iosize;
2986 			continue;
2987 		}
2988 		/* leave this out until we have a page_mkwrite call */
2989 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2990 				   EXTENT_DIRTY, 0, NULL)) {
2991 			cur = cur + iosize;
2992 			pg_offset += iosize;
2993 			continue;
2994 		}
2995 
2996 		if (tree->ops && tree->ops->writepage_io_hook) {
2997 			ret = tree->ops->writepage_io_hook(page, cur,
2998 						cur + iosize - 1);
2999 		} else {
3000 			ret = 0;
3001 		}
3002 		if (ret) {
3003 			SetPageError(page);
3004 		} else {
3005 			unsigned long max_nr = end_index + 1;
3006 
3007 			set_range_writeback(tree, cur, cur + iosize - 1);
3008 			if (!PageWriteback(page)) {
3009 				printk(KERN_ERR "btrfs warning page %lu not "
3010 				       "writeback, cur %llu end %llu\n",
3011 				       page->index, (unsigned long long)cur,
3012 				       (unsigned long long)end);
3013 			}
3014 
3015 			ret = submit_extent_page(write_flags, tree, page,
3016 						 sector, iosize, pg_offset,
3017 						 bdev, &epd->bio, max_nr,
3018 						 end_bio_extent_writepage,
3019 						 0, 0, 0);
3020 			if (ret)
3021 				SetPageError(page);
3022 		}
3023 		cur = cur + iosize;
3024 		pg_offset += iosize;
3025 		nr++;
3026 	}
3027 done:
3028 	if (nr == 0) {
3029 		/* make sure the mapping tag for page dirty gets cleared */
3030 		set_page_writeback(page);
3031 		end_page_writeback(page);
3032 	}
3033 	unlock_page(page);
3034 
3035 done_unlocked:
3036 
3037 	/* drop our reference on any cached states */
3038 	free_extent_state(cached_state);
3039 	return 0;
3040 }
3041 
3042 static int eb_wait(void *word)
3043 {
3044 	io_schedule();
3045 	return 0;
3046 }
3047 
3048 static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3049 {
3050 	wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3051 		    TASK_UNINTERRUPTIBLE);
3052 }
3053 
3054 static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3055 				     struct btrfs_fs_info *fs_info,
3056 				     struct extent_page_data *epd)
3057 {
3058 	unsigned long i, num_pages;
3059 	int flush = 0;
3060 	int ret = 0;
3061 
3062 	if (!btrfs_try_tree_write_lock(eb)) {
3063 		flush = 1;
3064 		flush_write_bio(epd);
3065 		btrfs_tree_lock(eb);
3066 	}
3067 
3068 	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3069 		btrfs_tree_unlock(eb);
3070 		if (!epd->sync_io)
3071 			return 0;
3072 		if (!flush) {
3073 			flush_write_bio(epd);
3074 			flush = 1;
3075 		}
3076 		while (1) {
3077 			wait_on_extent_buffer_writeback(eb);
3078 			btrfs_tree_lock(eb);
3079 			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3080 				break;
3081 			btrfs_tree_unlock(eb);
3082 		}
3083 	}
3084 
3085 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3086 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3087 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3088 		spin_lock(&fs_info->delalloc_lock);
3089 		if (fs_info->dirty_metadata_bytes >= eb->len)
3090 			fs_info->dirty_metadata_bytes -= eb->len;
3091 		else
3092 			WARN_ON(1);
3093 		spin_unlock(&fs_info->delalloc_lock);
3094 		ret = 1;
3095 	}
3096 
3097 	btrfs_tree_unlock(eb);
3098 
3099 	if (!ret)
3100 		return ret;
3101 
3102 	num_pages = num_extent_pages(eb->start, eb->len);
3103 	for (i = 0; i < num_pages; i++) {
3104 		struct page *p = extent_buffer_page(eb, i);
3105 
3106 		if (!trylock_page(p)) {
3107 			if (!flush) {
3108 				flush_write_bio(epd);
3109 				flush = 1;
3110 			}
3111 			lock_page(p);
3112 		}
3113 	}
3114 
3115 	return ret;
3116 }
3117 
3118 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3119 {
3120 	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3121 	smp_mb__after_clear_bit();
3122 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3123 }
3124 
3125 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3126 {
3127 	int uptodate = err == 0;
3128 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3129 	struct extent_buffer *eb;
3130 	int done;
3131 
3132 	do {
3133 		struct page *page = bvec->bv_page;
3134 
3135 		bvec--;
3136 		eb = (struct extent_buffer *)page->private;
3137 		BUG_ON(!eb);
3138 		done = atomic_dec_and_test(&eb->io_pages);
3139 
3140 		if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3141 			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3142 			ClearPageUptodate(page);
3143 			SetPageError(page);
3144 		}
3145 
3146 		end_page_writeback(page);
3147 
3148 		if (!done)
3149 			continue;
3150 
3151 		end_extent_buffer_writeback(eb);
3152 	} while (bvec >= bio->bi_io_vec);
3153 
3154 	bio_put(bio);
3155 
3156 }
3157 
3158 static int write_one_eb(struct extent_buffer *eb,
3159 			struct btrfs_fs_info *fs_info,
3160 			struct writeback_control *wbc,
3161 			struct extent_page_data *epd)
3162 {
3163 	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3164 	u64 offset = eb->start;
3165 	unsigned long i, num_pages;
3166 	int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3167 	int ret;
3168 
3169 	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3170 	num_pages = num_extent_pages(eb->start, eb->len);
3171 	atomic_set(&eb->io_pages, num_pages);
3172 	for (i = 0; i < num_pages; i++) {
3173 		struct page *p = extent_buffer_page(eb, i);
3174 
3175 		clear_page_dirty_for_io(p);
3176 		set_page_writeback(p);
3177 		ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3178 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3179 					 -1, end_bio_extent_buffer_writepage,
3180 					 0, 0, 0);
3181 		if (ret) {
3182 			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3183 			SetPageError(p);
3184 			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3185 				end_extent_buffer_writeback(eb);
3186 			ret = -EIO;
3187 			break;
3188 		}
3189 		offset += PAGE_CACHE_SIZE;
3190 		update_nr_written(p, wbc, 1);
3191 		unlock_page(p);
3192 	}
3193 
3194 	if (unlikely(ret)) {
3195 		for (; i < num_pages; i++) {
3196 			struct page *p = extent_buffer_page(eb, i);
3197 			unlock_page(p);
3198 		}
3199 	}
3200 
3201 	return ret;
3202 }
3203 
3204 int btree_write_cache_pages(struct address_space *mapping,
3205 				   struct writeback_control *wbc)
3206 {
3207 	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3208 	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3209 	struct extent_buffer *eb, *prev_eb = NULL;
3210 	struct extent_page_data epd = {
3211 		.bio = NULL,
3212 		.tree = tree,
3213 		.extent_locked = 0,
3214 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3215 	};
3216 	int ret = 0;
3217 	int done = 0;
3218 	int nr_to_write_done = 0;
3219 	struct pagevec pvec;
3220 	int nr_pages;
3221 	pgoff_t index;
3222 	pgoff_t end;		/* Inclusive */
3223 	int scanned = 0;
3224 	int tag;
3225 
3226 	pagevec_init(&pvec, 0);
3227 	if (wbc->range_cyclic) {
3228 		index = mapping->writeback_index; /* Start from prev offset */
3229 		end = -1;
3230 	} else {
3231 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
3232 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
3233 		scanned = 1;
3234 	}
3235 	if (wbc->sync_mode == WB_SYNC_ALL)
3236 		tag = PAGECACHE_TAG_TOWRITE;
3237 	else
3238 		tag = PAGECACHE_TAG_DIRTY;
3239 retry:
3240 	if (wbc->sync_mode == WB_SYNC_ALL)
3241 		tag_pages_for_writeback(mapping, index, end);
3242 	while (!done && !nr_to_write_done && (index <= end) &&
3243 	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3244 			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3245 		unsigned i;
3246 
3247 		scanned = 1;
3248 		for (i = 0; i < nr_pages; i++) {
3249 			struct page *page = pvec.pages[i];
3250 
3251 			if (!PagePrivate(page))
3252 				continue;
3253 
3254 			if (!wbc->range_cyclic && page->index > end) {
3255 				done = 1;
3256 				break;
3257 			}
3258 
3259 			eb = (struct extent_buffer *)page->private;
3260 			if (!eb) {
3261 				WARN_ON(1);
3262 				continue;
3263 			}
3264 
3265 			if (eb == prev_eb)
3266 				continue;
3267 
3268 			if (!atomic_inc_not_zero(&eb->refs)) {
3269 				WARN_ON(1);
3270 				continue;
3271 			}
3272 
3273 			prev_eb = eb;
3274 			ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3275 			if (!ret) {
3276 				free_extent_buffer(eb);
3277 				continue;
3278 			}
3279 
3280 			ret = write_one_eb(eb, fs_info, wbc, &epd);
3281 			if (ret) {
3282 				done = 1;
3283 				free_extent_buffer(eb);
3284 				break;
3285 			}
3286 			free_extent_buffer(eb);
3287 
3288 			/*
3289 			 * the filesystem may choose to bump up nr_to_write.
3290 			 * We have to make sure to honor the new nr_to_write
3291 			 * at any time
3292 			 */
3293 			nr_to_write_done = wbc->nr_to_write <= 0;
3294 		}
3295 		pagevec_release(&pvec);
3296 		cond_resched();
3297 	}
3298 	if (!scanned && !done) {
3299 		/*
3300 		 * We hit the last page and there is more work to be done: wrap
3301 		 * back to the start of the file
3302 		 */
3303 		scanned = 1;
3304 		index = 0;
3305 		goto retry;
3306 	}
3307 	flush_write_bio(&epd);
3308 	return ret;
3309 }
3310 
3311 /**
3312  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3313  * @mapping: address space structure to write
3314  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3315  * @writepage: function called for each page
3316  * @data: data passed to writepage function
3317  *
3318  * If a page is already under I/O, write_cache_pages() skips it, even
3319  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3320  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3321  * and msync() need to guarantee that all the data which was dirty at the time
3322  * the call was made get new I/O started against them.  If wbc->sync_mode is
3323  * WB_SYNC_ALL then we were called for data integrity and we must wait for
3324  * existing IO to complete.
3325  */
3326 static int extent_write_cache_pages(struct extent_io_tree *tree,
3327 			     struct address_space *mapping,
3328 			     struct writeback_control *wbc,
3329 			     writepage_t writepage, void *data,
3330 			     void (*flush_fn)(void *))
3331 {
3332 	int ret = 0;
3333 	int done = 0;
3334 	int nr_to_write_done = 0;
3335 	struct pagevec pvec;
3336 	int nr_pages;
3337 	pgoff_t index;
3338 	pgoff_t end;		/* Inclusive */
3339 	int scanned = 0;
3340 	int tag;
3341 
3342 	pagevec_init(&pvec, 0);
3343 	if (wbc->range_cyclic) {
3344 		index = mapping->writeback_index; /* Start from prev offset */
3345 		end = -1;
3346 	} else {
3347 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
3348 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
3349 		scanned = 1;
3350 	}
3351 	if (wbc->sync_mode == WB_SYNC_ALL)
3352 		tag = PAGECACHE_TAG_TOWRITE;
3353 	else
3354 		tag = PAGECACHE_TAG_DIRTY;
3355 retry:
3356 	if (wbc->sync_mode == WB_SYNC_ALL)
3357 		tag_pages_for_writeback(mapping, index, end);
3358 	while (!done && !nr_to_write_done && (index <= end) &&
3359 	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3360 			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3361 		unsigned i;
3362 
3363 		scanned = 1;
3364 		for (i = 0; i < nr_pages; i++) {
3365 			struct page *page = pvec.pages[i];
3366 
3367 			/*
3368 			 * At this point we hold neither mapping->tree_lock nor
3369 			 * lock on the page itself: the page may be truncated or
3370 			 * invalidated (changing page->mapping to NULL), or even
3371 			 * swizzled back from swapper_space to tmpfs file
3372 			 * mapping
3373 			 */
3374 			if (tree->ops &&
3375 			    tree->ops->write_cache_pages_lock_hook) {
3376 				tree->ops->write_cache_pages_lock_hook(page,
3377 							       data, flush_fn);
3378 			} else {
3379 				if (!trylock_page(page)) {
3380 					flush_fn(data);
3381 					lock_page(page);
3382 				}
3383 			}
3384 
3385 			if (unlikely(page->mapping != mapping)) {
3386 				unlock_page(page);
3387 				continue;
3388 			}
3389 
3390 			if (!wbc->range_cyclic && page->index > end) {
3391 				done = 1;
3392 				unlock_page(page);
3393 				continue;
3394 			}
3395 
3396 			if (wbc->sync_mode != WB_SYNC_NONE) {
3397 				if (PageWriteback(page))
3398 					flush_fn(data);
3399 				wait_on_page_writeback(page);
3400 			}
3401 
3402 			if (PageWriteback(page) ||
3403 			    !clear_page_dirty_for_io(page)) {
3404 				unlock_page(page);
3405 				continue;
3406 			}
3407 
3408 			ret = (*writepage)(page, wbc, data);
3409 
3410 			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3411 				unlock_page(page);
3412 				ret = 0;
3413 			}
3414 			if (ret)
3415 				done = 1;
3416 
3417 			/*
3418 			 * the filesystem may choose to bump up nr_to_write.
3419 			 * We have to make sure to honor the new nr_to_write
3420 			 * at any time
3421 			 */
3422 			nr_to_write_done = wbc->nr_to_write <= 0;
3423 		}
3424 		pagevec_release(&pvec);
3425 		cond_resched();
3426 	}
3427 	if (!scanned && !done) {
3428 		/*
3429 		 * We hit the last page and there is more work to be done: wrap
3430 		 * back to the start of the file
3431 		 */
3432 		scanned = 1;
3433 		index = 0;
3434 		goto retry;
3435 	}
3436 	return ret;
3437 }
3438 
3439 static void flush_epd_write_bio(struct extent_page_data *epd)
3440 {
3441 	if (epd->bio) {
3442 		int rw = WRITE;
3443 		int ret;
3444 
3445 		if (epd->sync_io)
3446 			rw = WRITE_SYNC;
3447 
3448 		ret = submit_one_bio(rw, epd->bio, 0, 0);
3449 		BUG_ON(ret < 0); /* -ENOMEM */
3450 		epd->bio = NULL;
3451 	}
3452 }
3453 
3454 static noinline void flush_write_bio(void *data)
3455 {
3456 	struct extent_page_data *epd = data;
3457 	flush_epd_write_bio(epd);
3458 }
3459 
3460 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3461 			  get_extent_t *get_extent,
3462 			  struct writeback_control *wbc)
3463 {
3464 	int ret;
3465 	struct extent_page_data epd = {
3466 		.bio = NULL,
3467 		.tree = tree,
3468 		.get_extent = get_extent,
3469 		.extent_locked = 0,
3470 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3471 	};
3472 
3473 	ret = __extent_writepage(page, wbc, &epd);
3474 
3475 	flush_epd_write_bio(&epd);
3476 	return ret;
3477 }
3478 
3479 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3480 			      u64 start, u64 end, get_extent_t *get_extent,
3481 			      int mode)
3482 {
3483 	int ret = 0;
3484 	struct address_space *mapping = inode->i_mapping;
3485 	struct page *page;
3486 	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3487 		PAGE_CACHE_SHIFT;
3488 
3489 	struct extent_page_data epd = {
3490 		.bio = NULL,
3491 		.tree = tree,
3492 		.get_extent = get_extent,
3493 		.extent_locked = 1,
3494 		.sync_io = mode == WB_SYNC_ALL,
3495 	};
3496 	struct writeback_control wbc_writepages = {
3497 		.sync_mode	= mode,
3498 		.nr_to_write	= nr_pages * 2,
3499 		.range_start	= start,
3500 		.range_end	= end + 1,
3501 	};
3502 
3503 	while (start <= end) {
3504 		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3505 		if (clear_page_dirty_for_io(page))
3506 			ret = __extent_writepage(page, &wbc_writepages, &epd);
3507 		else {
3508 			if (tree->ops && tree->ops->writepage_end_io_hook)
3509 				tree->ops->writepage_end_io_hook(page, start,
3510 						 start + PAGE_CACHE_SIZE - 1,
3511 						 NULL, 1);
3512 			unlock_page(page);
3513 		}
3514 		page_cache_release(page);
3515 		start += PAGE_CACHE_SIZE;
3516 	}
3517 
3518 	flush_epd_write_bio(&epd);
3519 	return ret;
3520 }
3521 
3522 int extent_writepages(struct extent_io_tree *tree,
3523 		      struct address_space *mapping,
3524 		      get_extent_t *get_extent,
3525 		      struct writeback_control *wbc)
3526 {
3527 	int ret = 0;
3528 	struct extent_page_data epd = {
3529 		.bio = NULL,
3530 		.tree = tree,
3531 		.get_extent = get_extent,
3532 		.extent_locked = 0,
3533 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3534 	};
3535 
3536 	ret = extent_write_cache_pages(tree, mapping, wbc,
3537 				       __extent_writepage, &epd,
3538 				       flush_write_bio);
3539 	flush_epd_write_bio(&epd);
3540 	return ret;
3541 }
3542 
3543 int extent_readpages(struct extent_io_tree *tree,
3544 		     struct address_space *mapping,
3545 		     struct list_head *pages, unsigned nr_pages,
3546 		     get_extent_t get_extent)
3547 {
3548 	struct bio *bio = NULL;
3549 	unsigned page_idx;
3550 	unsigned long bio_flags = 0;
3551 
3552 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3553 		struct page *page = list_entry(pages->prev, struct page, lru);
3554 
3555 		prefetchw(&page->flags);
3556 		list_del(&page->lru);
3557 		if (!add_to_page_cache_lru(page, mapping,
3558 					page->index, GFP_NOFS)) {
3559 			__extent_read_full_page(tree, page, get_extent,
3560 						&bio, 0, &bio_flags);
3561 		}
3562 		page_cache_release(page);
3563 	}
3564 	BUG_ON(!list_empty(pages));
3565 	if (bio)
3566 		return submit_one_bio(READ, bio, 0, bio_flags);
3567 	return 0;
3568 }
3569 
3570 /*
3571  * basic invalidatepage code; it waits on any locked or writeback
3572  * ranges corresponding to the page, and then deletes any extent state
3573  * records from the tree
3574  */
3575 int extent_invalidatepage(struct extent_io_tree *tree,
3576 			  struct page *page, unsigned long offset)
3577 {
3578 	struct extent_state *cached_state = NULL;
3579 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3580 	u64 end = start + PAGE_CACHE_SIZE - 1;
3581 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3582 
3583 	start += (offset + blocksize - 1) & ~(blocksize - 1);
3584 	if (start > end)
3585 		return 0;
3586 
3587 	lock_extent_bits(tree, start, end, 0, &cached_state);
3588 	wait_on_page_writeback(page);
3589 	clear_extent_bit(tree, start, end,
3590 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3591 			 EXTENT_DO_ACCOUNTING,
3592 			 1, 1, &cached_state, GFP_NOFS);
3593 	return 0;
3594 }
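
/*
 * Editorial worked example, not part of the original file: with a 4096 byte
 * blocksize, an invalidate that begins at offset 100 into the page rounds up
 * to the next block boundary:
 *
 *	(100 + 4095) & ~4095 == 4096
 *
 * so 'start' advances by one full block and the partially invalidated block
 * keeps its extent state records.
 */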
3595 
3596 /*
3597  * a helper for releasepage; it tests for areas of the page that
3598  * are locked or under IO and drops the related state bits if it is safe
3599  * to drop the page.
3600  */
3601 int try_release_extent_state(struct extent_map_tree *map,
3602 			     struct extent_io_tree *tree, struct page *page,
3603 			     gfp_t mask)
3604 {
3605 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3606 	u64 end = start + PAGE_CACHE_SIZE - 1;
3607 	int ret = 1;
3608 
3609 	if (test_range_bit(tree, start, end,
3610 			   EXTENT_IOBITS, 0, NULL))
3611 		ret = 0;
3612 	else {
3613 		if ((mask & GFP_NOFS) == GFP_NOFS)
3614 			mask = GFP_NOFS;
3615 		/*
3616 		 * at this point we can safely clear everything except the
3617 		 * locked bit and the nodatasum bit
3618 		 */
3619 		ret = clear_extent_bit(tree, start, end,
3620 				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3621 				 0, 0, NULL, mask);
3622 
3623 		/* if clear_extent_bit failed for enomem reasons,
3624 		 * we can't allow the release to continue.
3625 		 */
3626 		if (ret < 0)
3627 			ret = 0;
3628 		else
3629 			ret = 1;
3630 	}
3631 	return ret;
3632 }
3633 
3634 /*
3635  * a helper for releasepage.  As long as there are no locked extents
3636  * in the range corresponding to the page, both state records and extent
3637  * map records are removed
3638  */
3639 int try_release_extent_mapping(struct extent_map_tree *map,
3640 			       struct extent_io_tree *tree, struct page *page,
3641 			       gfp_t mask)
3642 {
3643 	struct extent_map *em;
3644 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3645 	u64 end = start + PAGE_CACHE_SIZE - 1;
3646 
3647 	if ((mask & __GFP_WAIT) &&
3648 	    page->mapping->host->i_size > 16 * 1024 * 1024) {
3649 		u64 len;
3650 		while (start <= end) {
3651 			len = end - start + 1;
3652 			write_lock(&map->lock);
3653 			em = lookup_extent_mapping(map, start, len);
3654 			if (!em) {
3655 				write_unlock(&map->lock);
3656 				break;
3657 			}
3658 			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3659 			    em->start != start) {
3660 				write_unlock(&map->lock);
3661 				free_extent_map(em);
3662 				break;
3663 			}
3664 			if (!test_range_bit(tree, em->start,
3665 					    extent_map_end(em) - 1,
3666 					    EXTENT_LOCKED | EXTENT_WRITEBACK,
3667 					    0, NULL)) {
3668 				remove_extent_mapping(map, em);
3669 				/* once for the rb tree */
3670 				free_extent_map(em);
3671 			}
3672 			start = extent_map_end(em);
3673 			write_unlock(&map->lock);
3674 
3675 			/* once for us */
3676 			free_extent_map(em);
3677 		}
3678 	}
3679 	return try_release_extent_state(map, tree, page, mask);
3680 }
3681 
3682 /*
3683  * helper function for fiemap, which doesn't want to see any holes.
3684  * This maps until we find something past 'last'
3685  */
3686 static struct extent_map *get_extent_skip_holes(struct inode *inode,
3687 						u64 offset,
3688 						u64 last,
3689 						get_extent_t *get_extent)
3690 {
3691 	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3692 	struct extent_map *em;
3693 	u64 len;
3694 
3695 	if (offset >= last)
3696 		return NULL;
3697 
3698 	while (1) {
3699 		len = last - offset;
3700 		if (len == 0)
3701 			break;
3702 		len = (len + sectorsize - 1) & ~(sectorsize - 1);
3703 		em = get_extent(inode, NULL, 0, offset, len, 0);
3704 		if (IS_ERR_OR_NULL(em))
3705 			return em;
3706 
3707 		/* if this isn't a hole return it */
3708 		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3709 		    em->block_start != EXTENT_MAP_HOLE) {
3710 			return em;
3711 		}
3712 
3713 		/* this is a hole, advance to the next extent */
3714 		offset = extent_map_end(em);
3715 		free_extent_map(em);
3716 		if (offset >= last)
3717 			break;
3718 	}
3719 	return NULL;
3720 }
3721 
3722 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3723 		__u64 start, __u64 len, get_extent_t *get_extent)
3724 {
3725 	int ret = 0;
3726 	u64 off = start;
3727 	u64 max = start + len;
3728 	u32 flags = 0;
3729 	u32 found_type;
3730 	u64 last;
3731 	u64 last_for_get_extent = 0;
3732 	u64 disko = 0;
3733 	u64 isize = i_size_read(inode);
3734 	struct btrfs_key found_key;
3735 	struct extent_map *em = NULL;
3736 	struct extent_state *cached_state = NULL;
3737 	struct btrfs_path *path;
3738 	struct btrfs_file_extent_item *item;
3739 	int end = 0;
3740 	u64 em_start = 0;
3741 	u64 em_len = 0;
3742 	u64 em_end = 0;
3743 	unsigned long emflags;
3744 
3745 	if (len == 0)
3746 		return -EINVAL;
3747 
3748 	path = btrfs_alloc_path();
3749 	if (!path)
3750 		return -ENOMEM;
3751 	path->leave_spinning = 1;
3752 
3753 	start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3754 	len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3755 
3756 	/*
3757 	 * lookup the last file extent.  We're not using i_size here
3758 	 * because there might be preallocation past i_size
3759 	 */
3760 	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3761 				       path, btrfs_ino(inode), -1, 0);
3762 	if (ret < 0) {
3763 		btrfs_free_path(path);
3764 		return ret;
3765 	}
3766 	WARN_ON(!ret);
3767 	path->slots[0]--;
3768 	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3769 			      struct btrfs_file_extent_item);
3770 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3771 	found_type = btrfs_key_type(&found_key);
3772 
3773 	/* No extents, but there might be delalloc bits */
3774 	if (found_key.objectid != btrfs_ino(inode) ||
3775 	    found_type != BTRFS_EXTENT_DATA_KEY) {
3776 		/* have to trust i_size as the end */
3777 		last = (u64)-1;
3778 		last_for_get_extent = isize;
3779 	} else {
3780 		/*
3781 		 * remember the start of the last extent.  There are a
3782 		 * bunch of different factors that go into the length of the
3783 		 * extent, so it's much less complex to remember where it started
3784 		 */
3785 		last = found_key.offset;
3786 		last_for_get_extent = last + 1;
3787 	}
3788 	btrfs_free_path(path);
3789 
3790 	/*
3791 	 * we might have some extents allocated but more delalloc past those
3792 	 * extents.  so, we trust isize unless the start of the last extent is
3793 	 * beyond isize
3794 	 */
3795 	if (last < isize) {
3796 		last = (u64)-1;
3797 		last_for_get_extent = isize;
3798 	}
3799 
3800 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3801 			 &cached_state);
3802 
3803 	em = get_extent_skip_holes(inode, start, last_for_get_extent,
3804 				   get_extent);
3805 	if (!em)
3806 		goto out;
3807 	if (IS_ERR(em)) {
3808 		ret = PTR_ERR(em);
3809 		goto out;
3810 	}
3811 
3812 	while (!end) {
3813 		u64 offset_in_extent;
3814 
3815 		/* break if the extent we found is outside the range */
3816 		if (em->start >= max || extent_map_end(em) < off)
3817 			break;
3818 
3819 		/*
3820 		 * get_extent may return an extent that starts before our
3821 		 * requested range.  We have to make sure the ranges
3822 		 * we return to fiemap always move forward and don't
3823 		 * overlap, so adjust the offsets here
3824 		 */
3825 		em_start = max(em->start, off);
3826 
3827 		/*
3828 		 * record the offset from the start of the extent
3829 		 * for adjusting the disk offset below
3830 		 */
3831 		offset_in_extent = em_start - em->start;
3832 		em_end = extent_map_end(em);
3833 		em_len = em_end - em_start;
3834 		emflags = em->flags;
3835 		disko = 0;
3836 		flags = 0;
3837 
3838 		/*
3839 		 * bump off for our next call to get_extent
3840 		 */
3841 		off = extent_map_end(em);
3842 		if (off >= max)
3843 			end = 1;
3844 
3845 		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3846 			end = 1;
3847 			flags |= FIEMAP_EXTENT_LAST;
3848 		} else if (em->block_start == EXTENT_MAP_INLINE) {
3849 			flags |= (FIEMAP_EXTENT_DATA_INLINE |
3850 				  FIEMAP_EXTENT_NOT_ALIGNED);
3851 		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
3852 			flags |= (FIEMAP_EXTENT_DELALLOC |
3853 				  FIEMAP_EXTENT_UNKNOWN);
3854 		} else {
3855 			disko = em->block_start + offset_in_extent;
3856 		}
3857 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3858 			flags |= FIEMAP_EXTENT_ENCODED;
3859 
3860 		free_extent_map(em);
3861 		em = NULL;
3862 		if ((em_start >= last) || em_len == (u64)-1 ||
3863 		   (last == (u64)-1 && isize <= em_end)) {
3864 			flags |= FIEMAP_EXTENT_LAST;
3865 			end = 1;
3866 		}
3867 
3868 		/* now scan forward to see if this is really the last extent. */
3869 		em = get_extent_skip_holes(inode, off, last_for_get_extent,
3870 					   get_extent);
3871 		if (IS_ERR(em)) {
3872 			ret = PTR_ERR(em);
3873 			goto out;
3874 		}
3875 		if (!em) {
3876 			flags |= FIEMAP_EXTENT_LAST;
3877 			end = 1;
3878 		}
3879 		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3880 					      em_len, flags);
3881 		if (ret)
3882 			goto out_free;
3883 	}
3884 out_free:
3885 	free_extent_map(em);
3886 out:
3887 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3888 			     &cached_state, GFP_NOFS);
3889 	return ret;
3890 }
3891 
3892 inline struct page *extent_buffer_page(struct extent_buffer *eb,
3893 					      unsigned long i)
3894 {
3895 	return eb->pages[i];
3896 }
3897 
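/* how many pages are needed to cover the byte range [start, start + len) */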
3898 inline unsigned long num_extent_pages(u64 start, u64 len)
3899 {
3900 	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3901 		(start >> PAGE_CACHE_SHIFT);
3902 }
3903 
3904 static void __free_extent_buffer(struct extent_buffer *eb)
3905 {
3906 #if LEAK_DEBUG
3907 	unsigned long flags;
3908 	spin_lock_irqsave(&leak_lock, flags);
3909 	list_del(&eb->leak_list);
3910 	spin_unlock_irqrestore(&leak_lock, flags);
3911 #endif
3912 	if (eb->pages && eb->pages != eb->inline_pages)
3913 		kfree(eb->pages);
3914 	kmem_cache_free(extent_buffer_cache, eb);
3915 }
3916 
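/*
 * Allocate and initialize a bare extent_buffer for [start, start + len).
 * Small buffers use the inline page array; larger ones get a separately
 * allocated array.  The pages themselves are attached by the caller.
 */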
3917 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3918 						   u64 start,
3919 						   unsigned long len,
3920 						   gfp_t mask)
3921 {
3922 	struct extent_buffer *eb = NULL;
3923 #if LEAK_DEBUG
3924 	unsigned long flags;
3925 #endif
3926 
3927 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3928 	if (eb == NULL)
3929 		return NULL;
3930 	eb->start = start;
3931 	eb->len = len;
3932 	eb->tree = tree;
3933 	rwlock_init(&eb->lock);
3934 	atomic_set(&eb->write_locks, 0);
3935 	atomic_set(&eb->read_locks, 0);
3936 	atomic_set(&eb->blocking_readers, 0);
3937 	atomic_set(&eb->blocking_writers, 0);
3938 	atomic_set(&eb->spinning_readers, 0);
3939 	atomic_set(&eb->spinning_writers, 0);
3940 	eb->lock_nested = 0;
3941 	init_waitqueue_head(&eb->write_lock_wq);
3942 	init_waitqueue_head(&eb->read_lock_wq);
3943 
3944 #if LEAK_DEBUG
3945 	spin_lock_irqsave(&leak_lock, flags);
3946 	list_add(&eb->leak_list, &buffers);
3947 	spin_unlock_irqrestore(&leak_lock, flags);
3948 #endif
3949 	spin_lock_init(&eb->refs_lock);
3950 	atomic_set(&eb->refs, 1);
3951 	atomic_set(&eb->io_pages, 0);
3952 
3953 	if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
3954 		struct page **pages;
3955 		int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
3956 			PAGE_CACHE_SHIFT;
3957 		pages = kzalloc(num_pages * sizeof(struct page *), mask);
3958 		if (!pages) {
3959 			__free_extent_buffer(eb);
3960 			return NULL;
3961 		}
3962 		eb->pages = pages;
3963 	} else {
3964 		eb->pages = eb->inline_pages;
3965 	}
3966 
3967 	return eb;
3968 }
3969 
3970 static int extent_buffer_under_io(struct extent_buffer *eb)
3971 {
3972 	return (atomic_read(&eb->io_pages) ||
3973 		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3974 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3975 }
3976 
3977 /*
3978  * Helper for releasing extent buffer page.
3979  */
3980 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3981 						unsigned long start_idx)
3982 {
3983 	unsigned long index;
3984 	struct page *page;
3985 
3986 	BUG_ON(extent_buffer_under_io(eb));
3987 
3988 	index = num_extent_pages(eb->start, eb->len);
3989 	if (start_idx >= index)
3990 		return;
3991 
3992 	do {
3993 		index--;
3994 		page = extent_buffer_page(eb, index);
3995 		if (page) {
3996 			spin_lock(&page->mapping->private_lock);
3997 			/*
3998 			 * We do this since we'll remove the pages after we've
3999 			 * removed the eb from the radix tree, so we could race
4000 			 * and have this page now attached to the new eb.  So
4001 			 * only clear page_private if it's still connected to
4002 			 * this eb.
4003 			 */
4004 			if (PagePrivate(page) &&
4005 			    page->private == (unsigned long)eb) {
4006 				BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4007 				BUG_ON(PageDirty(page));
4008 				BUG_ON(PageWriteback(page));
4009 				/*
4010 				 * We need to make sure we haven't been attached
4011 				 * to a new eb.
4012 				 */
4013 				ClearPagePrivate(page);
4014 				set_page_private(page, 0);
4015 				/* One for the page private */
4016 				page_cache_release(page);
4017 			}
4018 			spin_unlock(&page->mapping->private_lock);
4019 
4020 			/* One for when we alloced the page */
4021 			page_cache_release(page);
4022 		}
4023 	} while (index != start_idx);
4024 }
4025 
4026 /*
4027  * Helper for releasing the extent buffer.
4028  */
4029 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4030 {
4031 	btrfs_release_extent_buffer_page(eb, 0);
4032 	__free_extent_buffer(eb);
4033 }
4034 
4035 static void check_buffer_tree_ref(struct extent_buffer *eb)
4036 {
4037 	/* the ref bit is tricky.  We have to make sure it is set
4038 	 * if we have the buffer dirty.   Otherwise the
4039 	 * code to free a buffer can end up dropping a dirty
4040 	 * page
4041 	 *
4042 	 * Once the ref bit is set, it won't go away while the
4043 	 * buffer is dirty or in writeback, and it also won't
4044 	 * go away while we have the reference count on the
4045 	 * eb bumped.
4046 	 *
4047 	 * We can't just set the ref bit without bumping the
4048 	 * ref on the eb because free_extent_buffer might
4049 	 * see the ref bit and try to clear it.  If this happens
4050 	 * free_extent_buffer might end up dropping our original
4051 	 * ref by mistake and freeing the page before we are able
4052 	 * to add one more ref.
4053 	 *
4054 	 * So bump the ref count first, then set the bit.  If someone
4055 	 * beat us to it, drop the ref we added.
4056 	 */
4057 	if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4058 		atomic_inc(&eb->refs);
4059 		if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4060 			atomic_dec(&eb->refs);
4061 	}
4062 }
4063 
4064 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4065 {
4066 	unsigned long num_pages, i;
4067 
4068 	check_buffer_tree_ref(eb);
4069 
4070 	num_pages = num_extent_pages(eb->start, eb->len);
4071 	for (i = 0; i < num_pages; i++) {
4072 		struct page *p = extent_buffer_page(eb, i);
4073 		mark_page_accessed(p);
4074 	}
4075 }
4076 
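/*
 * Return the extent_buffer covering [start, start + len), creating it and
 * its pages if it isn't already present in the tree's radix tree.  The
 * buffer is returned with an extra reference that the caller must drop
 * with free_extent_buffer().
 */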
4077 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
4078 					  u64 start, unsigned long len)
4079 {
4080 	unsigned long num_pages = num_extent_pages(start, len);
4081 	unsigned long i;
4082 	unsigned long index = start >> PAGE_CACHE_SHIFT;
4083 	struct extent_buffer *eb;
4084 	struct extent_buffer *exists = NULL;
4085 	struct page *p;
4086 	struct address_space *mapping = tree->mapping;
4087 	int uptodate = 1;
4088 	int ret;
4089 
4090 	rcu_read_lock();
4091 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4092 	if (eb && atomic_inc_not_zero(&eb->refs)) {
4093 		rcu_read_unlock();
4094 		mark_extent_buffer_accessed(eb);
4095 		return eb;
4096 	}
4097 	rcu_read_unlock();
4098 
4099 	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
4100 	if (!eb)
4101 		return NULL;
4102 
4103 	for (i = 0; i < num_pages; i++, index++) {
4104 		p = find_or_create_page(mapping, index, GFP_NOFS);
4105 		if (!p) {
4106 			WARN_ON(1);
4107 			goto free_eb;
4108 		}
4109 
4110 		spin_lock(&mapping->private_lock);
4111 		if (PagePrivate(p)) {
4112 			/*
4113 			 * We could have already allocated an eb for this page
4114 			 * and attached it, so let's see if we can get a ref on
4115 			 * the existing eb, and if we can we know it's good and
4116 			 * we can just return that one, else we know we can just
4117 			 * overwrite page->private.
4118 			 */
4119 			exists = (struct extent_buffer *)p->private;
4120 			if (atomic_inc_not_zero(&exists->refs)) {
4121 				spin_unlock(&mapping->private_lock);
4122 				unlock_page(p);
4123 				page_cache_release(p);
4124 				mark_extent_buffer_accessed(exists);
4125 				goto free_eb;
4126 			}
4127 
4128 			/*
4129 			 * Do this so attach doesn't complain, and drop the
4130 			 * page ref the old eb was holding.
4131 			 */
4132 			ClearPagePrivate(p);
4133 			WARN_ON(PageDirty(p));
4134 			page_cache_release(p);
4135 		}
4136 		attach_extent_buffer_page(eb, p);
4137 		spin_unlock(&mapping->private_lock);
4138 		WARN_ON(PageDirty(p));
4139 		mark_page_accessed(p);
4140 		eb->pages[i] = p;
4141 		if (!PageUptodate(p))
4142 			uptodate = 0;
4143 
4144 		/*
4145 		 * see below about how we avoid a nasty race with release page
4146 		 * and why we unlock later
4147 		 */
4148 	}
4149 	if (uptodate)
4150 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4151 again:
4152 	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4153 	if (ret)
4154 		goto free_eb;
4155 
4156 	spin_lock(&tree->buffer_lock);
4157 	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4158 	if (ret == -EEXIST) {
4159 		exists = radix_tree_lookup(&tree->buffer,
4160 						start >> PAGE_CACHE_SHIFT);
4161 		if (!atomic_inc_not_zero(&exists->refs)) {
4162 			spin_unlock(&tree->buffer_lock);
4163 			radix_tree_preload_end();
4164 			exists = NULL;
4165 			goto again;
4166 		}
4167 		spin_unlock(&tree->buffer_lock);
4168 		radix_tree_preload_end();
4169 		mark_extent_buffer_accessed(exists);
4170 		goto free_eb;
4171 	}
4172 	/* add one reference for the tree */
4173 	spin_lock(&eb->refs_lock);
4174 	check_buffer_tree_ref(eb);
4175 	spin_unlock(&eb->refs_lock);
4176 	spin_unlock(&tree->buffer_lock);
4177 	radix_tree_preload_end();
4178 
4179 	/*
4180 	 * there is a race where release page may have
4181 	 * tried to find this extent buffer in the radix
4182 	 * tree but failed.  It will tell the VM it is safe to
4183 	 * reclaim the page, and it will clear the page private bit.
4184 	 * We must make sure to set the page private bit properly
4185 	 * after the extent buffer is in the radix tree so
4186 	 * it doesn't get lost
4187 	 */
4188 	SetPageChecked(eb->pages[0]);
4189 	for (i = 1; i < num_pages; i++) {
4190 		p = extent_buffer_page(eb, i);
4191 		ClearPageChecked(p);
4192 		unlock_page(p);
4193 	}
4194 	unlock_page(eb->pages[0]);
4195 	return eb;
4196 
4197 free_eb:
4198 	for (i = 0; i < num_pages; i++) {
4199 		if (eb->pages[i])
4200 			unlock_page(eb->pages[i]);
4201 	}
4202 
4203 	WARN_ON(!atomic_dec_and_test(&eb->refs));
4204 	btrfs_release_extent_buffer(eb);
4205 	return exists;
4206 }
4207 
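/*
 * Look up an existing extent_buffer in the radix tree without creating
 * one.  Returns the buffer with an extra reference held, or NULL if it
 * isn't cached or is already on its way to being freed.
 */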
4208 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
4209 					 u64 start, unsigned long len)
4210 {
4211 	struct extent_buffer *eb;
4212 
4213 	rcu_read_lock();
4214 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4215 	if (eb && atomic_inc_not_zero(&eb->refs)) {
4216 		rcu_read_unlock();
4217 		mark_extent_buffer_accessed(eb);
4218 		return eb;
4219 	}
4220 	rcu_read_unlock();
4221 
4222 	return NULL;
4223 }
4224 
4225 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4226 {
4227 	struct extent_buffer *eb =
4228 			container_of(head, struct extent_buffer, rcu_head);
4229 
4230 	__free_extent_buffer(eb);
4231 }
4232 
4233 /* Expects to have eb->refs_lock already held */
4234 static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4235 {
4236 	WARN_ON(atomic_read(&eb->refs) == 0);
4237 	if (atomic_dec_and_test(&eb->refs)) {
4238 		struct extent_io_tree *tree = eb->tree;
4239 
4240 		spin_unlock(&eb->refs_lock);
4241 
4242 		spin_lock(&tree->buffer_lock);
4243 		radix_tree_delete(&tree->buffer,
4244 				  eb->start >> PAGE_CACHE_SHIFT);
4245 		spin_unlock(&tree->buffer_lock);
4246 
4247 		/* Should be safe to release our pages at this point */
4248 		btrfs_release_extent_buffer_page(eb, 0);
4249 
4250 		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4251 		return;
4252 	}
4253 	spin_unlock(&eb->refs_lock);
4254 }
4255 
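/*
 * Drop the caller's reference on the buffer.  If the buffer is stale and
 * idle and only this ref plus the tree ref remain, the tree ref is dropped
 * as well so release_extent_buffer() can actually free it.
 */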
4256 void free_extent_buffer(struct extent_buffer *eb)
4257 {
4258 	if (!eb)
4259 		return;
4260 
4261 	spin_lock(&eb->refs_lock);
4262 	if (atomic_read(&eb->refs) == 2 &&
4263 	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4264 	    !extent_buffer_under_io(eb) &&
4265 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4266 		atomic_dec(&eb->refs);
4267 
4268 	/*
4269 	 * I know this is terrible, but it's temporary until we stop tracking
4270 	 * the uptodate bits and such for the extent buffers.
4271 	 */
4272 	release_extent_buffer(eb, GFP_ATOMIC);
4273 }
4274 
4275 void free_extent_buffer_stale(struct extent_buffer *eb)
4276 {
4277 	if (!eb)
4278 		return;
4279 
4280 	spin_lock(&eb->refs_lock);
4281 	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4282 
4283 	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4284 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4285 		atomic_dec(&eb->refs);
4286 	release_extent_buffer(eb, GFP_NOFS);
4287 }
4288 
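/*
 * Clear the dirty bit on every page backing this buffer and drop the
 * matching radix tree dirty tags so writeback stops seeing them.
 */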
4289 void clear_extent_buffer_dirty(struct extent_buffer *eb)
4290 {
4291 	unsigned long i;
4292 	unsigned long num_pages;
4293 	struct page *page;
4294 
4295 	num_pages = num_extent_pages(eb->start, eb->len);
4296 
4297 	for (i = 0; i < num_pages; i++) {
4298 		page = extent_buffer_page(eb, i);
4299 		if (!PageDirty(page))
4300 			continue;
4301 
4302 		lock_page(page);
4303 		WARN_ON(!PagePrivate(page));
4304 
4305 		clear_page_dirty_for_io(page);
4306 		spin_lock_irq(&page->mapping->tree_lock);
4307 		if (!PageDirty(page)) {
4308 			radix_tree_tag_clear(&page->mapping->page_tree,
4309 						page_index(page),
4310 						PAGECACHE_TAG_DIRTY);
4311 		}
4312 		spin_unlock_irq(&page->mapping->tree_lock);
4313 		ClearPageError(page);
4314 		unlock_page(page);
4315 	}
4316 	WARN_ON(atomic_read(&eb->refs) == 0);
4317 }
4318 
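/*
 * Mark the buffer and all of its pages dirty.  Returns the previous state
 * of EXTENT_BUFFER_DIRTY so the caller can tell if it was already dirty.
 */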
4319 int set_extent_buffer_dirty(struct extent_buffer *eb)
4320 {
4321 	unsigned long i;
4322 	unsigned long num_pages;
4323 	int was_dirty = 0;
4324 
4325 	check_buffer_tree_ref(eb);
4326 
4327 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4328 
4329 	num_pages = num_extent_pages(eb->start, eb->len);
4330 	WARN_ON(atomic_read(&eb->refs) == 0);
4331 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4332 
4333 	for (i = 0; i < num_pages; i++)
4334 		set_page_dirty(extent_buffer_page(eb, i));
4335 	return was_dirty;
4336 }
4337 
4338 static int range_straddles_pages(u64 start, u64 len)
4339 {
4340 	if (len < PAGE_CACHE_SIZE)
4341 		return 1;
4342 	if (start & (PAGE_CACHE_SIZE - 1))
4343 		return 1;
4344 	if ((start + len) & (PAGE_CACHE_SIZE - 1))
4345 		return 1;
4346 	return 0;
4347 }
4348 
4349 int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4350 {
4351 	unsigned long i;
4352 	struct page *page;
4353 	unsigned long num_pages;
4354 
4355 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4356 	num_pages = num_extent_pages(eb->start, eb->len);
4357 	for (i = 0; i < num_pages; i++) {
4358 		page = extent_buffer_page(eb, i);
4359 		if (page)
4360 			ClearPageUptodate(page);
4361 	}
4362 	return 0;
4363 }
4364 
4365 int set_extent_buffer_uptodate(struct extent_buffer *eb)
4366 {
4367 	unsigned long i;
4368 	struct page *page;
4369 	unsigned long num_pages;
4370 
4371 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4372 	num_pages = num_extent_pages(eb->start, eb->len);
4373 	for (i = 0; i < num_pages; i++) {
4374 		page = extent_buffer_page(eb, i);
4375 		SetPageUptodate(page);
4376 	}
4377 	return 0;
4378 }
4379 
4380 int extent_range_uptodate(struct extent_io_tree *tree,
4381 			  u64 start, u64 end)
4382 {
4383 	struct page *page;
4384 	int ret;
4385 	int pg_uptodate = 1;
4386 	int uptodate;
4387 	unsigned long index;
4388 
4389 	if (range_straddles_pages(start, end - start + 1)) {
4390 		ret = test_range_bit(tree, start, end,
4391 				     EXTENT_UPTODATE, 1, NULL);
4392 		if (ret)
4393 			return 1;
4394 	}
4395 	while (start <= end) {
4396 		index = start >> PAGE_CACHE_SHIFT;
4397 		page = find_get_page(tree->mapping, index);
4398 		if (!page)
4399 			return 1;
4400 		uptodate = PageUptodate(page);
4401 		page_cache_release(page);
4402 		if (!uptodate) {
4403 			pg_uptodate = 0;
4404 			break;
4405 		}
4406 		start += PAGE_CACHE_SIZE;
4407 	}
4408 	return pg_uptodate;
4409 }
4410 
4411 int extent_buffer_uptodate(struct extent_buffer *eb)
4412 {
4413 	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4414 }
4415 
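/*
 * Read in any pages of the buffer that aren't uptodate.  With WAIT_NONE
 * the pages are only trylocked and the reads are submitted without
 * waiting; with WAIT_COMPLETE we block until every page has been read and
 * return -EIO if any of them failed.
 */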
4416 int read_extent_buffer_pages(struct extent_io_tree *tree,
4417 			     struct extent_buffer *eb, u64 start, int wait,
4418 			     get_extent_t *get_extent, int mirror_num)
4419 {
4420 	unsigned long i;
4421 	unsigned long start_i;
4422 	struct page *page;
4423 	int err;
4424 	int ret = 0;
4425 	int locked_pages = 0;
4426 	int all_uptodate = 1;
4427 	unsigned long num_pages;
4428 	unsigned long num_reads = 0;
4429 	struct bio *bio = NULL;
4430 	unsigned long bio_flags = 0;
4431 
4432 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4433 		return 0;
4434 
4435 	if (start) {
4436 		WARN_ON(start < eb->start);
4437 		start_i = (start >> PAGE_CACHE_SHIFT) -
4438 			(eb->start >> PAGE_CACHE_SHIFT);
4439 	} else {
4440 		start_i = 0;
4441 	}
4442 
4443 	num_pages = num_extent_pages(eb->start, eb->len);
4444 	for (i = start_i; i < num_pages; i++) {
4445 		page = extent_buffer_page(eb, i);
4446 		if (wait == WAIT_NONE) {
4447 			if (!trylock_page(page))
4448 				goto unlock_exit;
4449 		} else {
4450 			lock_page(page);
4451 		}
4452 		locked_pages++;
4453 		if (!PageUptodate(page)) {
4454 			num_reads++;
4455 			all_uptodate = 0;
4456 		}
4457 	}
4458 	if (all_uptodate) {
4459 		if (start_i == 0)
4460 			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4461 		goto unlock_exit;
4462 	}
4463 
4464 	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4465 	eb->read_mirror = 0;
4466 	atomic_set(&eb->io_pages, num_reads);
4467 	for (i = start_i; i < num_pages; i++) {
4468 		page = extent_buffer_page(eb, i);
4469 		if (!PageUptodate(page)) {
4470 			ClearPageError(page);
4471 			err = __extent_read_full_page(tree, page,
4472 						      get_extent, &bio,
4473 						      mirror_num, &bio_flags);
4474 			if (err)
4475 				ret = err;
4476 		} else {
4477 			unlock_page(page);
4478 		}
4479 	}
4480 
4481 	if (bio) {
4482 		err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4483 		if (err)
4484 			return err;
4485 	}
4486 
4487 	if (ret || wait != WAIT_COMPLETE)
4488 		return ret;
4489 
4490 	for (i = start_i; i < num_pages; i++) {
4491 		page = extent_buffer_page(eb, i);
4492 		wait_on_page_locked(page);
4493 		if (!PageUptodate(page))
4494 			ret = -EIO;
4495 	}
4496 
4497 	return ret;
4498 
4499 unlock_exit:
4500 	i = start_i;
4501 	while (locked_pages > 0) {
4502 		page = extent_buffer_page(eb, i);
4503 		i++;
4504 		unlock_page(page);
4505 		locked_pages--;
4506 	}
4507 	return ret;
4508 }
4509 
4510 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4511 			unsigned long start,
4512 			unsigned long len)
4513 {
4514 	size_t cur;
4515 	size_t offset;
4516 	struct page *page;
4517 	char *kaddr;
4518 	char *dst = (char *)dstv;
4519 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4520 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4521 
4522 	WARN_ON(start > eb->len);
4523 	WARN_ON(start + len > eb->start + eb->len);
4524 
4525 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4526 
4527 	while (len > 0) {
4528 		page = extent_buffer_page(eb, i);
4529 
4530 		cur = min(len, (PAGE_CACHE_SIZE - offset));
4531 		kaddr = page_address(page);
4532 		memcpy(dst, kaddr + offset, cur);
4533 
4534 		dst += cur;
4535 		len -= cur;
4536 		offset = 0;
4537 		i++;
4538 	}
4539 }
4540 
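/*
 * Map min_len bytes at offset 'start' in the buffer, but only if the range
 * fits within a single page.  On success *map points into the page, with
 * *map_start giving the buffer offset it corresponds to and *map_len the
 * number of contiguous bytes available from there.
 */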
4541 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4542 			       unsigned long min_len, char **map,
4543 			       unsigned long *map_start,
4544 			       unsigned long *map_len)
4545 {
4546 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
4547 	char *kaddr;
4548 	struct page *p;
4549 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4550 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4551 	unsigned long end_i = (start_offset + start + min_len - 1) >>
4552 		PAGE_CACHE_SHIFT;
4553 
4554 	if (i != end_i)
4555 		return -EINVAL;
4556 
4557 	if (i == 0) {
4558 		offset = start_offset;
4559 		*map_start = 0;
4560 	} else {
4561 		offset = 0;
4562 		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4563 	}
4564 
4565 	if (start + min_len > eb->len) {
4566 		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4567 		       "wanted %lu %lu\n", (unsigned long long)eb->start,
4568 		       eb->len, start, min_len);
4569 		WARN_ON(1);
4570 		return -EINVAL;
4571 	}
4572 
4573 	p = extent_buffer_page(eb, i);
4574 	kaddr = page_address(p);
4575 	*map = kaddr + offset;
4576 	*map_len = PAGE_CACHE_SIZE - offset;
4577 	return 0;
4578 }
4579 
4580 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4581 			  unsigned long start,
4582 			  unsigned long len)
4583 {
4584 	size_t cur;
4585 	size_t offset;
4586 	struct page *page;
4587 	char *kaddr;
4588 	char *ptr = (char *)ptrv;
4589 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4590 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4591 	int ret = 0;
4592 
4593 	WARN_ON(start > eb->len);
4594 	WARN_ON(start + len > eb->start + eb->len);
4595 
4596 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4597 
4598 	while (len > 0) {
4599 		page = extent_buffer_page(eb, i);
4600 
4601 		cur = min(len, (PAGE_CACHE_SIZE - offset));
4602 
4603 		kaddr = page_address(page);
4604 		ret = memcmp(ptr, kaddr + offset, cur);
4605 		if (ret)
4606 			break;
4607 
4608 		ptr += cur;
4609 		len -= cur;
4610 		offset = 0;
4611 		i++;
4612 	}
4613 	return ret;
4614 }
4615 
4616 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4617 			 unsigned long start, unsigned long len)
4618 {
4619 	size_t cur;
4620 	size_t offset;
4621 	struct page *page;
4622 	char *kaddr;
4623 	char *src = (char *)srcv;
4624 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4625 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4626 
4627 	WARN_ON(start > eb->len);
4628 	WARN_ON(start + len > eb->start + eb->len);
4629 
4630 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4631 
4632 	while (len > 0) {
4633 		page = extent_buffer_page(eb, i);
4634 		WARN_ON(!PageUptodate(page));
4635 
4636 		cur = min(len, PAGE_CACHE_SIZE - offset);
4637 		kaddr = page_address(page);
4638 		memcpy(kaddr + offset, src, cur);
4639 
4640 		src += cur;
4641 		len -= cur;
4642 		offset = 0;
4643 		i++;
4644 	}
4645 }
4646 
4647 void memset_extent_buffer(struct extent_buffer *eb, char c,
4648 			  unsigned long start, unsigned long len)
4649 {
4650 	size_t cur;
4651 	size_t offset;
4652 	struct page *page;
4653 	char *kaddr;
4654 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4655 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4656 
4657 	WARN_ON(start > eb->len);
4658 	WARN_ON(start + len > eb->start + eb->len);
4659 
4660 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4661 
4662 	while (len > 0) {
4663 		page = extent_buffer_page(eb, i);
4664 		WARN_ON(!PageUptodate(page));
4665 
4666 		cur = min(len, PAGE_CACHE_SIZE - offset);
4667 		kaddr = page_address(page);
4668 		memset(kaddr + offset, c, cur);
4669 
4670 		len -= cur;
4671 		offset = 0;
4672 		i++;
4673 	}
4674 }
4675 
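/*
 * Copy 'len' bytes from 'src' into 'dst' at the given offsets.  The two
 * buffers are expected to have the same length and the destination pages
 * to already be uptodate.
 */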
4676 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4677 			unsigned long dst_offset, unsigned long src_offset,
4678 			unsigned long len)
4679 {
4680 	u64 dst_len = dst->len;
4681 	size_t cur;
4682 	size_t offset;
4683 	struct page *page;
4684 	char *kaddr;
4685 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4686 	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4687 
4688 	WARN_ON(src->len != dst_len);
4689 
4690 	offset = (start_offset + dst_offset) &
4691 		((unsigned long)PAGE_CACHE_SIZE - 1);
4692 
4693 	while (len > 0) {
4694 		page = extent_buffer_page(dst, i);
4695 		WARN_ON(!PageUptodate(page));
4696 
4697 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4698 
4699 		kaddr = page_address(page);
4700 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
4701 
4702 		src_offset += cur;
4703 		len -= cur;
4704 		offset = 0;
4705 		i++;
4706 	}
4707 }
4708 
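/*
 * Copy 'len' bytes within or between pages.  On the same page a memmove
 * handles any overlap; between pages the bytes are copied backwards so
 * overlapping ranges that straddle a page boundary still move correctly.
 */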
4709 static void move_pages(struct page *dst_page, struct page *src_page,
4710 		       unsigned long dst_off, unsigned long src_off,
4711 		       unsigned long len)
4712 {
4713 	char *dst_kaddr = page_address(dst_page);
4714 	if (dst_page == src_page) {
4715 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4716 	} else {
4717 		char *src_kaddr = page_address(src_page);
4718 		char *p = dst_kaddr + dst_off + len;
4719 		char *s = src_kaddr + src_off + len;
4720 
4721 		while (len--)
4722 			*--p = *--s;
4723 	}
4724 }
4725 
4726 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4727 {
4728 	unsigned long distance = (src > dst) ? src - dst : dst - src;
4729 	return distance < len;
4730 }
4731 
4732 static void copy_pages(struct page *dst_page, struct page *src_page,
4733 		       unsigned long dst_off, unsigned long src_off,
4734 		       unsigned long len)
4735 {
4736 	char *dst_kaddr = page_address(dst_page);
4737 	char *src_kaddr;
4738 	int must_memmove = 0;
4739 
4740 	if (dst_page != src_page) {
4741 		src_kaddr = page_address(src_page);
4742 	} else {
4743 		src_kaddr = dst_kaddr;
4744 		if (areas_overlap(src_off, dst_off, len))
4745 			must_memmove = 1;
4746 	}
4747 
4748 	if (must_memmove)
4749 		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4750 	else
4751 		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
4752 }
4753 
4754 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4755 			   unsigned long src_offset, unsigned long len)
4756 {
4757 	size_t cur;
4758 	size_t dst_off_in_page;
4759 	size_t src_off_in_page;
4760 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4761 	unsigned long dst_i;
4762 	unsigned long src_i;
4763 
4764 	if (src_offset + len > dst->len) {
4765 		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4766 		       "len %lu dst len %lu\n", src_offset, len, dst->len);
4767 		BUG_ON(1);
4768 	}
4769 	if (dst_offset + len > dst->len) {
4770 		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4771 		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
4772 		BUG_ON(1);
4773 	}
4774 
4775 	while (len > 0) {
4776 		dst_off_in_page = (start_offset + dst_offset) &
4777 			((unsigned long)PAGE_CACHE_SIZE - 1);
4778 		src_off_in_page = (start_offset + src_offset) &
4779 			((unsigned long)PAGE_CACHE_SIZE - 1);
4780 
4781 		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4782 		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4783 
4784 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4785 					       src_off_in_page));
4786 		cur = min_t(unsigned long, cur,
4787 			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4788 
4789 		copy_pages(extent_buffer_page(dst, dst_i),
4790 			   extent_buffer_page(dst, src_i),
4791 			   dst_off_in_page, src_off_in_page, cur);
4792 
4793 		src_offset += cur;
4794 		dst_offset += cur;
4795 		len -= cur;
4796 	}
4797 }
4798 
4799 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4800 			   unsigned long src_offset, unsigned long len)
4801 {
4802 	size_t cur;
4803 	size_t dst_off_in_page;
4804 	size_t src_off_in_page;
4805 	unsigned long dst_end = dst_offset + len - 1;
4806 	unsigned long src_end = src_offset + len - 1;
4807 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4808 	unsigned long dst_i;
4809 	unsigned long src_i;
4810 
4811 	if (src_offset + len > dst->len) {
4812 		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4813 		       "len %lu len %lu\n", src_offset, len, dst->len);
4814 		BUG_ON(1);
4815 	}
4816 	if (dst_offset + len > dst->len) {
4817 		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4818 		       "len %lu len %lu\n", dst_offset, len, dst->len);
4819 		BUG_ON(1);
4820 	}
4821 	if (dst_offset < src_offset) {
4822 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4823 		return;
4824 	}
4825 	while (len > 0) {
4826 		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4827 		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4828 
4829 		dst_off_in_page = (start_offset + dst_end) &
4830 			((unsigned long)PAGE_CACHE_SIZE - 1);
4831 		src_off_in_page = (start_offset + src_end) &
4832 			((unsigned long)PAGE_CACHE_SIZE - 1);
4833 
4834 		cur = min_t(unsigned long, len, src_off_in_page + 1);
4835 		cur = min(cur, dst_off_in_page + 1);
4836 		move_pages(extent_buffer_page(dst, dst_i),
4837 			   extent_buffer_page(dst, src_i),
4838 			   dst_off_in_page - cur + 1,
4839 			   src_off_in_page - cur + 1, cur);
4840 
4841 		dst_end -= cur;
4842 		src_end -= cur;
4843 		len -= cur;
4844 	}
4845 }
4846 
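/*
 * Called when the VM wants to reclaim a metadata page.  Returns 1 if no
 * extent buffer was attached or the attached buffer was idle (only the
 * tree reference left, no I/O in flight) and has been released, 0 if the
 * buffer is still in use.
 */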
4847 int try_release_extent_buffer(struct page *page, gfp_t mask)
4848 {
4849 	struct extent_buffer *eb;
4850 
4851 	/*
4852 	 * We need to make sure nobody is attaching this page to an eb right
4853 	 * now.
4854 	 */
4855 	spin_lock(&page->mapping->private_lock);
4856 	if (!PagePrivate(page)) {
4857 		spin_unlock(&page->mapping->private_lock);
4858 		return 1;
4859 	}
4860 
4861 	eb = (struct extent_buffer *)page->private;
4862 	BUG_ON(!eb);
4863 
4864 	/*
4865 	 * This is a little awful but should be ok, we need to make sure that
4866 	 * the eb doesn't disappear out from under us while we're looking at
4867 	 * this page.
4868 	 */
4869 	spin_lock(&eb->refs_lock);
4870 	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4871 		spin_unlock(&eb->refs_lock);
4872 		spin_unlock(&page->mapping->private_lock);
4873 		return 0;
4874 	}
4875 	spin_unlock(&page->mapping->private_lock);
4876 
4877 	if ((mask & GFP_NOFS) == GFP_NOFS)
4878 		mask = GFP_NOFS;
4879 
4880 	/*
4881 	 * If tree ref isn't set then we know the ref on this eb is a real ref,
4882 	 * so just return, this page will likely be freed soon anyway.
4883 	 */
4884 	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4885 		spin_unlock(&eb->refs_lock);
4886 		return 0;
4887 	}
4888 	release_extent_buffer(eb, mask);
4889 
4890 	return 1;
4891 }
4892