// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
#include "disk-io.h"
#include "subpage.h"
#include "zoned.h"
#include "block-group.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set btrfs_bioset;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(states);
static DEFINE_SPINLOCK(leak_lock);

static inline void btrfs_leak_debug_add(spinlock_t *lock,
					struct list_head *new,
					struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(lock, flags);
}

static inline void btrfs_leak_debug_del(spinlock_t *lock,
					struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(lock, flags);
}

void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long flags;

	/*
	 * If we didn't get into open_ctree our allocated_ebs will not be
	 * initialized, so just skip this.
	 */
	if (!fs_info->allocated_ebs.next)
		return;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	while (!list_empty(&fs_info->allocated_ebs)) {
		eb = list_first_entry(&fs_info->allocated_ebs,
				      struct extent_buffer, leak_list);
		pr_err(
	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
		       btrfs_header_owner(eb));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

static inline void btrfs_extent_state_leak_debug_check(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       refcount_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)		\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode = tree->private_data;
	u64 isize;

	if (!inode || !is_data_inode(inode))
		return;

	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
#define btrfs_leak_debug_del(lock, entry)	do {} while (0)
#define btrfs_extent_state_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct btrfs_bio_ctrl bio_ctrl;
	/*
	 * Tells writepage not to lock the state bits for this range; it
	 * still does the unlocking.
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use REQ_SYNC */
	unsigned int sync_io:1;
};

static int add_extent_changeset(struct extent_state *state, u32 bits,
				 struct extent_changeset *changeset,
				 int set)
{
	int ret;

	if (!changeset)
		return 0;
	if (set && (state->state & bits) == bits)
		return 0;
	if (!set && (state->state & bits) == 0)
		return 0;
	changeset->bytes_changed += state->end - state->start + 1;
	ret = ulist_add(&changeset->range_changed, state->start, state->end,
			GFP_ATOMIC);
	return ret;
}

int __must_check submit_one_bio(struct bio *bio, int mirror_num,
				unsigned long bio_flags)
{
	blk_status_t ret = 0;
	struct extent_io_tree *tree = bio->bi_private;

	bio->bi_private = NULL;

	/* Caller should ensure the bio has at least some range added */
	ASSERT(bio->bi_iter.bi_size);
	if (is_data_inode(tree->private_data))
		ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
					    bio_flags);
	else
		ret = btrfs_submit_metadata_bio(tree->private_data, bio,
						mirror_num, bio_flags);

	return blk_status_to_errno(ret);
}

/* Cleanup unsubmitted bios */
static void end_write_bio(struct extent_page_data *epd, int ret)
{
	struct bio *bio = epd->bio_ctrl.bio;

	if (bio) {
		bio->bi_status = errno_to_blk_status(ret);
		bio_endio(bio);
		epd->bio_ctrl.bio = NULL;
	}
}

/*
 * Submit bio from extent page data via submit_one_bio
 *
 * Return 0 if everything is OK.
 * Return <0 for error.
 */
static int __must_check flush_write_bio(struct extent_page_data *epd)
{
	int ret = 0;
	struct bio *bio = epd->bio_ctrl.bio;

	if (bio) {
		ret = submit_one_bio(bio, 0, 0);
		/*
		 * Cleanup of epd->bio is handled by its endio function, which
		 * is triggered either by successful bio execution or by the
		 * error handler of the submit bio hook.  So at this point, no
		 * matter what happened, we don't need to clean up epd->bio.
		 */
		epd->bio_ctrl.bio = NULL;
	}
	return ret;
}

int __init extent_state_cache_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;
	return 0;
}

int __init extent_io_init(void)
{
	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		return -ENOMEM;

	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		goto free_buffer_cache;

	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_exit(&btrfs_bioset);

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;
	return -ENOMEM;
}

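/*
 * Usage sketch (illustrative, not part of this file): module init pairs the
 * two init helpers above with the exit helpers below, unwinding on failure:
 *
 *	ret = extent_state_cache_init();
 *	if (ret)
 *		return ret;
 *	ret = extent_io_init();
 *	if (ret)
 *		extent_state_cache_exit();
 */
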
void __cold extent_state_cache_exit(void)
{
	btrfs_extent_state_leak_debug_check();
	kmem_cache_destroy(extent_state_cache);
}

void __cold extent_io_exit(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_buffer_cache);
	bioset_exit(&btrfs_bioset);
}

/*
 * For the file_extent_tree, we want to hold the inode lock when we look up and
 * update the disk_i_size, but lockdep will complain because with our io_tree
 * we hold the tree lock and take the inode lock when setting delalloc.  These
 * two things are unrelated, so make a class for the file_extent_tree so we
 * don't get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data)
{
	tree->fs_info = fs_info;
	tree->state = RB_ROOT;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->private_data = private_data;
	tree->owner = owner;
	if (owner == IO_TREE_INODE_FILE_EXTENT)
		lockdep_set_class(&tree->lock, &file_extent_tree_class);
}

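/*
 * Usage sketch (illustrative, not part of this file): inode setup typically
 * initializes its io trees along these lines:
 *
 *	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
 *			    IO_TREE_INODE_IO, inode);
 *	extent_io_tree_init(fs_info, &BTRFS_I(inode)->file_extent_tree,
 *			    IO_TREE_INODE_FILE_EXTENT, inode);
 */
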
void extent_io_tree_release(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once extent_io_tree_release is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	/*
	 * The given mask might not be appropriate for the slab allocator,
	 * drop the unsupported bits
	 */
	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->failrec = NULL;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
	refcount_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (refcount_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

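/*
 * Illustrative note (not part of this file): extent_state objects are
 * refcounted, so holders that cache a state take an extra reference and
 * drop it through free_extent_state() rather than freeing directly:
 *
 *	refcount_inc(&state->refs);	// e.g. when caching a state
 *	...
 *	free_extent_state(state);	// freed only when refs hits zero
 */
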
static struct rb_node *tree_insert(struct rb_root *root,
				   struct rb_node *search_start,
				   u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	p = search_start ? &search_start : &root->rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/**
 * Search @tree for an entry that contains @offset. Such entry would have
 * entry->start <= offset && entry->end >= offset.
 *
 * @tree:       the tree to search
 * @offset:     offset that should fall within an entry in @tree
 * @next_ret:   pointer to the first entry whose range ends after @offset
 * @prev_ret:   pointer to the first entry whose range begins before @offset
 * @p_ret:      pointer where new node should be anchored (used when inserting an
 *	        entry in the tree)
 * @parent_ret: points to entry which would have been the parent of the entry,
 *               containing @offset
 *
 * This function returns a pointer to the entry that contains @offset byte
 * address. If no such entry exists, then NULL is returned and the other
 * pointer arguments to the function are filled, otherwise the found entry is
 * returned and other pointers are left untouched.
 */
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **next_ret,
				      struct rb_node **prev_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (next_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
		prev = orig_prev;
	}

	if (prev_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *next = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
	if (!ret)
		return next;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

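/*
 * Usage sketch (illustrative, not part of this file): callers turn the
 * returned rb_node into an extent_state and check whether it actually
 * overlaps the target range, the pattern used throughout this file:
 *
 *	node = tree_search(tree, start);
 *	if (node) {
 *		state = rb_entry(node, struct extent_state, rb_node);
 *		if (state->start > end)
 *			return;		// first state starts past our range
 *	}
 */
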
/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_LOCKED or EXTENT_BOUNDARY set
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->end = other->end;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, u32 *bits,
			   struct extent_changeset *changeset);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			u32 *bits, struct extent_changeset *changeset)
{
	struct rb_node *node;

	if (end < start) {
		btrfs_err(tree->fs_info,
			"insert state: end < start %llu %llu", end, start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits, changeset);

	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		btrfs_err(tree->fs_info,
		       "found node %llu %llu on insert of %llu %llu",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_split_delalloc_extent(tree->private_data, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
			   &prealloc->rb_node, NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    u32 *bits, int wake,
					    struct extent_changeset *changeset)
{
	struct extent_state *next;
	u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_clear_delalloc_extent(tree->private_data, state, bits);

	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
	BUG_ON(ret < 0);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree->fs_info, err,
	"locking error: extent tree was modified by another thread while locked");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (i.e. for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock.  It currently always returns 0; errors while
 * splitting or inserting states trigger extent_io_tree_panic().
 */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, int wake, int delete,
		       struct extent_state **cached_state,
		       gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;

	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				refcount_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake,
						changeset);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake, changeset);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake, changeset);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

}

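/*
 * Usage sketch (illustrative, not part of this file): the unlock helpers in
 * extent-io-tree.h boil down to clearing EXTENT_LOCKED with wake == 1, e.g.:
 *
 *	__clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0,
 *			   &cached, GFP_NOFS, NULL);
 */
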
static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    u32 bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
process_node:
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			refcount_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   u32 *bits, struct extent_changeset *changeset)
{
	u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
	int ret;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_set_delalloc_extent(tree->private_data, state, bits);

	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
	BUG_ON(ret < 0);
	state->state |= bits_to_set;
}

static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned flags)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			refcount_inc(&state->refs);
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	return cache_state_if_flags(state, cached_ptr,
				    EXTENT_LOCKED | EXTENT_BOUNDARY);
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		   u32 exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask,
		   struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);

	if (exclusive_bits)
		ASSERT(failed_start);
	else
		ASSERT(failed_start == NULL);
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits, changeset);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		/*
		 * If this extent already has all the bits we want set, then
		 * skip it, not necessary to split it or do anything with it.
		 */
		if ((state->state & bits) == bits) {
			start = state->end + 1;
			cache_state(state, cached_state);
			goto search_again;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, changeset);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, changeset);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

}

/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable,
 * i.e. converting from, say, DELALLOC to DIRTY.  This is not meant to be used
 * with boundary bits like EXTENT_LOCKED.
 *
 * All allocations are done with GFP_NOFS.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;
	bool first_iteration = true;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
				       clear_bits);

again:
	if (!prealloc) {
		/*
		 * Best effort, don't worry if extent state allocation fails
		 * here for the first iteration. We might have a cached state
		 * that matches exactly the target range, in which case no
		 * extent state allocations are needed. We'll only know this
		 * after locking the tree.
		 */
		prealloc = alloc_extent_state(GFP_NOFS);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits, NULL);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, NULL);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0,
						NULL);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, NULL);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	cond_resched();
	first_iteration = false;
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;
}

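/*
 * Usage sketch (illustrative, not part of this file): transaction commit
 * converts written ranges from DIRTY to NEED_WAIT in a single pass:
 *
 *	convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
 *			   EXTENT_DIRTY, &cached_state);
 */
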
/* wrappers around set/clear extent bit */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset)
{
	/*
	 * We don't support EXTENT_LOCKED yet, as the current changeset will
	 * record any bits changed, so for the EXTENT_LOCKED case it will
	 * either fail with -EEXIST or the changeset will record the whole
	 * range.
	 */
	BUG_ON(bits & EXTENT_LOCKED);

	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
			      changeset);
}

int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
			      GFP_NOWAIT, NULL);
}

int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     u32 bits, int wake, int delete,
		     struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, bits, wake, delete,
				  cached, GFP_NOFS, NULL);
}

int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		u32 bits, struct extent_changeset *changeset)
{
	/*
	 * Don't support the EXTENT_LOCKED case, for the same reason as
	 * set_record_extent_bits().
	 */
	BUG_ON(bits & EXTENT_LOCKED);

	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
				  changeset);
}

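/*
 * Usage sketch (illustrative, not part of this file): qgroup reservation
 * code uses the changeset-recording variants to learn which bytes actually
 * changed state, e.g.:
 *
 *	struct extent_changeset *changeset = extent_changeset_alloc();
 *
 *	set_record_extent_bits(&inode->io_tree, start, end,
 *			       EXTENT_QGROUP_RESERVED, changeset);
 *	// changeset->bytes_changed now holds the newly reserved byte count
 */
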
/*
 * Either insert or lock the state struct between start and end.  Waits until
 * the whole [start, end] range is locked, retrying whenever part of the range
 * is already locked by someone else.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached_state)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
				     EXTENT_LOCKED, &failed_start,
				     cached_state, GFP_NOFS, NULL);
		if (err == -EEXIST) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else
			break;
		WARN_ON(start > end);
	}
	return err;
}

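/*
 * Usage sketch (illustrative, not part of this file): callers pair this with
 * the unlock helper and keep a cached state to avoid repeated tree searches:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(&inode->io_tree, start, end, &cached);
 *	... operate on the range ...
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */
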
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			     &failed_start, NULL, GFP_NOFS, NULL);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL);
		return 0;
	}
	return 1;
}

void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		clear_page_dirty_for_io(page);
		put_page(page);
		index++;
	}
}

void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		__set_page_dirty_nobuffers(page);
		account_page_redirty(page);
		put_page(page);
		index++;
	}
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
static struct extent_state *
find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * Find the first offset in the io tree with one or more @bits set.
 *
 * Note: If there are multiple bits set in @bits, any of them will match.
 *
 * Return 0 if we find something, and update @start_ret and @end_ret.
 * Return 1 if we found nothing.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->end == start - 1 && extent_state_in_tree(state)) {
			while ((state = next_state(state)) != NULL) {
				if (state->state & bits)
					goto got_it;
			}
			free_extent_state(*cached_state);
			*cached_state = NULL;
			goto out;
		}
		free_extent_state(*cached_state);
		*cached_state = NULL;
	}

	state = find_first_extent_bit_state(tree, start, bits);
got_it:
	if (state) {
		cache_state_if_flags(state, cached_state, 0);
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}

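/*
 * Usage sketch (illustrative, not part of this file): walking every range
 * with a given bit set:
 *
 *	u64 start = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */
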
/**
 * Find a contiguous area of bits
 *
 * @tree:      io tree to check
 * @start:     offset to start the search from
 * @start_ret: the first offset we found with the bits set
 * @end_ret:   the final contiguous range of the bits that were set
 * @bits:      bits to look for
 *
 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
 * to set bits appropriately, and then merge them again.  During this time it
 * will drop the tree->lock, so use this helper if you want to find the actual
 * contiguous area for given bits.  We will search to the first bit we find, and
 * then walk down the tree until we find a non-contiguous area.  The area
 * returned will be the full contiguous area with the bits set.
 */
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	state = find_first_extent_bit_state(tree, start, bits);
	if (state) {
		*start_ret = state->start;
		*end_ret = state->end;
		while ((state = next_state(state)) != NULL) {
			if (state->start > (*end_ret + 1))
				break;
			*end_ret = state->end;
		}
		ret = 0;
	}
	spin_unlock(&tree->lock);
	return ret;
}

/**
 * Find the first range that has @bits not set. This range could start before
 * @start.
 *
 * @tree:      the tree to search
 * @start:     offset at/after which the found extent should start
 * @start_ret: records the beginning of the range
 * @end_ret:   records the end of the range (inclusive)
 * @bits:      the set of bits which must be unset
 *
 * Since an unallocated range is also considered one which doesn't have the
 * bits set, it's possible that @end_ret contains -1; this happens in case the
 * range spans (last_range_end, end of device]. In this case it's up to the
 * caller to trim @end_ret to the appropriate size.
 */
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits)
{
	struct extent_state *state;
	struct rb_node *node, *prev = NULL, *next;

	spin_lock(&tree->lock);

	/* Find first extent with bits cleared */
	while (1) {
		node = __etree_search(tree, start, &next, &prev, NULL, NULL);
		if (!node && !next && !prev) {
			/*
			 * Tree is completely empty, send full range and let
			 * caller deal with it
			 */
			*start_ret = 0;
			*end_ret = -1;
			goto out;
		} else if (!node && !next) {
			/*
			 * We are past the last allocated chunk, set start at
			 * the end of the last extent.
			 */
			state = rb_entry(prev, struct extent_state, rb_node);
			*start_ret = state->end + 1;
			*end_ret = -1;
			goto out;
		} else if (!node) {
			node = next;
		}
		/*
		 * At this point 'node' either contains 'start' or start is
		 * before 'node'
		 */
		state = rb_entry(node, struct extent_state, rb_node);

		if (in_range(start, state->start, state->end - state->start + 1)) {
			if (state->state & bits) {
				/*
				 * |--range with bits set--|
				 *    |
				 *    start
				 */
				start = state->end + 1;
			} else {
				/*
				 * 'start' falls within a range that doesn't
				 * have the bits set, so take its start as
				 * the beginning of the desired range
				 *
				 * |--range with bits cleared----|
				 *      |
				 *      start
				 */
				*start_ret = state->start;
				break;
			}
		} else {
			/*
			 * |---prev range---|---hole/unset---|---node range---|
			 *                          |
			 *                        start
			 *
			 *                        or
			 *
			 * |---hole/unset--||--first node--|
			 * 0   |
			 *    start
			 */
			if (prev) {
				state = rb_entry(prev, struct extent_state,
						 rb_node);
				*start_ret = state->end + 1;
			} else {
				*start_ret = 0;
			}
			break;
		}
	}

	/*
	 * Find the longest stretch from start until an entry which has the
	 * bits set
	 */
	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && !(state->state & bits)) {
			*end_ret = state->end;
		} else {
			*end_ret = state->start - 1;
			break;
		}

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
}

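/*
 * Usage sketch (illustrative, not part of this file): the free dev extent
 * search uses this on the per-device alloc_state tree and trims the
 * open-ended result itself:
 *
 *	find_first_clear_extent_bit(&device->alloc_state, search_start,
 *				    &start, &end,
 *				    CHUNK_TRIMMED | CHUNK_ALLOCATED);
 */
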
/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * true is returned if we find something, false if nothing was in the tree
 */
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	bool found = false;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			refcount_inc(&state->refs);
		}
		found = true;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

/*
 * Process one page for __process_pages_contig().
 *
 * Return >0 if we hit @page == @locked_page.
 * Return 0 if we updated the page status.
 * Return -EAGAIN if we need to try again.
 * (For the PAGE_LOCK case, when we got a page that is not dirty or that no
 * longer belongs to the mapping.)
 */
static int process_one_page(struct btrfs_fs_info *fs_info,
			    struct address_space *mapping,
			    struct page *page, struct page *locked_page,
			    unsigned long page_ops, u64 start, u64 end)
{
	u32 len;

	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
	len = end + 1 - start;

	if (page_ops & PAGE_SET_ORDERED)
		btrfs_page_clamp_set_ordered(fs_info, page, start, len);
	if (page_ops & PAGE_SET_ERROR)
		btrfs_page_clamp_set_error(fs_info, page, start, len);
	if (page_ops & PAGE_START_WRITEBACK) {
		btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
		btrfs_page_clamp_set_writeback(fs_info, page, start, len);
	}
	if (page_ops & PAGE_END_WRITEBACK)
		btrfs_page_clamp_clear_writeback(fs_info, page, start, len);

	if (page == locked_page)
		return 1;

	if (page_ops & PAGE_LOCK) {
		int ret;

		ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
		if (ret)
			return ret;
		if (!PageDirty(page) || page->mapping != mapping) {
			btrfs_page_end_writer_lock(fs_info, page, start, len);
			return -EAGAIN;
		}
	}
	if (page_ops & PAGE_UNLOCK)
		btrfs_page_end_writer_lock(fs_info, page, start, len);
	return 0;
}

1862 static int __process_pages_contig(struct address_space *mapping,
1863 				  struct page *locked_page,
1864 				  u64 start, u64 end, unsigned long page_ops,
1865 				  u64 *processed_end)
1866 {
1867 	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
1868 	pgoff_t start_index = start >> PAGE_SHIFT;
1869 	pgoff_t end_index = end >> PAGE_SHIFT;
1870 	pgoff_t index = start_index;
1871 	unsigned long nr_pages = end_index - start_index + 1;
1872 	unsigned long pages_processed = 0;
1873 	struct page *pages[16];
1874 	int err = 0;
1875 	int i;
1876 
1877 	if (page_ops & PAGE_LOCK) {
1878 		ASSERT(page_ops == PAGE_LOCK);
1879 		ASSERT(processed_end && *processed_end == start);
1880 	}
1881 
1882 	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1883 		mapping_set_error(mapping, -EIO);
1884 
1885 	while (nr_pages > 0) {
1886 		int found_pages;
1887 
1888 		found_pages = find_get_pages_contig(mapping, index,
1889 				     min_t(unsigned long,
1890 				     nr_pages, ARRAY_SIZE(pages)), pages);
1891 		if (found_pages == 0) {
1892 			/*
1893 			 * We can only find nothing at @index if we are
1894 			 * going to lock these pages.
1895 			 */
1896 			ASSERT(page_ops & PAGE_LOCK);
1897 			err = -EAGAIN;
1898 			goto out;
1899 		}
1900 
1901 		for (i = 0; i < found_pages; i++) {
1902 			int process_ret;
1903 
1904 			process_ret = process_one_page(fs_info, mapping,
1905 					pages[i], locked_page, page_ops,
1906 					start, end);
1907 			if (process_ret < 0) {
1908 				for (; i < found_pages; i++)
1909 					put_page(pages[i]);
1910 				err = -EAGAIN;
1911 				goto out;
1912 			}
1913 			put_page(pages[i]);
1914 			pages_processed++;
1915 		}
1916 		nr_pages -= found_pages;
1917 		index += found_pages;
1918 		cond_resched();
1919 	}
1920 out:
1921 	if (err && processed_end) {
1922 		/*
1923 		 * Update @processed_end. I know this is awful since it has
1924 		 * two different return value patterns (inclusive vs exclusive).
1925 		 *
1926 		 * But the exclusive pattern is necessary if @start is 0,
1927 		 * otherwise we would underflow and the check against
1928 		 * @processed_end would not work as expected.
1929 		 */
1930 		if (pages_processed)
1931 			*processed_end = min(end,
1932 			((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
1933 		else
1934 			*processed_end = start;
1935 	}
1936 	return err;
1937 }
1938 
1939 static noinline void __unlock_for_delalloc(struct inode *inode,
1940 					   struct page *locked_page,
1941 					   u64 start, u64 end)
1942 {
1943 	unsigned long index = start >> PAGE_SHIFT;
1944 	unsigned long end_index = end >> PAGE_SHIFT;
1945 
1946 	ASSERT(locked_page);
1947 	if (index == locked_page->index && end_index == index)
1948 		return;
1949 
1950 	__process_pages_contig(inode->i_mapping, locked_page, start, end,
1951 			       PAGE_UNLOCK, NULL);
1952 }
1953 
1954 static noinline int lock_delalloc_pages(struct inode *inode,
1955 					struct page *locked_page,
1956 					u64 delalloc_start,
1957 					u64 delalloc_end)
1958 {
1959 	unsigned long index = delalloc_start >> PAGE_SHIFT;
1960 	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1961 	u64 processed_end = delalloc_start;
1962 	int ret;
1963 
1964 	ASSERT(locked_page);
1965 	if (index == locked_page->index && index == end_index)
1966 		return 0;
1967 
1968 	ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
1969 				     delalloc_end, PAGE_LOCK, &processed_end);
1970 	if (ret == -EAGAIN && processed_end > delalloc_start)
1971 		__unlock_for_delalloc(inode, locked_page, delalloc_start,
1972 				      processed_end);
1973 	return ret;
1974 }
1975 
1976 /*
1977  * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1978  * more than @max_bytes.
1979  *
1980  * @start:	The original start bytenr to search.
1981  *		Will store the extent range start bytenr.
1982  * @end:	The original end bytenr of the search range
1983  *		Will store the extent range end bytenr.
1984  *
1985  * Return true if we find a delalloc range which starts inside the original
1986  * range, and @start/@end will store the delalloc range start/end.
1987  *
1988  * Return false if we can't find any delalloc range which starts inside the
1989  * original range, and @start/@end will be the non-delalloc range start/end.
1990  */
1991 EXPORT_FOR_TESTS
1992 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
1993 				    struct page *locked_page, u64 *start,
1994 				    u64 *end)
1995 {
1996 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1997 	const u64 orig_start = *start;
1998 	const u64 orig_end = *end;
1999 	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
2000 	u64 delalloc_start;
2001 	u64 delalloc_end;
2002 	bool found;
2003 	struct extent_state *cached_state = NULL;
2004 	int ret;
2005 	int loops = 0;
2006 
2007 	/* Caller should pass a valid @end to indicate the search range end */
2008 	ASSERT(orig_end > orig_start);
2009 
2010 	/* The range should at least cover part of the page */
2011 	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
2012 		 orig_end <= page_offset(locked_page)));
2013 again:
2014 	/* step one, find a bunch of delalloc bytes starting at start */
2015 	delalloc_start = *start;
2016 	delalloc_end = 0;
2017 	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
2018 					  max_bytes, &cached_state);
2019 	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
2020 		*start = delalloc_start;
2021 
2022 		/* @delalloc_end can be -1, never go beyond @orig_end */
2023 		*end = min(delalloc_end, orig_end);
2024 		free_extent_state(cached_state);
2025 		return false;
2026 	}
2027 
2028 	/*
2029 	 * start comes from the offset of locked_page.  We have to lock
2030 	 * pages in order, so we can't process delalloc bytes before
2031 	 * locked_page
2032 	 */
2033 	if (delalloc_start < *start)
2034 		delalloc_start = *start;
2035 
2036 	/*
2037 	 * make sure to limit the number of pages we try to lock down
2038 	 */
2039 	if (delalloc_end + 1 - delalloc_start > max_bytes)
2040 		delalloc_end = delalloc_start + max_bytes - 1;
2041 
2042 	/* step two, lock all the pages after the page that has start */
2043 	ret = lock_delalloc_pages(inode, locked_page,
2044 				  delalloc_start, delalloc_end);
2045 	ASSERT(!ret || ret == -EAGAIN);
2046 	if (ret == -EAGAIN) {
2047 		/* some of the pages are gone, let's avoid looping by
2048 		 * shortening the size of the delalloc range we're searching
2049 		 */
2050 		free_extent_state(cached_state);
2051 		cached_state = NULL;
2052 		if (!loops) {
2053 			max_bytes = PAGE_SIZE;
2054 			loops = 1;
2055 			goto again;
2056 		} else {
2057 			found = false;
2058 			goto out_failed;
2059 		}
2060 	}
2061 
2062 	/* step three, lock the state bits for the whole range */
2063 	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
2064 
2065 	/* then test to make sure it is all still delalloc */
2066 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
2067 			     EXTENT_DELALLOC, 1, cached_state);
2068 	if (!ret) {
2069 		unlock_extent_cached(tree, delalloc_start, delalloc_end,
2070 				     &cached_state);
2071 		__unlock_for_delalloc(inode, locked_page,
2072 			      delalloc_start, delalloc_end);
2073 		cond_resched();
2074 		goto again;
2075 	}
2076 	free_extent_state(cached_state);
2077 	*start = delalloc_start;
2078 	*end = delalloc_end;
2079 out_failed:
2080 	return found;
2081 }
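
/*
 * A sketch of the expected caller loop for find_lock_delalloc_range(); the
 * real writepage caller is outside this file section, so the details here
 * are assumptions:
 *
 *	u64 delalloc_start = page_offset(page);
 *	const u64 page_end = delalloc_start + PAGE_SIZE - 1;
 *
 *	while (delalloc_start < page_end) {
 *		u64 delalloc_end = page_end;
 *
 *		if (!find_lock_delalloc_range(inode, page, &delalloc_start,
 *					      &delalloc_end)) {
 *			(no delalloc found at delalloc_start, skip past the
 *			 returned non-delalloc range)
 *			delalloc_start = delalloc_end + 1;
 *			continue;
 *		}
 *		(the delalloc range is now locked, run delalloc for it)
 *		delalloc_start = delalloc_end + 1;
 *	}
 */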
2082 
2083 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2084 				  struct page *locked_page,
2085 				  u32 clear_bits, unsigned long page_ops)
2086 {
2087 	clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
2088 
2089 	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
2090 			       start, end, page_ops, NULL);
2091 }
2092 
2093 /*
2094  * count the number of bytes in the tree that have the given bit(s)
2095  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
2096  * cached.  The total number of bytes found is returned.
2097  */
2098 u64 count_range_bits(struct extent_io_tree *tree,
2099 		     u64 *start, u64 search_end, u64 max_bytes,
2100 		     u32 bits, int contig)
2101 {
2102 	struct rb_node *node;
2103 	struct extent_state *state;
2104 	u64 cur_start = *start;
2105 	u64 total_bytes = 0;
2106 	u64 last = 0;
2107 	int found = 0;
2108 
2109 	if (WARN_ON(search_end <= cur_start))
2110 		return 0;
2111 
2112 	spin_lock(&tree->lock);
2113 	if (cur_start == 0 && bits == EXTENT_DIRTY) {
2114 		total_bytes = tree->dirty_bytes;
2115 		goto out;
2116 	}
2117 	/*
2118 	 * this search will find all the extents that end after
2119 	 * our range starts.
2120 	 */
2121 	node = tree_search(tree, cur_start);
2122 	if (!node)
2123 		goto out;
2124 
2125 	while (1) {
2126 		state = rb_entry(node, struct extent_state, rb_node);
2127 		if (state->start > search_end)
2128 			break;
2129 		if (contig && found && state->start > last + 1)
2130 			break;
2131 		if (state->end >= cur_start && (state->state & bits) == bits) {
2132 			total_bytes += min(search_end, state->end) + 1 -
2133 				       max(cur_start, state->start);
2134 			if (total_bytes >= max_bytes)
2135 				break;
2136 			if (!found) {
2137 				*start = max(cur_start, state->start);
2138 				found = 1;
2139 			}
2140 			last = state->end;
2141 		} else if (contig && found) {
2142 			break;
2143 		}
2144 		node = rb_next(node);
2145 		if (!node)
2146 			break;
2147 	}
2148 out:
2149 	spin_unlock(&tree->lock);
2150 	return total_bytes;
2151 }
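
/*
 * A usage sketch for count_range_bits(), modeled on clean_io_failure()
 * below.  With @max_bytes == 1 the call degenerates into a cheap existence
 * test; note that the (cur_start == 0 && bits == EXTENT_DIRTY) fast path
 * answers from the cached tree->dirty_bytes without walking the tree:
 *
 *	u64 private = 0;
 *
 *	if (!count_range_bits(failure_tree, &private, (u64)-1, 1,
 *			      EXTENT_DIRTY, 0))
 *		return 0;	(no dirty byte, thus no failure record)
 */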
2152 
2153 /*
2154  * set the private field for a given byte offset in the tree.  If there isn't
2155  * an extent_state there already, -ENOENT is returned and nothing is changed.
2156  */
2157 int set_state_failrec(struct extent_io_tree *tree, u64 start,
2158 		      struct io_failure_record *failrec)
2159 {
2160 	struct rb_node *node;
2161 	struct extent_state *state;
2162 	int ret = 0;
2163 
2164 	spin_lock(&tree->lock);
2165 	/*
2166 	 * this search will find all the extents that end after
2167 	 * our range starts.
2168 	 */
2169 	node = tree_search(tree, start);
2170 	if (!node) {
2171 		ret = -ENOENT;
2172 		goto out;
2173 	}
2174 	state = rb_entry(node, struct extent_state, rb_node);
2175 	if (state->start != start) {
2176 		ret = -ENOENT;
2177 		goto out;
2178 	}
2179 	state->failrec = failrec;
2180 out:
2181 	spin_unlock(&tree->lock);
2182 	return ret;
2183 }
2184 
2185 struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
2186 {
2187 	struct rb_node *node;
2188 	struct extent_state *state;
2189 	struct io_failure_record *failrec;
2190 
2191 	spin_lock(&tree->lock);
2192 	/*
2193 	 * this search will find all the extents that end after
2194 	 * our range starts.
2195 	 */
2196 	node = tree_search(tree, start);
2197 	if (!node) {
2198 		failrec = ERR_PTR(-ENOENT);
2199 		goto out;
2200 	}
2201 	state = rb_entry(node, struct extent_state, rb_node);
2202 	if (state->start != start) {
2203 		failrec = ERR_PTR(-ENOENT);
2204 		goto out;
2205 	}
2206 
2207 	failrec = state->failrec;
2208 out:
2209 	spin_unlock(&tree->lock);
2210 	return failrec;
2211 }
2212 
2213 /*
2214  * searches a range in the state tree for a given mask.
2215  * If 'filled' == 1, this returns 1 only if every extent in the range
2216  * has the bits set.  Otherwise, 1 is returned if any bit in the
2217  * range is found set.
2218  */
2219 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2220 		   u32 bits, int filled, struct extent_state *cached)
2221 {
2222 	struct extent_state *state = NULL;
2223 	struct rb_node *node;
2224 	int bitset = 0;
2225 
2226 	spin_lock(&tree->lock);
2227 	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2228 	    cached->end > start)
2229 		node = &cached->rb_node;
2230 	else
2231 		node = tree_search(tree, start);
2232 	while (node && start <= end) {
2233 		state = rb_entry(node, struct extent_state, rb_node);
2234 
2235 		if (filled && state->start > start) {
2236 			bitset = 0;
2237 			break;
2238 		}
2239 
2240 		if (state->start > end)
2241 			break;
2242 
2243 		if (state->state & bits) {
2244 			bitset = 1;
2245 			if (!filled)
2246 				break;
2247 		} else if (filled) {
2248 			bitset = 0;
2249 			break;
2250 		}
2251 
2252 		if (state->end == (u64)-1)
2253 			break;
2254 
2255 		start = state->end + 1;
2256 		if (start > end)
2257 			break;
2258 		node = rb_next(node);
2259 		if (!node) {
2260 			if (filled)
2261 				bitset = 0;
2262 			break;
2263 		}
2264 	}
2265 	spin_unlock(&tree->lock);
2266 	return bitset;
2267 }
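
/*
 * A usage sketch for test_range_bit(), as used by find_lock_delalloc_range()
 * above.  With filled == 1 the whole [start, end] range must carry the bit
 * for the call to return 1:
 *
 *	ret = test_range_bit(tree, delalloc_start, delalloc_end,
 *			     EXTENT_DELALLOC, 1, cached_state);
 *
 * With filled == 0 instead, a single byte in the range having the bit set
 * is enough for a return value of 1.
 */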
2268 
2269 int free_io_failure(struct extent_io_tree *failure_tree,
2270 		    struct extent_io_tree *io_tree,
2271 		    struct io_failure_record *rec)
2272 {
2273 	int ret;
2274 	int err = 0;
2275 
2276 	set_state_failrec(failure_tree, rec->start, NULL);
2277 	ret = clear_extent_bits(failure_tree, rec->start,
2278 				rec->start + rec->len - 1,
2279 				EXTENT_LOCKED | EXTENT_DIRTY);
2280 	if (ret)
2281 		err = ret;
2282 
2283 	ret = clear_extent_bits(io_tree, rec->start,
2284 				rec->start + rec->len - 1,
2285 				EXTENT_DAMAGED);
2286 	if (ret && !err)
2287 		err = ret;
2288 
2289 	kfree(rec);
2290 	return err;
2291 }
2292 
2293 /*
2294  * this bypasses the standard btrfs submit functions deliberately, as
2295  * the standard behavior is to write all copies in a raid setup. here we only
2296  * want to write the one bad copy. so we do the mapping for ourselves and issue
2297  * submit_bio directly.
2298  * to avoid any synchronization issues, wait for the data after writing, which
2299  * actually prevents the read that triggered the error from finishing.
2300  * currently, there can be no more than two copies of every data bit. thus,
2301  * exactly one rewrite is required.
2302  */
2303 static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2304 			     u64 length, u64 logical, struct page *page,
2305 			     unsigned int pg_offset, int mirror_num)
2306 {
2307 	struct bio *bio;
2308 	struct btrfs_device *dev;
2309 	u64 map_length = 0;
2310 	u64 sector;
2311 	struct btrfs_io_context *bioc = NULL;
2312 	int ret;
2313 
2314 	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
2315 	BUG_ON(!mirror_num);
2316 
2317 	if (btrfs_is_zoned(fs_info))
2318 		return btrfs_repair_one_zone(fs_info, logical);
2319 
2320 	bio = btrfs_bio_alloc(1);
2321 	bio->bi_iter.bi_size = 0;
2322 	map_length = length;
2323 
2324 	/*
2325 	 * Avoid races with device replace and make sure our bioc has devices
2326 	 * associated to its stripes that don't go away while we are doing the
2327 	 * read repair operation.
2328 	 */
2329 	btrfs_bio_counter_inc_blocked(fs_info);
2330 	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2331 		/*
2332 		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2333 		 * to update all raid stripes, but here we just want to correct
2334 		 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2335 		 * stripe's dev and sector.
2336 		 */
2337 		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2338 				      &map_length, &bioc, 0);
2339 		if (ret) {
2340 			btrfs_bio_counter_dec(fs_info);
2341 			bio_put(bio);
2342 			return -EIO;
2343 		}
2344 		ASSERT(bioc->mirror_num == 1);
2345 	} else {
2346 		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2347 				      &map_length, &bioc, mirror_num);
2348 		if (ret) {
2349 			btrfs_bio_counter_dec(fs_info);
2350 			bio_put(bio);
2351 			return -EIO;
2352 		}
2353 		BUG_ON(mirror_num != bioc->mirror_num);
2354 	}
2355 
2356 	sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
2357 	bio->bi_iter.bi_sector = sector;
2358 	dev = bioc->stripes[bioc->mirror_num - 1].dev;
2359 	btrfs_put_bioc(bioc);
2360 	if (!dev || !dev->bdev ||
2361 	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2362 		btrfs_bio_counter_dec(fs_info);
2363 		bio_put(bio);
2364 		return -EIO;
2365 	}
2366 	bio_set_dev(bio, dev->bdev);
2367 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2368 	bio_add_page(bio, page, length, pg_offset);
2369 
2370 	if (btrfsic_submit_bio_wait(bio)) {
2371 		/* try to remap that extent elsewhere? */
2372 		btrfs_bio_counter_dec(fs_info);
2373 		bio_put(bio);
2374 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2375 		return -EIO;
2376 	}
2377 
2378 	btrfs_info_rl_in_rcu(fs_info,
2379 		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
2380 				  ino, start,
2381 				  rcu_str_deref(dev->name), sector);
2382 	btrfs_bio_counter_dec(fs_info);
2383 	bio_put(bio);
2384 	return 0;
2385 }
2386 
2387 int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
2388 {
2389 	struct btrfs_fs_info *fs_info = eb->fs_info;
2390 	u64 start = eb->start;
2391 	int i, num_pages = num_extent_pages(eb);
2392 	int ret = 0;
2393 
2394 	if (sb_rdonly(fs_info->sb))
2395 		return -EROFS;
2396 
2397 	for (i = 0; i < num_pages; i++) {
2398 		struct page *p = eb->pages[i];
2399 
2400 		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2401 					start - page_offset(p), mirror_num);
2402 		if (ret)
2403 			break;
2404 		start += PAGE_SIZE;
2405 	}
2406 
2407 	return ret;
2408 }
2409 
2410 /*
2411  * each time an IO finishes, we do a fast check in the IO failure tree
2412  * to see if we need to process or clean up an io_failure_record
2413  */
2414 int clean_io_failure(struct btrfs_fs_info *fs_info,
2415 		     struct extent_io_tree *failure_tree,
2416 		     struct extent_io_tree *io_tree, u64 start,
2417 		     struct page *page, u64 ino, unsigned int pg_offset)
2418 {
2419 	u64 private;
2420 	struct io_failure_record *failrec;
2421 	struct extent_state *state;
2422 	int num_copies;
2423 	int ret;
2424 
2425 	private = 0;
2426 	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2427 			       EXTENT_DIRTY, 0);
2428 	if (!ret)
2429 		return 0;
2430 
2431 	failrec = get_state_failrec(failure_tree, start);
2432 	if (IS_ERR(failrec))
2433 		return 0;
2434 
2435 	BUG_ON(!failrec->this_mirror);
2436 
2437 	if (sb_rdonly(fs_info->sb))
2438 		goto out;
2439 
2440 	spin_lock(&io_tree->lock);
2441 	state = find_first_extent_bit_state(io_tree,
2442 					    failrec->start,
2443 					    EXTENT_LOCKED);
2444 	spin_unlock(&io_tree->lock);
2445 
2446 	if (state && state->start <= failrec->start &&
2447 	    state->end >= failrec->start + failrec->len - 1) {
2448 		num_copies = btrfs_num_copies(fs_info, failrec->logical,
2449 					      failrec->len);
2450 		if (num_copies > 1)  {
2451 			repair_io_failure(fs_info, ino, start, failrec->len,
2452 					  failrec->logical, page, pg_offset,
2453 					  failrec->failed_mirror);
2454 		}
2455 	}
2456 
2457 out:
2458 	free_io_failure(failure_tree, io_tree, failrec);
2459 
2460 	return 0;
2461 }
2462 
2463 /*
2464  * Can be called when
2465  * - holding the extent lock
2466  * - under an ordered extent
2467  * - the inode is being freed
2468  */
2469 void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2470 {
2471 	struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2472 	struct io_failure_record *failrec;
2473 	struct extent_state *state, *next;
2474 
2475 	if (RB_EMPTY_ROOT(&failure_tree->state))
2476 		return;
2477 
2478 	spin_lock(&failure_tree->lock);
2479 	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2480 	while (state) {
2481 		if (state->start > end)
2482 			break;
2483 
2484 		ASSERT(state->end <= end);
2485 
2486 		next = next_state(state);
2487 
2488 		failrec = state->failrec;
2489 		free_extent_state(state);
2490 		kfree(failrec);
2491 
2492 		state = next;
2493 	}
2494 	spin_unlock(&failure_tree->lock);
2495 }
2496 
2497 static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
2498 							     u64 start)
2499 {
2500 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2501 	struct io_failure_record *failrec;
2502 	struct extent_map *em;
2503 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2504 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2505 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2506 	const u32 sectorsize = fs_info->sectorsize;
2507 	int ret;
2508 	u64 logical;
2509 
2510 	failrec = get_state_failrec(failure_tree, start);
2511 	if (!IS_ERR(failrec)) {
2512 		btrfs_debug(fs_info,
2513 	"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
2514 			failrec->logical, failrec->start, failrec->len);
2515 		/*
2516 		 * when data can be on disk more than twice, extend failrec here
2517 		 * (e.g. with a list of failed mirrors) so that
2518 		 * clean_io_failure() can clean all those errors at once.
2519 		 */
2520 
2521 		return failrec;
2522 	}
2523 
2524 	failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2525 	if (!failrec)
2526 		return ERR_PTR(-ENOMEM);
2527 
2528 	failrec->start = start;
2529 	failrec->len = sectorsize;
2530 	failrec->this_mirror = 0;
2531 	failrec->bio_flags = 0;
2532 
2533 	read_lock(&em_tree->lock);
2534 	em = lookup_extent_mapping(em_tree, start, failrec->len);
2535 	if (!em) {
2536 		read_unlock(&em_tree->lock);
2537 		kfree(failrec);
2538 		return ERR_PTR(-EIO);
2539 	}
2540 
2541 	if (em->start > start || em->start + em->len <= start) {
2542 		free_extent_map(em);
2543 		em = NULL;
2544 	}
2545 	read_unlock(&em_tree->lock);
2546 	if (!em) {
2547 		kfree(failrec);
2548 		return ERR_PTR(-EIO);
2549 	}
2550 
2551 	logical = start - em->start;
2552 	logical = em->block_start + logical;
2553 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2554 		logical = em->block_start;
2555 		failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2556 		extent_set_compress_type(&failrec->bio_flags, em->compress_type);
2557 	}
2558 
2559 	btrfs_debug(fs_info,
2560 		    "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2561 		    logical, start, failrec->len);
2562 
2563 	failrec->logical = logical;
2564 	free_extent_map(em);
2565 
2566 	/* Set the bits in the private failure tree */
2567 	ret = set_extent_bits(failure_tree, start, start + sectorsize - 1,
2568 			      EXTENT_LOCKED | EXTENT_DIRTY);
2569 	if (ret >= 0) {
2570 		ret = set_state_failrec(failure_tree, start, failrec);
2571 		/* Set the bits in the inode's tree */
2572 		ret = set_extent_bits(tree, start, start + sectorsize - 1,
2573 				      EXTENT_DAMAGED);
2574 	} else if (ret < 0) {
2575 		kfree(failrec);
2576 		return ERR_PTR(ret);
2577 	}
2578 
2579 	return failrec;
2580 }
2581 
2582 static bool btrfs_check_repairable(struct inode *inode,
2583 				   struct io_failure_record *failrec,
2584 				   int failed_mirror)
2585 {
2586 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2587 	int num_copies;
2588 
2589 	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2590 	if (num_copies == 1) {
2591 		/*
2592 		 * we only have a single copy of the data, so don't bother with
2593 		 * all the retry and error correction code that follows. no
2594 		 * matter what the error is, it is very likely to persist.
2595 		 */
2596 		btrfs_debug(fs_info,
2597 			"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2598 			num_copies, failrec->this_mirror, failed_mirror);
2599 		return false;
2600 	}
2601 
2602 	/* The failure record should only contain one sector */
2603 	ASSERT(failrec->len == fs_info->sectorsize);
2604 
2605 	/*
2606 	 * There are two premises:
2607 	 * a) deliver good data to the caller
2608 	 * b) correct the bad sectors on disk
2609 	 *
2610 	 * Since we're only doing repair for one sector, we only need to get
2611 	 * a good copy of the failed sector and if we succeed, we have setup
2612 	 * everything for repair_io_failure to do the rest for us.
2613 	 */
2614 	failrec->failed_mirror = failed_mirror;
2615 	failrec->this_mirror++;
2616 	if (failrec->this_mirror == failed_mirror)
2617 		failrec->this_mirror++;
2618 
2619 	if (failrec->this_mirror > num_copies) {
2620 		btrfs_debug(fs_info,
2621 			"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2622 			num_copies, failrec->this_mirror, failed_mirror);
2623 		return false;
2624 	}
2625 
2626 	return true;
2627 }
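
/*
 * A worked example of the mirror rotation above (the values are
 * hypothetical): with num_copies == 2 and failed_mirror == 1, this_mirror
 * goes 0 -> 1, collides with the failed mirror and is bumped to 2, which is
 * still <= num_copies, so the read is retried from mirror 2.  Should that
 * retry fail as well, the next call bumps this_mirror to 3 > num_copies and
 * we give up.
 */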
2628 
2629 int btrfs_repair_one_sector(struct inode *inode,
2630 			    struct bio *failed_bio, u32 bio_offset,
2631 			    struct page *page, unsigned int pgoff,
2632 			    u64 start, int failed_mirror,
2633 			    submit_bio_hook_t *submit_bio_hook)
2634 {
2635 	struct io_failure_record *failrec;
2636 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2637 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2638 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2639 	struct btrfs_bio *failed_bbio = btrfs_bio(failed_bio);
2640 	const int icsum = bio_offset >> fs_info->sectorsize_bits;
2641 	struct bio *repair_bio;
2642 	struct btrfs_bio *repair_bbio;
2643 	blk_status_t status;
2644 
2645 	btrfs_debug(fs_info,
2646 		   "repair read error: read error at %llu", start);
2647 
2648 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2649 
2650 	failrec = btrfs_get_io_failure_record(inode, start);
2651 	if (IS_ERR(failrec))
2652 		return PTR_ERR(failrec);
2653 
2655 	if (!btrfs_check_repairable(inode, failrec, failed_mirror)) {
2656 		free_io_failure(failure_tree, tree, failrec);
2657 		return -EIO;
2658 	}
2659 
2660 	repair_bio = btrfs_bio_alloc(1);
2661 	repair_bbio = btrfs_bio(repair_bio);
2662 	repair_bio->bi_opf = REQ_OP_READ;
2663 	repair_bio->bi_end_io = failed_bio->bi_end_io;
2664 	repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
2665 	repair_bio->bi_private = failed_bio->bi_private;
2666 
2667 	if (failed_bbio->csum) {
2668 		const u32 csum_size = fs_info->csum_size;
2669 
2670 		repair_bbio->csum = repair_bbio->csum_inline;
2671 		memcpy(repair_bbio->csum,
2672 		       failed_bbio->csum + csum_size * icsum, csum_size);
2673 	}
2674 
2675 	bio_add_page(repair_bio, page, failrec->len, pgoff);
2676 	repair_bbio->iter = repair_bio->bi_iter;
2677 
2678 	btrfs_debug(btrfs_sb(inode->i_sb),
2679 		    "repair read error: submitting new read to mirror %d",
2680 		    failrec->this_mirror);
2681 
2682 	status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
2683 				 failrec->bio_flags);
2684 	if (status) {
2685 		free_io_failure(failure_tree, tree, failrec);
2686 		bio_put(repair_bio);
2687 	}
2688 	return blk_status_to_errno(status);
2689 }
2690 
2691 static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
2692 {
2693 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
2694 
2695 	ASSERT(page_offset(page) <= start &&
2696 	       start + len <= page_offset(page) + PAGE_SIZE);
2697 
2698 	if (uptodate) {
2699 		if (fsverity_active(page->mapping->host) &&
2700 		    !PageError(page) &&
2701 		    !PageUptodate(page) &&
2702 		    start < i_size_read(page->mapping->host) &&
2703 		    !fsverity_verify_page(page)) {
2704 			btrfs_page_set_error(fs_info, page, start, len);
2705 		} else {
2706 			btrfs_page_set_uptodate(fs_info, page, start, len);
2707 		}
2708 	} else {
2709 		btrfs_page_clear_uptodate(fs_info, page, start, len);
2710 		btrfs_page_set_error(fs_info, page, start, len);
2711 	}
2712 
2713 	if (fs_info->sectorsize == PAGE_SIZE)
2714 		unlock_page(page);
2715 	else
2716 		btrfs_subpage_end_reader(fs_info, page, start, len);
2717 }
2718 
2719 static blk_status_t submit_read_repair(struct inode *inode,
2720 				      struct bio *failed_bio, u32 bio_offset,
2721 				      struct page *page, unsigned int pgoff,
2722 				      u64 start, u64 end, int failed_mirror,
2723 				      unsigned int error_bitmap,
2724 				      submit_bio_hook_t *submit_bio_hook)
2725 {
2726 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2727 	const u32 sectorsize = fs_info->sectorsize;
2728 	const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
2729 	int error = 0;
2730 	int i;
2731 
2732 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2733 
2734 	/* We're here because we had some read errors or csum mismatch */
2735 	ASSERT(error_bitmap);
2736 
2737 	/*
2738 	 * We only get called on buffered IO, thus page must be mapped and bio
2739 	 * must not be cloned.
2740 	 */
2741 	ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED));
2742 
2743 	/* Iterate through all the sectors in the range */
2744 	for (i = 0; i < nr_bits; i++) {
2745 		const unsigned int offset = i * sectorsize;
2746 		struct extent_state *cached = NULL;
2747 		bool uptodate = false;
2748 		int ret;
2749 
2750 		if (!(error_bitmap & (1U << i))) {
2751 			/*
2752 			 * This sector has no error, just end the page read
2753 			 * and unlock the range.
2754 			 */
2755 			uptodate = true;
2756 			goto next;
2757 		}
2758 
2759 		ret = btrfs_repair_one_sector(inode, failed_bio,
2760 				bio_offset + offset,
2761 				page, pgoff + offset, start + offset,
2762 				failed_mirror, submit_bio_hook);
2763 		if (!ret) {
2764 			/*
2765 			 * We have submitted the read repair, the page release
2766 			 * will be handled by the endio function of the
2767 			 * submitted repair bio.
2768 			 * Thus we don't need to do anything here.
2769 			 */
2770 			continue;
2771 		}
2772 		/*
2773 		 * Repair failed, just record the error but still continue.
2774 		 * Otherwise the remaining sectors will not be properly unlocked.
2775 		 */
2776 		if (!error)
2777 			error = ret;
2778 next:
2779 		end_page_read(page, uptodate, start + offset, sectorsize);
2780 		if (uptodate)
2781 			set_extent_uptodate(&BTRFS_I(inode)->io_tree,
2782 					start + offset,
2783 					start + offset + sectorsize - 1,
2784 					&cached, GFP_ATOMIC);
2785 		unlock_extent_cached_atomic(&BTRFS_I(inode)->io_tree,
2786 				start + offset,
2787 				start + offset + sectorsize - 1,
2788 				&cached);
2789 	}
2790 	return errno_to_blk_status(error);
2791 }
2792 
2793 /* lots and lots of room for performance fixes in the end_bio funcs */
2794 
2795 void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2796 {
2797 	struct btrfs_inode *inode;
2798 	const bool uptodate = (err == 0);
2799 	int ret = 0;
2800 
2801 	ASSERT(page && page->mapping);
2802 	inode = BTRFS_I(page->mapping->host);
2803 	btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
2804 
2805 	if (!uptodate) {
2806 		const struct btrfs_fs_info *fs_info = inode->root->fs_info;
2807 		u32 len;
2808 
2809 		ASSERT(end + 1 - start <= U32_MAX);
2810 		len = end + 1 - start;
2811 
2812 		btrfs_page_clear_uptodate(fs_info, page, start, len);
2813 		btrfs_page_set_error(fs_info, page, start, len);
2814 		ret = err < 0 ? err : -EIO;
2815 		mapping_set_error(page->mapping, ret);
2816 	}
2817 }
2818 
2819 /*
2820  * after a writepage IO is done, we need to:
2821  * clear the uptodate bits on error
2822  * clear the writeback bits in the extent tree for this IO
2823  * end_page_writeback if the page has no more pending IO
2824  *
2825  * Scheduling is not allowed, so the extent state tree is expected
2826  * to have one and only one object corresponding to this IO.
2827  */
2828 static void end_bio_extent_writepage(struct bio *bio)
2829 {
2830 	int error = blk_status_to_errno(bio->bi_status);
2831 	struct bio_vec *bvec;
2832 	u64 start;
2833 	u64 end;
2834 	struct bvec_iter_all iter_all;
2835 	bool first_bvec = true;
2836 
2837 	ASSERT(!bio_flagged(bio, BIO_CLONED));
2838 	bio_for_each_segment_all(bvec, bio, iter_all) {
2839 		struct page *page = bvec->bv_page;
2840 		struct inode *inode = page->mapping->host;
2841 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2842 		const u32 sectorsize = fs_info->sectorsize;
2843 
2844 		/* Our read/write should always be sector aligned. */
2845 		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
2846 			btrfs_err(fs_info,
2847 		"partial page write in btrfs with offset %u and length %u",
2848 				  bvec->bv_offset, bvec->bv_len);
2849 		else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
2850 			btrfs_info(fs_info,
2851 		"incomplete page write with offset %u and length %u",
2852 				   bvec->bv_offset, bvec->bv_len);
2853 
2854 		start = page_offset(page) + bvec->bv_offset;
2855 		end = start + bvec->bv_len - 1;
2856 
2857 		if (first_bvec) {
2858 			btrfs_record_physical_zoned(inode, start, bio);
2859 			first_bvec = false;
2860 		}
2861 
2862 		end_extent_writepage(page, error, start, end);
2863 
2864 		btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
2865 	}
2866 
2867 	bio_put(bio);
2868 }
2869 
2870 /*
2871  * Record previously processed extent range
2872  *
2873  * Used by endio_readpage_release_extent() to batch contiguous ranges into a
2874  * full extent range, reducing the number of extent io operations.
2875  */
2876 struct processed_extent {
2877 	struct btrfs_inode *inode;
2878 	/* Start of the range in @inode */
2879 	u64 start;
2880 	/* End of the range in @inode */
2881 	u64 end;
2882 	bool uptodate;
2883 };
2884 
2885 /*
2886  * Try to release processed extent range
2887  *
2888  * May not release the extent range right now if the current range is
2889  * contiguous to processed extent.
2890  *
2891  * Will release the processed extent when @inode or @uptodate differs, or
2892  * when the range is no longer contiguous to the processed range.
2893  *
2894  * Passing @inode == NULL will force processed extent to be released.
2895  */
2896 static void endio_readpage_release_extent(struct processed_extent *processed,
2897 			      struct btrfs_inode *inode, u64 start, u64 end,
2898 			      bool uptodate)
2899 {
2900 	struct extent_state *cached = NULL;
2901 	struct extent_io_tree *tree;
2902 
2903 	/* The first extent, initialize @processed */
2904 	if (!processed->inode)
2905 		goto update;
2906 
2907 	/*
2908 	 * Contiguous to the processed extent, just update the end.
2909 	 *
2910 	 * Several things to notice:
2911 	 *
2912 	 * - bio can be merged as long as the on-disk bytenr is contiguous
2913 	 *   This means we can have pages belonging to other inodes, thus need to
2914 	 *   check if the inode still matches.
2915 	 * - a bvec can contain a range beyond the current page for a multi-page bvec
2916 	 *   Thus we need the processed->end + 1 >= start check
2917 	 */
2918 	if (processed->inode == inode && processed->uptodate == uptodate &&
2919 	    processed->end + 1 >= start && end >= processed->end) {
2920 		processed->end = end;
2921 		return;
2922 	}
2923 
2924 	tree = &processed->inode->io_tree;
2925 	/*
2926 	 * Now we don't have range contiguous to the processed range, release
2927 	 * the processed range now.
2928 	 */
2929 	if (processed->uptodate && tree->track_uptodate)
2930 		set_extent_uptodate(tree, processed->start, processed->end,
2931 				    &cached, GFP_ATOMIC);
2932 	unlock_extent_cached_atomic(tree, processed->start, processed->end,
2933 				    &cached);
2934 
2935 update:
2936 	/* Update processed to current range */
2937 	processed->inode = inode;
2938 	processed->start = start;
2939 	processed->end = end;
2940 	processed->uptodate = uptodate;
2941 }
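
/*
 * A worked example of the batching above (hypothetical 4K sectorsize):
 * after processing [0, 4095] of an inode, a call for [4096, 8191] with the
 * same inode and the same uptodate value only extends processed->end to
 * 8191, nothing is unlocked yet.  A later call for a different inode (or
 * the final NULL-inode call) then unlocks [0, 8191] with a single
 * unlock_extent_cached_atomic() operation.
 */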
2942 
2943 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
2944 {
2945 	ASSERT(PageLocked(page));
2946 	if (fs_info->sectorsize == PAGE_SIZE)
2947 		return;
2948 
2949 	ASSERT(PagePrivate(page));
2950 	btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
2951 }
2952 
2953 /*
2954  * Find the extent buffer for a given bytenr.
2955  *
2956  * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
2957  * in endio context.
2958  */
2959 static struct extent_buffer *find_extent_buffer_readpage(
2960 		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
2961 {
2962 	struct extent_buffer *eb;
2963 
2964 	/*
2965 	 * For regular sectorsize, we can use page->private to grab extent
2966 	 * buffer
2967 	 */
2968 	if (fs_info->sectorsize == PAGE_SIZE) {
2969 		ASSERT(PagePrivate(page) && page->private);
2970 		return (struct extent_buffer *)page->private;
2971 	}
2972 
2973 	/* For the subpage case, we need to look up the buffer radix tree */
2974 	rcu_read_lock();
2975 	eb = radix_tree_lookup(&fs_info->buffer_radix,
2976 			       bytenr >> fs_info->sectorsize_bits);
2977 	rcu_read_unlock();
2978 	ASSERT(eb);
2979 	return eb;
2980 }
2981 
2982 /*
2983  * after a readpage IO is done, we need to:
2984  * clear the uptodate bits on error
2985  * set the uptodate bits if things worked
2986  * set the page up to date if all extents in the tree are uptodate
2987  * clear the lock bit in the extent tree
2988  * unlock the page if there are no other extents locked for it
2989  *
2990  * Scheduling is not allowed, so the extent state tree is expected
2991  * to have one and only one object corresponding to this IO.
2992  */
2993 static void end_bio_extent_readpage(struct bio *bio)
2994 {
2995 	struct bio_vec *bvec;
2996 	struct btrfs_bio *bbio = btrfs_bio(bio);
2997 	struct extent_io_tree *tree, *failure_tree;
2998 	struct processed_extent processed = { 0 };
2999 	/*
3000 	 * The offset to the beginning of the bio; since one bio can never be
3001 	 * larger than UINT_MAX, a u32 here is enough.
3002 	 */
3003 	u32 bio_offset = 0;
3004 	int mirror;
3005 	int ret;
3006 	struct bvec_iter_all iter_all;
3007 
3008 	ASSERT(!bio_flagged(bio, BIO_CLONED));
3009 	bio_for_each_segment_all(bvec, bio, iter_all) {
3010 		bool uptodate = !bio->bi_status;
3011 		struct page *page = bvec->bv_page;
3012 		struct inode *inode = page->mapping->host;
3013 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3014 		const u32 sectorsize = fs_info->sectorsize;
3015 		unsigned int error_bitmap = (unsigned int)-1;
3016 		u64 start;
3017 		u64 end;
3018 		u32 len;
3019 
3020 		btrfs_debug(fs_info,
3021 			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
3022 			bio->bi_iter.bi_sector, bio->bi_status,
3023 			bbio->mirror_num);
3024 		tree = &BTRFS_I(inode)->io_tree;
3025 		failure_tree = &BTRFS_I(inode)->io_failure_tree;
3026 
3027 		/*
3028 		 * We always issue full-sector reads, but if some block in a
3029 		 * page fails to read, blk_update_request() will advance
3030 		 * bv_offset and adjust bv_len to compensate.  Print a warning
3031 		 * for unaligned offsets, and an error if they don't add up to
3032 		 * a full sector.
3033 		 */
3034 		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
3035 			btrfs_err(fs_info,
3036 		"partial page read in btrfs with offset %u and length %u",
3037 				  bvec->bv_offset, bvec->bv_len);
3038 		else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
3039 				     sectorsize))
3040 			btrfs_info(fs_info,
3041 		"incomplete page read with offset %u and length %u",
3042 				   bvec->bv_offset, bvec->bv_len);
3043 
3044 		start = page_offset(page) + bvec->bv_offset;
3045 		end = start + bvec->bv_len - 1;
3046 		len = bvec->bv_len;
3047 
3048 		mirror = bbio->mirror_num;
3049 		if (likely(uptodate)) {
3050 			if (is_data_inode(inode)) {
3051 				error_bitmap = btrfs_verify_data_csum(bbio,
3052 						bio_offset, page, start, end);
3053 				ret = error_bitmap;
3054 			} else {
3055 				ret = btrfs_validate_metadata_buffer(bbio,
3056 					page, start, end, mirror);
3057 			}
3058 			if (ret)
3059 				uptodate = false;
3060 			else
3061 				clean_io_failure(BTRFS_I(inode)->root->fs_info,
3062 						 failure_tree, tree, start,
3063 						 page,
3064 						 btrfs_ino(BTRFS_I(inode)), 0);
3065 		}
3066 
3067 		if (likely(uptodate))
3068 			goto readpage_ok;
3069 
3070 		if (is_data_inode(inode)) {
3071 			/*
3072 			 * submit_read_repair() will handle all the good
3073 			 * and bad sectors, we just continue to the next bvec.
3074 			 */
3075 			submit_read_repair(inode, bio, bio_offset, page,
3076 					   start - page_offset(page), start,
3077 					   end, mirror, error_bitmap,
3078 					   btrfs_submit_data_bio);
3079 
3080 			ASSERT(bio_offset + len > bio_offset);
3081 			bio_offset += len;
3082 			continue;
3083 		} else {
3084 			struct extent_buffer *eb;
3085 
3086 			eb = find_extent_buffer_readpage(fs_info, page, start);
3087 			set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3088 			eb->read_mirror = mirror;
3089 			atomic_dec(&eb->io_pages);
3090 			if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
3091 					       &eb->bflags))
3092 				btree_readahead_hook(eb, -EIO);
3093 		}
3094 readpage_ok:
3095 		if (likely(uptodate)) {
3096 			loff_t i_size = i_size_read(inode);
3097 			pgoff_t end_index = i_size >> PAGE_SHIFT;
3098 
3099 			/*
3100 			 * Zero out the remaining part if this range straddles
3101 			 * i_size.
3102 			 *
3103 			 * Here we should only zero the range inside the bvec,
3104 			 * not touch anything else.
3105 			 *
3106 			 * NOTE: i_size is exclusive while end is inclusive.
3107 			 */
3108 			if (page->index == end_index && i_size <= end) {
3109 				u32 zero_start = max(offset_in_page(i_size),
3110 						     offset_in_page(start));
3111 
3112 				zero_user_segment(page, zero_start,
3113 						  offset_in_page(end) + 1);
3114 			}
3115 		}
3116 		ASSERT(bio_offset + len > bio_offset);
3117 		bio_offset += len;
3118 
3119 		/* Update page status and unlock */
3120 		end_page_read(page, uptodate, start, len);
3121 		endio_readpage_release_extent(&processed, BTRFS_I(inode),
3122 					      start, end, PageUptodate(page));
3123 	}
3124 	/* Release the last extent */
3125 	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
3126 	btrfs_bio_free_csum(bbio);
3127 	bio_put(bio);
3128 }
3129 
3130 /*
3131  * Initialize the members up to but not including 'bio'. Use after allocating a
3132  * new bio with bio_alloc_bioset(), as it does not initialize the bytes outside
3133  * of 'bio' because use of __GFP_ZERO is not supported.
3134  */
3135 static inline void btrfs_bio_init(struct btrfs_bio *bbio)
3136 {
3137 	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
3138 }
3139 
3140 /*
3141  * Allocate a btrfs_bio, with @nr_iovecs as the maximum number of iovecs.
3142  *
3143  * The bio allocation is backed by bioset and does not fail.
3144  */
3145 struct bio *btrfs_bio_alloc(unsigned int nr_iovecs)
3146 {
3147 	struct bio *bio;
3148 
3149 	ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
3150 	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
3151 	btrfs_bio_init(btrfs_bio(bio));
3152 	return bio;
3153 }
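
/*
 * A minimal usage sketch for btrfs_bio_alloc(), modeled on
 * repair_io_failure() above; @dev, @sector, @page, @length and @pg_offset
 * are assumed to be set up by the caller:
 *
 *	struct bio *bio = btrfs_bio_alloc(1);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 *	bio_set_dev(bio, dev->bdev);
 *	bio_add_page(bio, page, length, pg_offset);
 */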
3154 
3155 struct bio *btrfs_bio_clone(struct bio *bio)
3156 {
3157 	struct btrfs_bio *bbio;
3158 	struct bio *new;
3159 
3160 	/* Bio allocation backed by a bioset does not fail */
3161 	new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
3162 	bbio = btrfs_bio(new);
3163 	btrfs_bio_init(bbio);
3164 	bbio->iter = bio->bi_iter;
3165 	return new;
3166 }
3167 
3168 struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
3169 {
3170 	struct bio *bio;
3171 	struct btrfs_bio *bbio;
3172 
3173 	ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
3174 
3175 	/* this will never fail when it's backed by a bioset */
3176 	bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
3177 	ASSERT(bio);
3178 
3179 	bbio = btrfs_bio(bio);
3180 	btrfs_bio_init(bbio);
3181 
3182 	bio_trim(bio, offset >> 9, size >> 9);
3183 	bbio->iter = bio->bi_iter;
3184 	return bio;
3185 }
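
/*
 * A usage sketch for btrfs_bio_clone_partial(); the SZ_64K/SZ_16K values
 * are hypothetical.  Carve the 16K starting at byte offset 64K out of
 * @orig, e.g. for the part of a bio that maps to a different stripe:
 *
 *	struct bio *bio = btrfs_bio_clone_partial(orig, SZ_64K, SZ_16K);
 *
 * Note that bio_trim() takes sectors, hence the >> 9 above; @offset and
 * @size are given in bytes and are expected to be sector aligned.
 */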
3186 
3187 /**
3188  * Attempt to add a page to bio
3189  *
3190  * @bio_ctrl:	holds the bio to add to and its bio_flags
3191  * @page:	page to add to the bio
3192  * @disk_bytenr:  disk bytenr of the range, used to check whether we are
3193  *                adding a contiguous page to the previous one
3194  * @size:	portion of page that we want to write
3195  * @pg_offset:	starting offset in the page
3196  * @bio_flags:	flags of the current bio, compared against
3197  *		bio_ctrl->bio_flags to see if we can merge them
3198  *
3199  * Attempt to add a page to bio considering stripe alignment etc.
3200  *
3201  * Return >= 0 for the number of bytes added to the bio.
3202  * Can return 0 if the current bio is already at stripe/zone boundary.
3203  * Return <0 for error.
3204  */
3205 static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
3206 			      struct page *page,
3207 			      u64 disk_bytenr, unsigned int size,
3208 			      unsigned int pg_offset,
3209 			      unsigned long bio_flags)
3210 {
3211 	struct bio *bio = bio_ctrl->bio;
3212 	u32 bio_size = bio->bi_iter.bi_size;
3213 	u32 real_size;
3214 	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
3215 	bool contig;
3216 	int ret;
3217 
3218 	ASSERT(bio);
3219 	/* The limit should be calculated when bio_ctrl->bio is allocated */
3220 	ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
3221 	if (bio_ctrl->bio_flags != bio_flags)
3222 		return 0;
3223 
3224 	if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED)
3225 		contig = bio->bi_iter.bi_sector == sector;
3226 	else
3227 		contig = bio_end_sector(bio) == sector;
3228 	if (!contig)
3229 		return 0;
3230 
3231 	real_size = min(bio_ctrl->len_to_oe_boundary,
3232 			bio_ctrl->len_to_stripe_boundary) - bio_size;
3233 	real_size = min(real_size, size);
3234 
3235 	/*
3236 	 * If real_size is 0, never call bio_add_*_page(), as even if size is 0,
3237 	 * the bio will still execute its endio function on the page!
3238 	 */
3239 	if (real_size == 0)
3240 		return 0;
3241 
3242 	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
3243 		ret = bio_add_zone_append_page(bio, page, real_size, pg_offset);
3244 	else
3245 		ret = bio_add_page(bio, page, real_size, pg_offset);
3246 
3247 	return ret;
3248 }
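
/*
 * A worked example of the boundary clamping above (hypothetical numbers):
 * with 56K already in the bio (bio_size), len_to_oe_boundary ==
 * len_to_stripe_boundary == 64K and a 16K @size to add, real_size becomes
 * min(64K, 64K) - 56K = 8K, so only 8K is added.  The caller
 * (submit_extent_page()) then sees added < size, submits the bio at the
 * boundary and allocates a new one for the rest.
 */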
3249 
3250 static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
3251 			       struct btrfs_inode *inode, u64 file_offset)
3252 {
3253 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3254 	struct btrfs_io_geometry geom;
3255 	struct btrfs_ordered_extent *ordered;
3256 	struct extent_map *em;
3257 	u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
3258 	int ret;
3259 
3260 	/*
3261 	 * Pages for a compressed extent are never submitted to disk directly,
3262 	 * thus there is no real boundary; just set both to U32_MAX.
3263 	 *
3264 	 * The split happens for the real compressed bio, which is done in
3265 	 * btrfs_submit_compressed_read/write().
3266 	 */
3267 	if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED) {
3268 		bio_ctrl->len_to_oe_boundary = U32_MAX;
3269 		bio_ctrl->len_to_stripe_boundary = U32_MAX;
3270 		return 0;
3271 	}
3272 	em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
3273 	if (IS_ERR(em))
3274 		return PTR_ERR(em);
3275 	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
3276 				    logical, &geom);
3277 	free_extent_map(em);
3278 	if (ret < 0)
3279 		return ret;
3281 	if (geom.len > U32_MAX)
3282 		bio_ctrl->len_to_stripe_boundary = U32_MAX;
3283 	else
3284 		bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
3285 
3286 	if (!btrfs_is_zoned(fs_info) ||
3287 	    bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
3288 		bio_ctrl->len_to_oe_boundary = U32_MAX;
3289 		return 0;
3290 	}
3291 
3292 	/* Ordered extent not yet created, so we're good */
3293 	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
3294 	if (!ordered) {
3295 		bio_ctrl->len_to_oe_boundary = U32_MAX;
3296 		return 0;
3297 	}
3298 
3299 	bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
3300 		ordered->disk_bytenr + ordered->disk_num_bytes - logical);
3301 	btrfs_put_ordered_extent(ordered);
3302 	return 0;
3303 }
3304 
3305 static int alloc_new_bio(struct btrfs_inode *inode,
3306 			 struct btrfs_bio_ctrl *bio_ctrl,
3307 			 struct writeback_control *wbc,
3308 			 unsigned int opf,
3309 			 bio_end_io_t end_io_func,
3310 			 u64 disk_bytenr, u32 offset, u64 file_offset,
3311 			 unsigned long bio_flags)
3312 {
3313 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3314 	struct bio *bio;
3315 	int ret;
3316 
3317 	bio = btrfs_bio_alloc(BIO_MAX_VECS);
3318 	/*
3319 	 * For a compressed page range, its disk_bytenr is always the @disk_bytenr
3320 	 * passed in, no matter whether we have added any range to a previous bio.
3321 	 */
3322 	if (bio_flags & EXTENT_BIO_COMPRESSED)
3323 		bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
3324 	else
3325 		bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
3326 	bio_ctrl->bio = bio;
3327 	bio_ctrl->bio_flags = bio_flags;
3328 	bio->bi_end_io = end_io_func;
3329 	bio->bi_private = &inode->io_tree;
3330 	bio->bi_write_hint = inode->vfs_inode.i_write_hint;
3331 	bio->bi_opf = opf;
3332 	ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
3333 	if (ret < 0)
3334 		goto error;
3335 	if (wbc) {
3336 		struct block_device *bdev;
3337 
3338 		bdev = fs_info->fs_devices->latest_dev->bdev;
3339 		bio_set_dev(bio, bdev);
3340 		wbc_init_bio(wbc, bio);
3341 	}
3342 	if (btrfs_is_zoned(fs_info) && bio_op(bio) == REQ_OP_ZONE_APPEND) {
3343 		struct btrfs_device *device;
3344 
3345 		device = btrfs_zoned_get_device(fs_info, disk_bytenr,
3346 						fs_info->sectorsize);
3347 		if (IS_ERR(device)) {
3348 			ret = PTR_ERR(device);
3349 			goto error;
3350 		}
3351 
3352 		btrfs_bio(bio)->device = device;
3353 	}
3354 	return 0;
3355 error:
3356 	bio_ctrl->bio = NULL;
3357 	bio->bi_status = errno_to_blk_status(ret);
3358 	bio_endio(bio);
3359 	return ret;
3360 }
3361 
3362 /*
3363  * @opf:	bio REQ_OP_* and REQ_* flags as one value
3364  * @wbc:	optional writeback control for io accounting
3365  * @bio_ctrl:	holds the bio being built and its bio_flags, a newly
3366  *		allocated bio is stored there when one is needed
3367  * @page:	page to add to the bio
3368  * @disk_bytenr: logical bytenr where the write will be
3369  * @size:	portion of page that we want to write to
3370  * @pg_offset:	starting offset in the page
3371  * @end_io_func:     end_io callback for new bio
3372  * @mirror_num:	     desired mirror to read/write
3373  * @bio_flags:	flags of the current bio to see if we can merge them
3374  * @force_bio_submit: submit any pending bio before adding this page
3375  */
3376 static int submit_extent_page(unsigned int opf,
3377 			      struct writeback_control *wbc,
3378 			      struct btrfs_bio_ctrl *bio_ctrl,
3379 			      struct page *page, u64 disk_bytenr,
3380 			      size_t size, unsigned long pg_offset,
3381 			      bio_end_io_t end_io_func,
3382 			      int mirror_num,
3383 			      unsigned long bio_flags,
3384 			      bool force_bio_submit)
3385 {
3386 	int ret = 0;
3387 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
3388 	unsigned int cur = pg_offset;
3389 
3390 	ASSERT(bio_ctrl);
3391 
3392 	ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
3393 	       pg_offset + size <= PAGE_SIZE);
3394 	if (force_bio_submit && bio_ctrl->bio) {
3395 		ret = submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->bio_flags);
3396 		bio_ctrl->bio = NULL;
3397 		if (ret < 0)
3398 			return ret;
3399 	}
3400 
3401 	while (cur < pg_offset + size) {
3402 		u32 offset = cur - pg_offset;
3403 		int added;
3404 
3405 		/* Allocate new bio if needed */
3406 		if (!bio_ctrl->bio) {
3407 			ret = alloc_new_bio(inode, bio_ctrl, wbc, opf,
3408 					    end_io_func, disk_bytenr, offset,
3409 					    page_offset(page) + cur,
3410 					    bio_flags);
3411 			if (ret < 0)
3412 				return ret;
3413 		}
3414 		/*
3415 		 * We must go through btrfs_bio_add_page() to ensure each
3416 		 * page range won't cross various boundaries.
3417 		 */
3418 		if (bio_flags & EXTENT_BIO_COMPRESSED)
3419 			added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr,
3420 					size - offset, pg_offset + offset,
3421 					bio_flags);
3422 		else
3423 			added = btrfs_bio_add_page(bio_ctrl, page,
3424 					disk_bytenr + offset, size - offset,
3425 					pg_offset + offset, bio_flags);
3426 
3427 		/* Metadata page range should never be split */
3428 		if (!is_data_inode(&inode->vfs_inode))
3429 			ASSERT(added == 0 || added == size - offset);
3430 
3431 		/* At least we added some page, update the account */
3432 		if (wbc && added)
3433 			wbc_account_cgroup_owner(wbc, page, added);
3434 
3435 		/* We have reached boundary, submit right now */
3436 		if (added < size - offset) {
3437 			/* The bio should contain some page(s) */
3438 			ASSERT(bio_ctrl->bio->bi_iter.bi_size);
3439 			ret = submit_one_bio(bio_ctrl->bio, mirror_num,
3440 					bio_ctrl->bio_flags);
3441 			bio_ctrl->bio = NULL;
3442 			if (ret < 0)
3443 				return ret;
3444 		}
3445 		cur += added;
3446 	}
3447 	return 0;
3448 }
3449 
3450 static int attach_extent_buffer_page(struct extent_buffer *eb,
3451 				     struct page *page,
3452 				     struct btrfs_subpage *prealloc)
3453 {
3454 	struct btrfs_fs_info *fs_info = eb->fs_info;
3455 	int ret = 0;
3456 
3457 	/*
3458 	 * If the page is mapped to btree inode, we should hold the private
3459 	 * lock to prevent race.
3460 	 * For cloned or dummy extent buffers, their pages are not mapped and
3461 	 * will not race with any other ebs.
3462 	 */
3463 	if (page->mapping)
3464 		lockdep_assert_held(&page->mapping->private_lock);
3465 
3466 	if (fs_info->sectorsize == PAGE_SIZE) {
3467 		if (!PagePrivate(page))
3468 			attach_page_private(page, eb);
3469 		else
3470 			WARN_ON(page->private != (unsigned long)eb);
3471 		return 0;
3472 	}
3473 
3474 	/* Already mapped, just free prealloc */
3475 	if (PagePrivate(page)) {
3476 		btrfs_free_subpage(prealloc);
3477 		return 0;
3478 	}
3479 
3480 	if (prealloc)
3481 		/* Has preallocated memory for subpage */
3482 		attach_page_private(page, prealloc);
3483 	else
3484 		/* Do new allocation to attach subpage */
3485 		ret = btrfs_attach_subpage(fs_info, page,
3486 					   BTRFS_SUBPAGE_METADATA);
3487 	return ret;
3488 }
3489 
3490 int set_page_extent_mapped(struct page *page)
3491 {
3492 	struct btrfs_fs_info *fs_info;
3493 
3494 	ASSERT(page->mapping);
3495 
3496 	if (PagePrivate(page))
3497 		return 0;
3498 
3499 	fs_info = btrfs_sb(page->mapping->host->i_sb);
3500 
3501 	if (fs_info->sectorsize < PAGE_SIZE)
3502 		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
3503 
3504 	attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3505 	return 0;
3506 }
3507 
3508 void clear_page_extent_mapped(struct page *page)
3509 {
3510 	struct btrfs_fs_info *fs_info;
3511 
3512 	ASSERT(page->mapping);
3513 
3514 	if (!PagePrivate(page))
3515 		return;
3516 
3517 	fs_info = btrfs_sb(page->mapping->host->i_sb);
3518 	if (fs_info->sectorsize < PAGE_SIZE)
3519 		return btrfs_detach_subpage(fs_info, page);
3520 
3521 	detach_page_private(page);
3522 }
3523 
3524 static struct extent_map *
3525 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3526 		 u64 start, u64 len, struct extent_map **em_cached)
3527 {
3528 	struct extent_map *em;
3529 
3530 	if (em_cached && *em_cached) {
3531 		em = *em_cached;
3532 		if (extent_map_in_tree(em) && start >= em->start &&
3533 		    start < extent_map_end(em)) {
3534 			refcount_inc(&em->refs);
3535 			return em;
3536 		}
3537 
3538 		free_extent_map(em);
3539 		*em_cached = NULL;
3540 	}
3541 
3542 	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
3543 	if (em_cached && !IS_ERR_OR_NULL(em)) {
3544 		BUG_ON(*em_cached);
3545 		refcount_inc(&em->refs);
3546 		*em_cached = em;
3547 	}
3548 	return em;
3549 }
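
/*
 * A usage sketch for __get_extent_map(); @em_cached lets a caller reading
 * consecutive ranges reuse one extent_map across calls (modeled on
 * btrfs_do_readpage() below).  Every returned map carries an extra ref:
 *
 *	struct extent_map *em_cached = NULL;
 *	struct extent_map *em;
 *
 *	em = __get_extent_map(inode, page, 0, cur, end - cur + 1, &em_cached);
 *	if (!IS_ERR_OR_NULL(em)) {
 *		(use @em, then drop our ref)
 *		free_extent_map(em);
 *	}
 *	if (em_cached)
 *		free_extent_map(em_cached);
 */
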
3550 /*
3551  * basic readpage implementation.  Locked extent state structs are inserted
3552  * into the tree; they are removed when the IO is done (by the end_io
3553  * handlers).
3554  * XXX JDM: This needs looking at to ensure proper page locking
3555  * return 0 on success, otherwise return error
3556  */
3557 int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
3558 		      struct btrfs_bio_ctrl *bio_ctrl,
3559 		      unsigned int read_flags, u64 *prev_em_start)
3560 {
3561 	struct inode *inode = page->mapping->host;
3562 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3563 	u64 start = page_offset(page);
3564 	const u64 end = start + PAGE_SIZE - 1;
3565 	u64 cur = start;
3566 	u64 extent_offset;
3567 	u64 last_byte = i_size_read(inode);
3568 	u64 block_start;
3569 	u64 cur_end;
3570 	struct extent_map *em;
3571 	int ret = 0;
3572 	int nr = 0;
3573 	size_t pg_offset = 0;
3574 	size_t iosize;
3575 	size_t blocksize = inode->i_sb->s_blocksize;
3576 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3577 
3578 	ret = set_page_extent_mapped(page);
3579 	if (ret < 0) {
3580 		unlock_extent(tree, start, end);
3581 		btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
3582 		unlock_page(page);
3583 		goto out;
3584 	}
3585 
3586 	if (!PageUptodate(page)) {
3587 		if (cleancache_get_page(page) == 0) {
3588 			BUG_ON(blocksize != PAGE_SIZE);
3589 			unlock_extent(tree, start, end);
3590 			unlock_page(page);
3591 			goto out;
3592 		}
3593 	}
3594 
3595 	if (page->index == last_byte >> PAGE_SHIFT) {
3596 		size_t zero_offset = offset_in_page(last_byte);
3597 
3598 		if (zero_offset) {
3599 			iosize = PAGE_SIZE - zero_offset;
3600 			memzero_page(page, zero_offset, iosize);
3601 			flush_dcache_page(page);
3602 		}
3603 	}
3604 	begin_page_read(fs_info, page);
3605 	while (cur <= end) {
3606 		unsigned long this_bio_flag = 0;
3607 		bool force_bio_submit = false;
3608 		u64 disk_bytenr;
3609 
3610 		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
3611 		if (cur >= last_byte) {
3612 			struct extent_state *cached = NULL;
3613 
3614 			iosize = PAGE_SIZE - pg_offset;
3615 			memzero_page(page, pg_offset, iosize);
3616 			flush_dcache_page(page);
3617 			set_extent_uptodate(tree, cur, cur + iosize - 1,
3618 					    &cached, GFP_NOFS);
3619 			unlock_extent_cached(tree, cur,
3620 					     cur + iosize - 1, &cached);
3621 			end_page_read(page, true, cur, iosize);
3622 			break;
3623 		}
3624 		em = __get_extent_map(inode, page, pg_offset, cur,
3625 				      end - cur + 1, em_cached);
3626 		if (IS_ERR_OR_NULL(em)) {
3627 			unlock_extent(tree, cur, end);
3628 			end_page_read(page, false, cur, end + 1 - cur);
3629 			break;
3630 		}
3631 		extent_offset = cur - em->start;
3632 		BUG_ON(extent_map_end(em) <= cur);
3633 		BUG_ON(end < cur);
3634 
3635 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3636 			this_bio_flag |= EXTENT_BIO_COMPRESSED;
3637 			extent_set_compress_type(&this_bio_flag,
3638 						 em->compress_type);
3639 		}
3640 
3641 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
3642 		cur_end = min(extent_map_end(em) - 1, end);
3643 		iosize = ALIGN(iosize, blocksize);
3644 		if (this_bio_flag & EXTENT_BIO_COMPRESSED)
3645 			disk_bytenr = em->block_start;
3646 		else
3647 			disk_bytenr = em->block_start + extent_offset;
3648 		block_start = em->block_start;
3649 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3650 			block_start = EXTENT_MAP_HOLE;
3651 
3652 		/*
3653 		 * If we have a file range that points to a compressed extent
3654 		 * and it's followed by a consecutive file range that points
3655 		 * to the same compressed extent (possibly with a different
3656 		 * offset and/or length, so it either points to the whole extent
3657 		 * or only part of it), we must make sure we do not submit a
3658 		 * single bio to populate the pages for the 2 ranges because
3659 		 * this makes the compressed extent read zero out the pages
3660 		 * belonging to the 2nd range. Imagine the following scenario:
3661 		 *
3662 		 *  File layout
3663 		 *  [0 - 8K]                     [8K - 24K]
3664 		 *    |                               |
3665 		 *    |                               |
3666 		 * points to extent X,         points to extent X,
3667 		 * offset 4K, length of 8K     offset 0, length 16K
3668 		 *
3669 		 * [extent X, compressed length = 4K uncompressed length = 16K]
3670 		 *
3671 		 * If the bio to read the compressed extent covers both ranges,
3672 		 * it will decompress extent X into the pages belonging to the
3673 		 * first range and then it will stop, zeroing out the remaining
3674 		 * pages that belong to the other range that points to extent X.
3675 		 * So here we make sure we submit 2 bios, one for the first
3676 		 * range and another one for the third range. Both will target
3677 		 * the same physical extent from disk, but we can't currently
3678 		 * make the compressed bio endio callback populate the pages
3679 		 * for both ranges because each compressed bio is tightly
3680 		 * coupled with a single extent map, and each range can have
3681 		 * an extent map with a different offset value relative to the
3682 		 * uncompressed data of our extent and different lengths. This
3683 		 * is a corner case so we prioritize correctness over
3684 		 * non-optimal behavior (submitting 2 bios for the same extent).
3685 		 */
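		/*
		 * With the layout above this triggers as follows: after the
		 * bio for [0, 8K) is set up we record *prev_em_start = 0
		 * (that range's em->start).  At cur = 8K the new extent map
		 * has em->start = 8K != 0 and the compressed flag set, so
		 * force_bio_submit becomes true below.
		 */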
3686 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3687 		    prev_em_start && *prev_em_start != (u64)-1 &&
3688 		    *prev_em_start != em->start)
3689 			force_bio_submit = true;
3690 
3691 		if (prev_em_start)
3692 			*prev_em_start = em->start;
3693 
3694 		free_extent_map(em);
3695 		em = NULL;
3696 
3697 		/* we've found a hole, just zero and go on */
3698 		if (block_start == EXTENT_MAP_HOLE) {
3699 			struct extent_state *cached = NULL;
3700 
3701 			memzero_page(page, pg_offset, iosize);
3702 			flush_dcache_page(page);
3703 
3704 			set_extent_uptodate(tree, cur, cur + iosize - 1,
3705 					    &cached, GFP_NOFS);
3706 			unlock_extent_cached(tree, cur,
3707 					     cur + iosize - 1, &cached);
3708 			end_page_read(page, true, cur, iosize);
3709 			cur = cur + iosize;
3710 			pg_offset += iosize;
3711 			continue;
3712 		}
3713 		/* The get_extent function already copied the data into the page */
3714 		if (test_range_bit(tree, cur, cur_end,
3715 				   EXTENT_UPTODATE, 1, NULL)) {
3716 			unlock_extent(tree, cur, cur + iosize - 1);
3717 			end_page_read(page, true, cur, iosize);
3718 			cur = cur + iosize;
3719 			pg_offset += iosize;
3720 			continue;
3721 		}
3722 		/* We have an inline extent but it didn't get marked
3723 		 * uptodate.  Error out.
3724 		 */
3725 		if (block_start == EXTENT_MAP_INLINE) {
3726 			unlock_extent(tree, cur, cur + iosize - 1);
3727 			end_page_read(page, false, cur, iosize);
3728 			cur = cur + iosize;
3729 			pg_offset += iosize;
3730 			continue;
3731 		}
3732 
3733 		ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
3734 					 bio_ctrl, page, disk_bytenr, iosize,
3735 					 pg_offset,
3736 					 end_bio_extent_readpage, 0,
3737 					 this_bio_flag,
3738 					 force_bio_submit);
3739 		if (!ret) {
3740 			nr++;
3741 		} else {
3742 			unlock_extent(tree, cur, cur + iosize - 1);
3743 			end_page_read(page, false, cur, iosize);
3744 			goto out;
3745 		}
3746 		cur = cur + iosize;
3747 		pg_offset += iosize;
3748 	}
3749 out:
3750 	return ret;
3751 }
3752 
3753 static inline void contiguous_readpages(struct page *pages[], int nr_pages,
3754 					u64 start, u64 end,
3755 					struct extent_map **em_cached,
3756 					struct btrfs_bio_ctrl *bio_ctrl,
3757 					u64 *prev_em_start)
3758 {
3759 	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
3760 	int index;
3761 
3762 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3763 
3764 	for (index = 0; index < nr_pages; index++) {
3765 		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
3766 				  REQ_RAHEAD, prev_em_start);
3767 		put_page(pages[index]);
3768 	}
3769 }
3770 
3771 static void update_nr_written(struct writeback_control *wbc,
3772 			      unsigned long nr_written)
3773 {
3774 	wbc->nr_to_write -= nr_written;
3775 }
3776 
3777 /*
3778  * helper for __extent_writepage, doing all of the delayed allocation setup.
3779  *
3780  * This returns 1 if btrfs_run_delalloc_range() did all the work required
3781  * to write the page (copy into inline extent).  In this case the IO has
3782  * been started and the page is already unlocked.
3783  *
3784  * This returns 0 if all went well (page still locked)
3785  * This returns < 0 if there were errors (page still locked)
3786  */
3787 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
3788 		struct page *page, struct writeback_control *wbc,
3789 		unsigned long *nr_written)
3790 {
3791 	const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
3792 	u64 delalloc_start = page_offset(page);
3793 	u64 delalloc_to_write = 0;
3794 	int ret;
3795 	int page_started = 0;
3796 
3797 	while (delalloc_start < page_end) {
3798 		u64 delalloc_end = page_end;
3799 		bool found;
3800 
3801 		found = find_lock_delalloc_range(&inode->vfs_inode, page,
3802 					       &delalloc_start,
3803 					       &delalloc_end);
3804 		if (!found) {
3805 			delalloc_start = delalloc_end + 1;
3806 			continue;
3807 		}
3808 		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3809 				delalloc_end, &page_started, nr_written, wbc);
3810 		if (ret) {
3811 			btrfs_page_set_error(inode->root->fs_info, page,
3812 					     page_offset(page), PAGE_SIZE);
3813 			return ret;
3814 		}
3815 		/*
3816 		 * delalloc_end is already one less than the total length, so
3817 		 * we don't subtract one from PAGE_SIZE
3818 		 */
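		/*
		 * E.g. a 12K delalloc range on 4K pages: delalloc_start = 0,
		 * delalloc_end = 12287, so (12287 - 0 + 4096) >> 12 adds 3
		 * pages to delalloc_to_write.
		 */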
3819 		delalloc_to_write += (delalloc_end - delalloc_start +
3820 				      PAGE_SIZE) >> PAGE_SHIFT;
3821 		delalloc_start = delalloc_end + 1;
3822 	}
3823 	if (wbc->nr_to_write < delalloc_to_write) {
3824 		int thresh = 8192;
3825 
3826 		if (delalloc_to_write < thresh * 2)
3827 			thresh = delalloc_to_write;
3828 		wbc->nr_to_write = min_t(u64, delalloc_to_write,
3829 					 thresh);
3830 	}
3831 
3832 	/* did the fill delalloc function already unlock and start
3833 	 * the IO?
3834 	 */
3835 	if (page_started) {
3836 		/*
3837 		 * we've unlocked the page, so we can't update
3838 		 * the mapping's writeback index, just update
3839 		 * nr_to_write.
3840 		 */
3841 		wbc->nr_to_write -= *nr_written;
3842 		return 1;
3843 	}
3844 
3845 	return 0;
3846 }
3847 
3848 /*
3849  * Find the first byte we need to write.
3850  *
3851  * For subpage, one page can contain several sectors, and
3852  * __extent_writepage_io() will just grab all extent maps in the page
3853  * range and try to submit all non-inline/non-compressed extents.
3854  *
3855  * This is a big problem for subpage: we must not re-submit already
3856  * written data at all.
3857  * This function looks up the subpage dirty bitmap to find which range we
3858  * really need to submit.
3859  *
3860  * Return the next dirty range in [@start, @end).
3861  * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
3862  */
3863 static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
3864 				 struct page *page, u64 *start, u64 *end)
3865 {
3866 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
3867 	struct btrfs_subpage_info *spi = fs_info->subpage_info;
3868 	u64 orig_start = *start;
3869 	/* Declare as unsigned long so we can use bitmap ops */
3870 	unsigned long flags;
3871 	int range_start_bit;
3872 	int range_end_bit;
3873 
3874 	/*
3875 	 * For regular sector size == page size case, since one page only
3876 	 * contains one sector, we return the page offset directly.
3877 	 */
3878 	if (fs_info->sectorsize == PAGE_SIZE) {
3879 		*start = page_offset(page);
3880 		*end = page_offset(page) + PAGE_SIZE;
3881 		return;
3882 	}
3883 
3884 	range_start_bit = spi->dirty_offset +
3885 			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
3886 
3887 	/* We should have the page locked, but just in case */
3888 	spin_lock_irqsave(&subpage->lock, flags);
3889 	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
3890 			       spi->dirty_offset + spi->bitmap_nr_bits);
3891 	spin_unlock_irqrestore(&subpage->lock, flags);
3892 
3893 	range_start_bit -= spi->dirty_offset;
3894 	range_end_bit -= spi->dirty_offset;
3895 
3896 	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
3897 	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
3898 }
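#if 0	/* Illustrative sketch, not part of this file: the region search. */
#include <stdio.h>

/*
 * The subpage dirty lookup above boils down to finding the next run of set
 * bits in a per-page bitmap.  A minimal single-word version of that region
 * search (the real bitmap_next_set_region() works on arbitrary-length
 * bitmaps) could look like this:
 */
static void next_set_region(unsigned long map, int *rs, int *re, int nbits)
{
	while (*rs < nbits && !(map & (1UL << *rs)))
		(*rs)++;			/* skip clear bits */
	*re = *rs;
	while (*re < nbits && (map & (1UL << *re)))
		(*re)++;			/* extend over set bits */
}

int main(void)
{
	/* 16 sectors per 64K page at 4K sectorsize; sectors 2, 3 and 7 dirty */
	unsigned long dirty = (1UL << 2) | (1UL << 3) | (1UL << 7);
	int rs = 0, re;

	next_set_region(dirty, &rs, &re, 16);
	/* Prints "sectors [2, 4)": bytes [8K, 16K) within the page. */
	printf("sectors [%d, %d)\n", rs, re);
	return 0;
}
#endif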
3899 
3900 /*
3901  * helper for __extent_writepage.  This calls the writepage start hooks,
3902  * and does the loop to map the page into extents and bios.
3903  *
3904  * We return 1 if the IO is started and the page is unlocked,
3905  * 0 if all went well (page still locked)
3906  * < 0 if there were errors (page still locked)
3907  */
3908 static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
3909 				 struct page *page,
3910 				 struct writeback_control *wbc,
3911 				 struct extent_page_data *epd,
3912 				 loff_t i_size,
3913 				 unsigned long nr_written,
3914 				 int *nr_ret)
3915 {
3916 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3917 	u64 cur = page_offset(page);
3918 	u64 end = cur + PAGE_SIZE - 1;
3919 	u64 extent_offset;
3920 	u64 block_start;
3921 	struct extent_map *em;
3922 	int ret = 0;
3923 	int nr = 0;
3924 	u32 opf = REQ_OP_WRITE;
3925 	const unsigned int write_flags = wbc_to_write_flags(wbc);
3926 	bool compressed;
3927 
3928 	ret = btrfs_writepage_cow_fixup(page);
3929 	if (ret) {
3930 		/* Fixup worker will requeue */
3931 		redirty_page_for_writepage(wbc, page);
3932 		update_nr_written(wbc, nr_written);
3933 		unlock_page(page);
3934 		return 1;
3935 	}
3936 
3937 	/*
3938 	 * we don't want to touch the inode after unlocking the page,
3939 	 * so we update the mapping writeback index now
3940 	 */
3941 	update_nr_written(wbc, nr_written + 1);
3942 
3943 	while (cur <= end) {
3944 		u64 disk_bytenr;
3945 		u64 em_end;
3946 		u64 dirty_range_start = cur;
3947 		u64 dirty_range_end;
3948 		u32 iosize;
3949 
3950 		if (cur >= i_size) {
3951 			btrfs_writepage_endio_finish_ordered(inode, page, cur,
3952 							     end, true);
3953 			/*
3954 			 * This range is beyond i_size, thus we don't need to
3955 			 * bother writing back.
3956 			 * But we still need to clear the dirty subpage bit, or
3957 			 * the next time the page gets dirtied, we will try to
3958 			 * writeback the sectors with subpage dirty bits,
3959 			 * causing writeback without ordered extent.
3960 			 */
3961 			btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
3962 			break;
3963 		}
3964 
3965 		find_next_dirty_byte(fs_info, page, &dirty_range_start,
3966 				     &dirty_range_end);
3967 		if (cur < dirty_range_start) {
3968 			cur = dirty_range_start;
3969 			continue;
3970 		}
3971 
3972 		em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
3973 		if (IS_ERR_OR_NULL(em)) {
3974 			btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
3975 			ret = PTR_ERR_OR_ZERO(em);
3976 			break;
3977 		}
3978 
3979 		extent_offset = cur - em->start;
3980 		em_end = extent_map_end(em);
3981 		ASSERT(cur <= em_end);
3982 		ASSERT(cur < end);
3983 		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
3984 		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
3985 		block_start = em->block_start;
3986 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3987 		disk_bytenr = em->block_start + extent_offset;
3988 
3989 		/*
3990 		 * Note that em_end from extent_map_end() and dirty_range_end from
3991 		 * find_next_dirty_byte() are all exclusive
3992 		 */
3993 		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
3994 
3995 		if (btrfs_use_zone_append(inode, em->block_start))
3996 			opf = REQ_OP_ZONE_APPEND;
3997 
3998 		free_extent_map(em);
3999 		em = NULL;
4000 
4001 		/*
4002 		 * compressed and inline extents are written through other
4003 		 * paths in the FS
4004 		 */
4005 		if (compressed || block_start == EXTENT_MAP_HOLE ||
4006 		    block_start == EXTENT_MAP_INLINE) {
4007 			if (compressed)
4008 				nr++;
4009 			else
4010 				btrfs_writepage_endio_finish_ordered(inode,
4011 						page, cur, cur + iosize - 1, true);
4012 			btrfs_page_clear_dirty(fs_info, page, cur, iosize);
4013 			cur += iosize;
4014 			continue;
4015 		}
4016 
4017 		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
4018 		if (!PageWriteback(page)) {
4019 			btrfs_err(inode->root->fs_info,
4020 				   "page %lu not writeback, cur %llu end %llu",
4021 			       page->index, cur, end);
4022 		}
4023 
4024 		/*
4025 		 * Although the PageDirty bit is cleared before entering this
4026 		 * function, subpage dirty bit is not cleared.
4027 		 * So clear subpage dirty bit here so next time we won't submit
4028 		 * page for range already written to disk.
4029 		 */
4030 		btrfs_page_clear_dirty(fs_info, page, cur, iosize);
4031 
4032 		ret = submit_extent_page(opf | write_flags, wbc,
4033 					 &epd->bio_ctrl, page,
4034 					 disk_bytenr, iosize,
4035 					 cur - page_offset(page),
4036 					 end_bio_extent_writepage,
4037 					 0, 0, false);
4038 		if (ret) {
4039 			btrfs_page_set_error(fs_info, page, cur, iosize);
4040 			if (PageWriteback(page))
4041 				btrfs_page_clear_writeback(fs_info, page, cur,
4042 							   iosize);
4043 		}
4044 
4045 		cur += iosize;
4046 		nr++;
4047 	}
4048 	/*
4049 	 * If we finish without problem, we should not only clear page dirty,
4050 	 * but also empty subpage dirty bits
4051 	 */
4052 	if (!ret)
4053 		btrfs_page_assert_not_dirty(fs_info, page);
4054 	*nr_ret = nr;
4055 	return ret;
4056 }
4057 
4058 /*
4059  * the writepage semantics are similar to regular writepage.  extent
4060  * records are inserted to lock ranges in the tree, and as dirty areas
4061  * are found, they are marked writeback.  Then the lock bits are removed
4062  * and the end_io handler clears the writeback ranges
4063  *
4064  * Return 0 if everything goes well.
4065  * Return <0 for error.
4066  */
4067 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
4068 			      struct extent_page_data *epd)
4069 {
4070 	struct inode *inode = page->mapping->host;
4071 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4072 	const u64 page_start = page_offset(page);
4073 	const u64 page_end = page_start + PAGE_SIZE - 1;
4074 	int ret;
4075 	int nr = 0;
4076 	size_t pg_offset;
4077 	loff_t i_size = i_size_read(inode);
4078 	unsigned long end_index = i_size >> PAGE_SHIFT;
4079 	unsigned long nr_written = 0;
4080 
4081 	trace___extent_writepage(page, inode, wbc);
4082 
4083 	WARN_ON(!PageLocked(page));
4084 
4085 	btrfs_page_clear_error(btrfs_sb(inode->i_sb), page,
4086 			       page_offset(page), PAGE_SIZE);
4087 
4088 	pg_offset = offset_in_page(i_size);
4089 	if (page->index > end_index ||
4090 	   (page->index == end_index && !pg_offset)) {
4091 		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
4092 		unlock_page(page);
4093 		return 0;
4094 	}
4095 
4096 	if (page->index == end_index) {
4097 		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
4098 		flush_dcache_page(page);
4099 	}
4100 
4101 	ret = set_page_extent_mapped(page);
4102 	if (ret < 0) {
4103 		SetPageError(page);
4104 		goto done;
4105 	}
4106 
4107 	if (!epd->extent_locked) {
4108 		ret = writepage_delalloc(BTRFS_I(inode), page, wbc, &nr_written);
4109 		if (ret == 1)
4110 			return 0;
4111 		if (ret)
4112 			goto done;
4113 	}
4114 
4115 	ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
4116 				    nr_written, &nr);
4117 	if (ret == 1)
4118 		return 0;
4119 
4120 done:
4121 	if (nr == 0) {
4122 		/* make sure the mapping tag for page dirty gets cleared */
4123 		set_page_writeback(page);
4124 		end_page_writeback(page);
4125 	}
4126 	/*
4127 	 * Here we used to have a check for PageError() and then set @ret and
4128 	 * call end_extent_writepage().
4129 	 *
4130 	 * But in fact setting @ret here will cause different error paths
4131 	 * between subpage and regular sectorsize.
4132 	 *
4133 	 * For regular page size, we never submit current page, but only add
4134 	 * current page to current bio.
4135 	 * The bio submission can only happen in next page.
4136 	 * Thus if we hit the PageError() branch, @ret is already set to
4137 	 * non-zero value and will not get updated for regular sectorsize.
4138 	 *
4139 	 * But for subpage case, it's possible we submit part of current page,
4140 	 * thus can get PageError() set by submitted bio of the same page,
4141 	 * while our @ret is still 0.
4142 	 *
4143 	 * So here we unify the behavior and don't set @ret.
4144 	 * Error can still be properly passed to higher layer as page will
4145 	 * be set error, here we just don't handle the IO failure.
4146 	 *
4147 	 * NOTE: This is just a hotfix for subpage.
4148 	 * The root fix will be properly ending ordered extent when we hit
4149 	 * an error during writeback.
4150 	 *
4151 	 * But that needs a bigger refactoring, as we not only need to grab the
4152 	 * submitted OE, but also need to know exactly at which bytenr we hit
4153 	 * the error.
4154 	 * Currently the full page based __extent_writepage_io() is not
4155 	 * capable of that.
4156 	 */
4157 	if (PageError(page))
4158 		end_extent_writepage(page, ret, page_start, page_end);
4159 	if (epd->extent_locked) {
4160 		/*
4161 		 * If epd->extent_locked, it's from extent_write_locked_range(),
4162 		 * the page can either be locked by lock_page() or
4163 		 * process_one_page().
4164 		 * Let btrfs_page_unlock_writer() handle both cases.
4165 		 */
4166 		ASSERT(wbc);
4167 		btrfs_page_unlock_writer(fs_info, page, wbc->range_start,
4168 					 wbc->range_end + 1 - wbc->range_start);
4169 	} else {
4170 		unlock_page(page);
4171 	}
4172 	ASSERT(ret <= 0);
4173 	return ret;
4174 }
4175 
4176 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
4177 {
4178 	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
4179 		       TASK_UNINTERRUPTIBLE);
4180 }
4181 
4182 static void end_extent_buffer_writeback(struct extent_buffer *eb)
4183 {
4184 	if (test_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags))
4185 		btrfs_zone_finish_endio(eb->fs_info, eb->start, eb->len);
4186 
4187 	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4188 	smp_mb__after_atomic();
4189 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
4190 }
4191 
4192 /*
4193  * Lock extent buffer status and pages for writeback.
4194  *
4195  * May try to flush write bio if we can't get the lock.
4196  *
4197  * Return  0 if the extent buffer doesn't need to be submitted.
4198  *           (E.g. the extent buffer is not dirty)
4199  * Return >0 if the extent buffer is submitted to bio.
4200  * Return <0 if something went wrong, no page is locked.
4201  */
4202 static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
4203 			  struct extent_page_data *epd)
4204 {
4205 	struct btrfs_fs_info *fs_info = eb->fs_info;
4206 	int i, num_pages, failed_page_nr;
4207 	int flush = 0;
4208 	int ret = 0;
4209 
4210 	if (!btrfs_try_tree_write_lock(eb)) {
4211 		ret = flush_write_bio(epd);
4212 		if (ret < 0)
4213 			return ret;
4214 		flush = 1;
4215 		btrfs_tree_lock(eb);
4216 	}
4217 
4218 	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
4219 		btrfs_tree_unlock(eb);
4220 		if (!epd->sync_io)
4221 			return 0;
4222 		if (!flush) {
4223 			ret = flush_write_bio(epd);
4224 			if (ret < 0)
4225 				return ret;
4226 			flush = 1;
4227 		}
4228 		while (1) {
4229 			wait_on_extent_buffer_writeback(eb);
4230 			btrfs_tree_lock(eb);
4231 			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
4232 				break;
4233 			btrfs_tree_unlock(eb);
4234 		}
4235 	}
4236 
4237 	/*
4238 	 * We need to do this to prevent races with anyone checking whether the
4239 	 * eb is under IO, since we can end up having no IO bits set for a short
4240 	 * period of time.
4241 	 */
4242 	spin_lock(&eb->refs_lock);
4243 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4244 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4245 		spin_unlock(&eb->refs_lock);
4246 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4247 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4248 					 -eb->len,
4249 					 fs_info->dirty_metadata_batch);
4250 		ret = 1;
4251 	} else {
4252 		spin_unlock(&eb->refs_lock);
4253 	}
4254 
4255 	btrfs_tree_unlock(eb);
4256 
4257 	/*
4258 	 * Either we don't need to submit any tree block, or we're submitting
4259 	 * subpage eb.
4260 	 * Subpage metadata doesn't use page locking at all, so we can skip
4261 	 * the page locking.
4262 	 */
4263 	if (!ret || fs_info->sectorsize < PAGE_SIZE)
4264 		return ret;
4265 
4266 	num_pages = num_extent_pages(eb);
4267 	for (i = 0; i < num_pages; i++) {
4268 		struct page *p = eb->pages[i];
4269 
4270 		if (!trylock_page(p)) {
4271 			if (!flush) {
4272 				int err;
4273 
4274 				err = flush_write_bio(epd);
4275 				if (err < 0) {
4276 					ret = err;
4277 					failed_page_nr = i;
4278 					goto err_unlock;
4279 				}
4280 				flush = 1;
4281 			}
4282 			lock_page(p);
4283 		}
4284 	}
4285 
4286 	return ret;
4287 err_unlock:
4288 	/* Unlock already locked pages */
4289 	for (i = 0; i < failed_page_nr; i++)
4290 		unlock_page(eb->pages[i]);
4291 	/*
4292 	 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
4293 	 * Also set EXTENT_BUFFER_DIRTY back so future writeback attempts on
4294 	 * this eb can be made, undoing everything done above.
4295 	 */
4296 	btrfs_tree_lock(eb);
4297 	spin_lock(&eb->refs_lock);
4298 	set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4299 	end_extent_buffer_writeback(eb);
4300 	spin_unlock(&eb->refs_lock);
4301 	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
4302 				 fs_info->dirty_metadata_batch);
4303 	btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4304 	btrfs_tree_unlock(eb);
4305 	return ret;
4306 }
4307 
4308 static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
4309 {
4310 	struct btrfs_fs_info *fs_info = eb->fs_info;
4311 
4312 	btrfs_page_set_error(fs_info, page, eb->start, eb->len);
4313 	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4314 		return;
4315 
4316 	/*
4317 	 * If we error out, we should add back the dirty_metadata_bytes
4318 	 * to make it consistent.
4319 	 */
4320 	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4321 				 eb->len, fs_info->dirty_metadata_batch);
4322 
4323 	/*
4324 	 * If writeback for a btree extent that doesn't belong to a log tree
4325 	 * failed, increment the counter transaction->eb_write_errors.
4326 	 * We do this because while the transaction is running and before it's
4327 	 * committing (when we call filemap_fdata[write|wait]_range against
4328 	 * the btree inode), we might have
4329 	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
4330 	 * returns an error or an error happens during writeback, when we're
4331 	 * committing the transaction we wouldn't know about it, since the pages
4332 	 * can be no longer dirty nor marked anymore for writeback (if a
4333 	 * subsequent modification to the extent buffer didn't happen before the
4334 	 * transaction commit), which makes filemap_fdata[write|wait]_range not
4335 	 * able to find the pages tagged with SetPageError at transaction
4336 	 * commit time. So if this happens we must abort the transaction,
4337 	 * otherwise we commit a super block with btree roots that point to
4338 	 * btree nodes/leafs whose content on disk is invalid - either garbage
4339 	 * or the content of some node/leaf from a past generation that got
4340 	 * cowed or deleted and is no longer valid.
4341 	 *
4342 	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
4343 	 * not be enough - we need to distinguish between log tree extents vs
4344 	 * non-log tree extents, and the next filemap_fdatawait_range() call
4345 	 * will catch and clear such errors in the mapping - and that call might
4346 	 * be from a log sync and not from a transaction commit. Also, checking
4347 	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
4348 	 * not done and would not be reliable - the eb might have been released
4349 	 * from memory and reading it back again means that flag would not be
4350 	 * set (since it's a runtime flag, not persisted on disk).
4351 	 *
4352 	 * Using the flags below in the btree inode also makes us achieve the
4353 	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
4354 	 * writeback for all dirty pages and before filemap_fdatawait_range()
4355 	 * is called, the writeback for all dirty pages had already finished
4356 	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
4357 	 * filemap_fdatawait_range() would return success, as it could not know
4358 	 * that writeback errors happened (the pages were no longer tagged for
4359 	 * writeback).
4360 	 */
4361 	switch (eb->log_index) {
4362 	case -1:
4363 		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
4364 		break;
4365 	case 0:
4366 		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
4367 		break;
4368 	case 1:
4369 		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
4370 		break;
4371 	default:
4372 		BUG(); /* unexpected, logic error */
4373 	}
4374 }
4375 
4376 /*
4377  * The endio specific version which won't touch any unsafe spinlock in endio
4378  * context.
4379  */
4380 static struct extent_buffer *find_extent_buffer_nolock(
4381 		struct btrfs_fs_info *fs_info, u64 start)
4382 {
4383 	struct extent_buffer *eb;
4384 
4385 	rcu_read_lock();
4386 	eb = radix_tree_lookup(&fs_info->buffer_radix,
4387 			       start >> fs_info->sectorsize_bits);
4388 	if (eb && atomic_inc_not_zero(&eb->refs)) {
4389 		rcu_read_unlock();
4390 		return eb;
4391 	}
4392 	rcu_read_unlock();
4393 	return NULL;
4394 }
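#if 0	/* Illustrative sketch, not part of this file: inc-unless-zero. */
#include <stdatomic.h>
#include <stdbool.h>

/*
 * The lookup above is safe in the endio path because it never sleeps or
 * takes a spinlock: RCU keeps the object's memory valid for the duration of
 * the lookup, and a reference is taken only if the count has not already
 * dropped to zero.  The core primitive is the classic CAS loop:
 */
static bool ref_inc_not_zero(_Atomic int *refs)
{
	int old = atomic_load_explicit(refs, memory_order_relaxed);

	while (old != 0) {
		if (atomic_compare_exchange_weak_explicit(refs, &old, old + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;	/* we own a reference now */
	}
	return false;			/* object is being freed; don't touch */
}
#endif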
4395 
4396 /*
4397  * The endio function for subpage extent buffer write.
4398  *
4399  * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
4400  * after all extent buffers in the page has finished their writeback.
4401  */
4402 static void end_bio_subpage_eb_writepage(struct bio *bio)
4403 {
4404 	struct btrfs_fs_info *fs_info;
4405 	struct bio_vec *bvec;
4406 	struct bvec_iter_all iter_all;
4407 
4408 	fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
4409 	ASSERT(fs_info->sectorsize < PAGE_SIZE);
4410 
4411 	ASSERT(!bio_flagged(bio, BIO_CLONED));
4412 	bio_for_each_segment_all(bvec, bio, iter_all) {
4413 		struct page *page = bvec->bv_page;
4414 		u64 bvec_start = page_offset(page) + bvec->bv_offset;
4415 		u64 bvec_end = bvec_start + bvec->bv_len - 1;
4416 		u64 cur_bytenr = bvec_start;
4417 
4418 		ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
4419 
4420 		/* Iterate through all extent buffers in the range */
4421 		while (cur_bytenr <= bvec_end) {
4422 			struct extent_buffer *eb;
4423 			int done;
4424 
4425 			/*
4426 			 * Here we can't use find_extent_buffer(), as it may
4427 			 * try to lock eb->refs_lock, which is not safe in endio
4428 			 * context.
4429 			 */
4430 			eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
4431 			ASSERT(eb);
4432 
4433 			cur_bytenr = eb->start + eb->len;
4434 
4435 			ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
4436 			done = atomic_dec_and_test(&eb->io_pages);
4437 			ASSERT(done);
4438 
4439 			if (bio->bi_status ||
4440 			    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4441 				ClearPageUptodate(page);
4442 				set_btree_ioerr(page, eb);
4443 			}
4444 
4445 			btrfs_subpage_clear_writeback(fs_info, page, eb->start,
4446 						      eb->len);
4447 			end_extent_buffer_writeback(eb);
4448 			/*
4449 			 * free_extent_buffer() will grab spinlock which is not
4450 			 * safe in endio context. Thus here we manually dec
4451 			 * the ref.
4452 			 */
4453 			atomic_dec(&eb->refs);
4454 		}
4455 	}
4456 	bio_put(bio);
4457 }
4458 
4459 static void end_bio_extent_buffer_writepage(struct bio *bio)
4460 {
4461 	struct bio_vec *bvec;
4462 	struct extent_buffer *eb;
4463 	int done;
4464 	struct bvec_iter_all iter_all;
4465 
4466 	ASSERT(!bio_flagged(bio, BIO_CLONED));
4467 	bio_for_each_segment_all(bvec, bio, iter_all) {
4468 		struct page *page = bvec->bv_page;
4469 
4470 		eb = (struct extent_buffer *)page->private;
4471 		BUG_ON(!eb);
4472 		done = atomic_dec_and_test(&eb->io_pages);
4473 
4474 		if (bio->bi_status ||
4475 		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4476 			ClearPageUptodate(page);
4477 			set_btree_ioerr(page, eb);
4478 		}
4479 
4480 		end_page_writeback(page);
4481 
4482 		if (!done)
4483 			continue;
4484 
4485 		end_extent_buffer_writeback(eb);
4486 	}
4487 
4488 	bio_put(bio);
4489 }
4490 
4491 static void prepare_eb_write(struct extent_buffer *eb)
4492 {
4493 	u32 nritems;
4494 	unsigned long start;
4495 	unsigned long end;
4496 
4497 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
4498 	atomic_set(&eb->io_pages, num_extent_pages(eb));
4499 
4500 	/* Set btree blocks beyond nritems with 0 to avoid stale content */
4501 	nritems = btrfs_header_nritems(eb);
4502 	if (btrfs_header_level(eb) > 0) {
4503 		end = btrfs_node_key_ptr_offset(nritems);
4504 		memzero_extent_buffer(eb, end, eb->len - end);
4505 	} else {
4506 		/*
4507 		 * Leaf:
4508 		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
4509 		 */
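		/*
		 * The gap between the last item header and the lowest item
		 * data is unused; zero it so stale memory never hits disk.
		 * E.g. with nritems = 2 we zero [btrfs_item_nr_offset(2),
		 * BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb)).
		 */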
4510 		start = btrfs_item_nr_offset(nritems);
4511 		end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
4512 		memzero_extent_buffer(eb, start, end - start);
4513 	}
4514 }
4515 
4516 /*
4517  * Unlike the work in write_one_eb(), we rely completely on extent locking.
4518  * Page locking is only used at a minimum to keep the VMM code happy.
4519  */
4520 static int write_one_subpage_eb(struct extent_buffer *eb,
4521 				struct writeback_control *wbc,
4522 				struct extent_page_data *epd)
4523 {
4524 	struct btrfs_fs_info *fs_info = eb->fs_info;
4525 	struct page *page = eb->pages[0];
4526 	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4527 	bool no_dirty_ebs = false;
4528 	int ret;
4529 
4530 	prepare_eb_write(eb);
4531 
4532 	/* clear_page_dirty_for_io() in subpage helper needs page locked */
4533 	lock_page(page);
4534 	btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
4535 
4536 	/* Check if this is the last dirty bit to update nr_written */
4537 	no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
4538 							  eb->start, eb->len);
4539 	if (no_dirty_ebs)
4540 		clear_page_dirty_for_io(page);
4541 
4542 	ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4543 			&epd->bio_ctrl, page, eb->start, eb->len,
4544 			eb->start - page_offset(page),
4545 			end_bio_subpage_eb_writepage, 0, 0, false);
4546 	if (ret) {
4547 		btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
4548 		set_btree_ioerr(page, eb);
4549 		unlock_page(page);
4550 
4551 		if (atomic_dec_and_test(&eb->io_pages))
4552 			end_extent_buffer_writeback(eb);
4553 		return -EIO;
4554 	}
4555 	unlock_page(page);
4556 	/*
4557 	 * Submission finished without problem; if no range of the page is
4558 	 * dirty anymore, we have submitted a page.  Update nr_written in wbc.
4559 	 */
4560 	if (no_dirty_ebs)
4561 		update_nr_written(wbc, 1);
4562 	return ret;
4563 }
4564 
4565 static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
4566 			struct writeback_control *wbc,
4567 			struct extent_page_data *epd)
4568 {
4569 	u64 disk_bytenr = eb->start;
4570 	int i, num_pages;
4571 	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4572 	int ret = 0;
4573 
4574 	prepare_eb_write(eb);
4575 
4576 	num_pages = num_extent_pages(eb);
4577 	for (i = 0; i < num_pages; i++) {
4578 		struct page *p = eb->pages[i];
4579 
4580 		clear_page_dirty_for_io(p);
4581 		set_page_writeback(p);
4582 		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4583 					 &epd->bio_ctrl, p, disk_bytenr,
4584 					 PAGE_SIZE, 0,
4585 					 end_bio_extent_buffer_writepage,
4586 					 0, 0, false);
4587 		if (ret) {
4588 			set_btree_ioerr(p, eb);
4589 			if (PageWriteback(p))
4590 				end_page_writeback(p);
4591 			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
4592 				end_extent_buffer_writeback(eb);
4593 			ret = -EIO;
4594 			break;
4595 		}
4596 		disk_bytenr += PAGE_SIZE;
4597 		update_nr_written(wbc, 1);
4598 		unlock_page(p);
4599 	}
4600 
4601 	if (unlikely(ret)) {
4602 		for (; i < num_pages; i++) {
4603 			struct page *p = eb->pages[i];
4604 			clear_page_dirty_for_io(p);
4605 			unlock_page(p);
4606 		}
4607 	}
4608 
4609 	return ret;
4610 }
4611 
4612 /*
4613  * Submit one subpage btree page.
4614  *
4615  * The main difference to submit_eb_page() is:
4616  * - Page locking
4617  *   For subpage, we don't rely on page locking at all.
4618  *
4619  * - Flush write bio
4620  *   We only flush bio if we may be unable to fit current extent buffers into
4621  *   current bio.
4622  *
4623  * Return >=0 for the number of submitted extent buffers.
4624  * Return <0 for fatal error.
4625  */
4626 static int submit_eb_subpage(struct page *page,
4627 			     struct writeback_control *wbc,
4628 			     struct extent_page_data *epd)
4629 {
4630 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4631 	int submitted = 0;
4632 	u64 page_start = page_offset(page);
4633 	int bit_start = 0;
4634 	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
4635 	int ret;
4636 
4637 	/* Lock and write each dirty extent buffer in the range */
4638 	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
4639 		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
4640 		struct extent_buffer *eb;
4641 		unsigned long flags;
4642 		u64 start;
4643 
4644 		/*
4645 		 * Take private lock to ensure the subpage won't be detached
4646 		 * in the meantime.
4647 		 */
4648 		spin_lock(&page->mapping->private_lock);
4649 		if (!PagePrivate(page)) {
4650 			spin_unlock(&page->mapping->private_lock);
4651 			break;
4652 		}
4653 		spin_lock_irqsave(&subpage->lock, flags);
4654 		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
4655 			      subpage->bitmaps)) {
4656 			spin_unlock_irqrestore(&subpage->lock, flags);
4657 			spin_unlock(&page->mapping->private_lock);
4658 			bit_start++;
4659 			continue;
4660 		}
4661 
4662 		start = page_start + bit_start * fs_info->sectorsize;
4663 		bit_start += sectors_per_node;
4664 
4665 		/*
4666 		 * Here we just want to grab the eb without touching extra
4667 		 * spin locks, so call find_extent_buffer_nolock().
4668 		 */
4669 		eb = find_extent_buffer_nolock(fs_info, start);
4670 		spin_unlock_irqrestore(&subpage->lock, flags);
4671 		spin_unlock(&page->mapping->private_lock);
4672 
4673 		/*
4674 		 * The eb has already reached 0 refs thus find_extent_buffer()
4675 		 * doesn't return it. We don't need to write back such eb
4676 		 * anyway.
4677 		 */
4678 		if (!eb)
4679 			continue;
4680 
4681 		ret = lock_extent_buffer_for_io(eb, epd);
4682 		if (ret == 0) {
4683 			free_extent_buffer(eb);
4684 			continue;
4685 		}
4686 		if (ret < 0) {
4687 			free_extent_buffer(eb);
4688 			goto cleanup;
4689 		}
4690 		ret = write_one_subpage_eb(eb, wbc, epd);
4691 		free_extent_buffer(eb);
4692 		if (ret < 0)
4693 			goto cleanup;
4694 		submitted++;
4695 	}
4696 	return submitted;
4697 
4698 cleanup:
4699 	/* We hit error, end bio for the submitted extent buffers */
4700 	end_write_bio(epd, ret);
4701 	return ret;
4702 }
4703 
4704 /*
4705  * Submit all page(s) of one extent buffer.
4706  *
4707  * @page:	the page of one extent buffer
4708  * @eb_context:	used to determine if we need to submit this page; if the
4709  *		current page belongs to this eb, we don't need to submit
4710  *
4711  * The caller should pass each page in their bytenr order, and here we use
4712  * @eb_context to determine if we have submitted pages of one extent buffer.
4713  *
4714  * If we have, we just skip until we hit a new page that doesn't belong to
4715  * current @eb_context.
4716  *
4717  * If not, we submit all the page(s) of the extent buffer.
4718  *
4719  * Return >0 if we have submitted the extent buffer successfully.
4720  * Return 0 if we don't need to submit the page, as it's already submitted by
4721  * previous call.
4722  * Return <0 for fatal error.
4723  */
4724 static int submit_eb_page(struct page *page, struct writeback_control *wbc,
4725 			  struct extent_page_data *epd,
4726 			  struct extent_buffer **eb_context)
4727 {
4728 	struct address_space *mapping = page->mapping;
4729 	struct btrfs_block_group *cache = NULL;
4730 	struct extent_buffer *eb;
4731 	int ret;
4732 
4733 	if (!PagePrivate(page))
4734 		return 0;
4735 
4736 	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
4737 		return submit_eb_subpage(page, wbc, epd);
4738 
4739 	spin_lock(&mapping->private_lock);
4740 	if (!PagePrivate(page)) {
4741 		spin_unlock(&mapping->private_lock);
4742 		return 0;
4743 	}
4744 
4745 	eb = (struct extent_buffer *)page->private;
4746 
4747 	/*
4748 	 * Shouldn't happen and normally this would be a BUG_ON but no point
4749 	 * crashing the machine for something we can survive anyway.
4750 	 */
4751 	if (WARN_ON(!eb)) {
4752 		spin_unlock(&mapping->private_lock);
4753 		return 0;
4754 	}
4755 
4756 	if (eb == *eb_context) {
4757 		spin_unlock(&mapping->private_lock);
4758 		return 0;
4759 	}
4760 	ret = atomic_inc_not_zero(&eb->refs);
4761 	spin_unlock(&mapping->private_lock);
4762 	if (!ret)
4763 		return 0;
4764 
4765 	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
4766 		/*
4767 		 * If for_sync, this hole will be filled by a
4768 		 * transaction commit.
4769 		 */
4770 		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
4771 			ret = -EAGAIN;
4772 		else
4773 			ret = 0;
4774 		free_extent_buffer(eb);
4775 		return ret;
4776 	}
4777 
4778 	*eb_context = eb;
4779 
4780 	ret = lock_extent_buffer_for_io(eb, epd);
4781 	if (ret <= 0) {
4782 		btrfs_revert_meta_write_pointer(cache, eb);
4783 		if (cache)
4784 			btrfs_put_block_group(cache);
4785 		free_extent_buffer(eb);
4786 		return ret;
4787 	}
4788 	if (cache) {
4789 		/*
4790 		 * Implies a write in zoned mode. Mark the last eb in the
4791 		 * block group before dropping our reference to it.
4792 		 */
4793 		if (cache->seq_zone && eb->start + eb->len == cache->zone_capacity)
4794 			set_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags);
4795 		btrfs_put_block_group(cache);
4794 	}
4795 	ret = write_one_eb(eb, wbc, epd);
4796 	free_extent_buffer(eb);
4797 	if (ret < 0)
4798 		return ret;
4799 	return 1;
4800 }
4801 
4802 int btree_write_cache_pages(struct address_space *mapping,
4803 				   struct writeback_control *wbc)
4804 {
4805 	struct extent_buffer *eb_context = NULL;
4806 	struct extent_page_data epd = {
4807 		.bio_ctrl = { 0 },
4808 		.extent_locked = 0,
4809 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4810 	};
4811 	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
4812 	int ret = 0;
4813 	int done = 0;
4814 	int nr_to_write_done = 0;
4815 	struct pagevec pvec;
4816 	int nr_pages;
4817 	pgoff_t index;
4818 	pgoff_t end;		/* Inclusive */
4819 	int scanned = 0;
4820 	xa_mark_t tag;
4821 
4822 	pagevec_init(&pvec);
4823 	if (wbc->range_cyclic) {
4824 		index = mapping->writeback_index; /* Start from prev offset */
4825 		end = -1;
4826 		/*
4827 		 * Starting from the beginning does not need a second cycle
4828 		 * over the range, so mark it as scanned.
4829 		 */
4830 		scanned = (index == 0);
4831 	} else {
4832 		index = wbc->range_start >> PAGE_SHIFT;
4833 		end = wbc->range_end >> PAGE_SHIFT;
4834 		scanned = 1;
4835 	}
4836 	if (wbc->sync_mode == WB_SYNC_ALL)
4837 		tag = PAGECACHE_TAG_TOWRITE;
4838 	else
4839 		tag = PAGECACHE_TAG_DIRTY;
4840 	btrfs_zoned_meta_io_lock(fs_info);
4841 retry:
4842 	if (wbc->sync_mode == WB_SYNC_ALL)
4843 		tag_pages_for_writeback(mapping, index, end);
4844 	while (!done && !nr_to_write_done && (index <= end) &&
4845 	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
4846 			tag))) {
4847 		unsigned i;
4848 
4849 		for (i = 0; i < nr_pages; i++) {
4850 			struct page *page = pvec.pages[i];
4851 
4852 			ret = submit_eb_page(page, wbc, &epd, &eb_context);
4853 			if (ret == 0)
4854 				continue;
4855 			if (ret < 0) {
4856 				done = 1;
4857 				break;
4858 			}
4859 
4860 			/*
4861 			 * the filesystem may choose to bump up nr_to_write.
4862 			 * We have to make sure to honor the new nr_to_write
4863 			 * at any time
4864 			 */
4865 			nr_to_write_done = wbc->nr_to_write <= 0;
4866 		}
4867 		pagevec_release(&pvec);
4868 		cond_resched();
4869 	}
4870 	if (!scanned && !done) {
4871 		/*
4872 		 * We hit the last page and there is more work to be done: wrap
4873 		 * back to the start of the file
4874 		 */
4875 		scanned = 1;
4876 		index = 0;
4877 		goto retry;
4878 	}
4879 	if (ret < 0) {
4880 		end_write_bio(&epd, ret);
4881 		goto out;
4882 	}
4883 	/*
4884 	 * If something went wrong, don't allow any metadata write bio to be
4885 	 * submitted.
4886 	 *
4887 	 * This prevents use-after-free if we had dirty pages that were not
4888 	 * cleaned up, which can still happen with fuzzed images.
4889 	 *
4890 	 * - Bad extent tree
4891 	 *   Allowing existing tree block to be allocated for other trees.
4892 	 *
4893 	 * - Log tree operations
4894 	 *   Existing tree blocks get allocated to the log tree, their
4895 	 *   generation gets bumped, then they get cleaned in tree re-balance.
4896 	 *   Such a tree block will not be written back, since it's clean,
4897 	 *   thus no WRITTEN flag is set.
4898 	 *   And after the log writes back, this tree block is not tracked by
4899 	 *   any dirty extent_io_tree.
4900 	 *
4901 	 * - Offending tree block gets re-dirtied from its original owner
4902 	 *   Since it has bumped generation, no WRITTEN flag, it can be
4903 	 *   reused without COWing. This tree block will not be tracked
4904 	 *   by btrfs_transaction::dirty_pages.
4905 	 *
4906 	 *   Now such dirty tree block will not be cleaned by any dirty
4907 	 *   extent io tree. Thus we don't want to submit such wild eb
4908 	 *   if the fs already has error.
4909 	 */
4910 	if (!BTRFS_FS_ERROR(fs_info)) {
4911 		ret = flush_write_bio(&epd);
4912 	} else {
4913 		ret = -EROFS;
4914 		end_write_bio(&epd, ret);
4915 	}
4916 out:
4917 	btrfs_zoned_meta_io_unlock(fs_info);
4918 	return ret;
4919 }
4920 
4921 /**
4922  * Walk the list of dirty pages of the given address space and write all of them.
4923  *
4924  * @mapping: address space structure to write
4925  * @wbc:     subtract the number of written pages from *@wbc->nr_to_write
4926  * @epd:     holds context for the write, namely the bio
4927  *
4928  * If a page is already under I/O, write_cache_pages() skips it, even
4929  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
4930  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
4931  * and msync() need to guarantee that all the data which was dirty at the time
4932  * the call was made get new I/O started against them.  If wbc->sync_mode is
4933  * WB_SYNC_ALL then we were called for data integrity and we must wait for
4934  * existing IO to complete.
4935  */
4936 static int extent_write_cache_pages(struct address_space *mapping,
4937 			     struct writeback_control *wbc,
4938 			     struct extent_page_data *epd)
4939 {
4940 	struct inode *inode = mapping->host;
4941 	int ret = 0;
4942 	int done = 0;
4943 	int nr_to_write_done = 0;
4944 	struct pagevec pvec;
4945 	int nr_pages;
4946 	pgoff_t index;
4947 	pgoff_t end;		/* Inclusive */
4948 	pgoff_t done_index;
4949 	int range_whole = 0;
4950 	int scanned = 0;
4951 	xa_mark_t tag;
4952 
4953 	/*
4954 	 * We have to hold onto the inode so that ordered extents can do their
4955 	 * work when the IO finishes.  The alternative to this is failing to add
4956 	 * an ordered extent if the igrab() fails there and that is a huge pain
4957 	 * to deal with, so instead just hold onto the inode throughout the
4958 	 * writepages operation.  If it fails here we are freeing up the inode
4959 	 * anyway and we'd rather not waste our time writing out stuff that is
4960 	 * going to be truncated anyway.
4961 	 */
4962 	if (!igrab(inode))
4963 		return 0;
4964 
4965 	pagevec_init(&pvec);
4966 	if (wbc->range_cyclic) {
4967 		index = mapping->writeback_index; /* Start from prev offset */
4968 		end = -1;
4969 		/*
4970 		 * Starting from the beginning does not need a second cycle
4971 		 * over the range, so mark it as scanned.
4972 		 */
4973 		scanned = (index == 0);
4974 	} else {
4975 		index = wbc->range_start >> PAGE_SHIFT;
4976 		end = wbc->range_end >> PAGE_SHIFT;
4977 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4978 			range_whole = 1;
4979 		scanned = 1;
4980 	}
4981 
4982 	/*
4983 	 * We do the tagged writepage as long as the snapshot flush bit is set
4984 	 * and we are the first one to do the filemap_flush() on this inode.
4985 	 *
4986 	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4987 	 * not race in and drop the bit.
4988 	 */
4989 	if (range_whole && wbc->nr_to_write == LONG_MAX &&
4990 	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4991 			       &BTRFS_I(inode)->runtime_flags))
4992 		wbc->tagged_writepages = 1;
4993 
4994 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4995 		tag = PAGECACHE_TAG_TOWRITE;
4996 	else
4997 		tag = PAGECACHE_TAG_DIRTY;
4998 retry:
4999 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
5000 		tag_pages_for_writeback(mapping, index, end);
5001 	done_index = index;
5002 	while (!done && !nr_to_write_done && (index <= end) &&
5003 			(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
5004 						&index, end, tag))) {
5005 		unsigned i;
5006 
5007 		for (i = 0; i < nr_pages; i++) {
5008 			struct page *page = pvec.pages[i];
5009 
5010 			done_index = page->index + 1;
5011 			/*
5012 			 * At this point we hold neither the i_pages lock nor
5013 			 * the page lock: the page may be truncated or
5014 			 * invalidated (changing page->mapping to NULL),
5015 			 * or even swizzled back from swapper_space to
5016 			 * tmpfs file mapping
5017 			 */
5018 			if (!trylock_page(page)) {
5019 				ret = flush_write_bio(epd);
5020 				BUG_ON(ret < 0);
5021 				lock_page(page);
5022 			}
5023 
5024 			if (unlikely(page->mapping != mapping)) {
5025 				unlock_page(page);
5026 				continue;
5027 			}
5028 
5029 			if (wbc->sync_mode != WB_SYNC_NONE) {
5030 				if (PageWriteback(page)) {
5031 					ret = flush_write_bio(epd);
5032 					BUG_ON(ret < 0);
5033 				}
5034 				wait_on_page_writeback(page);
5035 			}
5036 
5037 			if (PageWriteback(page) ||
5038 			    !clear_page_dirty_for_io(page)) {
5039 				unlock_page(page);
5040 				continue;
5041 			}
5042 
5043 			ret = __extent_writepage(page, wbc, epd);
5044 			if (ret < 0) {
5045 				done = 1;
5046 				break;
5047 			}
5048 
5049 			/*
5050 			 * the filesystem may choose to bump up nr_to_write.
5051 			 * We have to make sure to honor the new nr_to_write
5052 			 * at any time
5053 			 */
5054 			nr_to_write_done = wbc->nr_to_write <= 0;
5055 		}
5056 		pagevec_release(&pvec);
5057 		cond_resched();
5058 	}
5059 	if (!scanned && !done) {
5060 		/*
5061 		 * We hit the last page and there is more work to be done: wrap
5062 		 * back to the start of the file
5063 		 */
5064 		scanned = 1;
5065 		index = 0;
5066 
5067 		/*
5068 		 * If we're looping we could run into a page that is locked by a
5069 		 * writer and that writer could be waiting on writeback for a
5070 		 * page in our current bio, and thus deadlock, so flush the
5071 		 * write bio here.
5072 		 */
5073 		ret = flush_write_bio(epd);
5074 		if (!ret)
5075 			goto retry;
5076 	}
5077 
5078 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
5079 		mapping->writeback_index = done_index;
5080 
5081 	btrfs_add_delayed_iput(inode);
5082 	return ret;
5083 }
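#if 0	/* Illustrative sketch, not part of this file: range_cyclic rescan. */
/*
 * With range_cyclic the walk above starts at mapping->writeback_index,
 * usually mid-file.  If the tail is exhausted without being told to stop,
 * it wraps to index 0 exactly once; pages written on the first pass are no
 * longer tagged dirty, so the second pass will not pick them up again.
 * The shape of that control flow, with a hypothetical batch submitter:
 */
static void cyclic_scan(unsigned long start_index,
			int (*submit_batch)(unsigned long *))
{
	unsigned long index = start_index;
	int scanned = (index == 0);	/* starting at 0 already covers all */

retry:
	while (submit_batch(&index))	/* advances 'index', returns 0 when
					 * there are no more tagged pages */
		;
	if (!scanned) {
		scanned = 1;
		index = 0;
		goto retry;
	}
}
#endif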
5084 
5085 int extent_write_full_page(struct page *page, struct writeback_control *wbc)
5086 {
5087 	int ret;
5088 	struct extent_page_data epd = {
5089 		.bio_ctrl = { 0 },
5090 		.extent_locked = 0,
5091 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
5092 	};
5093 
5094 	ret = __extent_writepage(page, wbc, &epd);
5095 	ASSERT(ret <= 0);
5096 	if (ret < 0) {
5097 		end_write_bio(&epd, ret);
5098 		return ret;
5099 	}
5100 
5101 	ret = flush_write_bio(&epd);
5102 	ASSERT(ret <= 0);
5103 	return ret;
5104 }
5105 
5106 /*
5107  * Submit the pages in the range to bio for call sites whose delalloc range
5108  * has already been run (aka, ordered extent inserted) and all pages are
5109  * still locked.
5110  */
5111 int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
5112 {
5113 	bool found_error = false;
5114 	int first_error = 0;
5115 	int ret = 0;
5116 	struct address_space *mapping = inode->i_mapping;
5117 	struct page *page;
5118 	u64 cur = start;
5119 	unsigned long nr_pages;
5120 	const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
5121 	struct extent_page_data epd = {
5122 		.bio_ctrl = { 0 },
5123 		.extent_locked = 1,
5124 		.sync_io = 1,
5125 	};
5126 	struct writeback_control wbc_writepages = {
5127 		.sync_mode	= WB_SYNC_ALL,
5128 		.range_start	= start,
5129 		.range_end	= end + 1,
5130 		/* We're called from an async helper function */
5131 		.punt_to_cgroup	= 1,
5132 		.no_cgroup_owner = 1,
5133 	};
5134 
5135 	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
5136 	nr_pages = (round_up(end, PAGE_SIZE) - round_down(start, PAGE_SIZE)) >>
5137 		   PAGE_SHIFT;
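	/*
	 * E.g. with 4K pages, start = 0 and end = 8191: round_up(8191, 4096)
	 * is 8192 and round_down(0, 4096) is 0, so nr_pages = 8192 >> 12 = 2.
	 */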
5138 	wbc_writepages.nr_to_write = nr_pages * 2;
5139 
5140 	wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
5141 	while (cur <= end) {
5142 		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
5143 
5144 		page = find_get_page(mapping, cur >> PAGE_SHIFT);
5145 		/*
5146 		 * All pages in the range are locked since
5147 		 * btrfs_run_delalloc_range(), thus there is no way to clear
5148 		 * the page dirty flag.
5149 		 */
5150 		ASSERT(PageLocked(page));
5151 		ASSERT(PageDirty(page));
5152 		clear_page_dirty_for_io(page);
5153 		ret = __extent_writepage(page, &wbc_writepages, &epd);
5154 		ASSERT(ret <= 0);
5155 		if (ret < 0) {
5156 			found_error = true;
5157 			first_error = ret;
5158 		}
5159 		put_page(page);
5160 		cur = cur_end + 1;
5161 	}
5162 
5163 	if (!found_error)
5164 		ret = flush_write_bio(&epd);
5165 	else
5166 		end_write_bio(&epd, ret);
5167 
5168 	wbc_detach_inode(&wbc_writepages);
5169 	if (found_error)
5170 		return first_error;
5171 	return ret;
5172 }
5173 
5174 int extent_writepages(struct address_space *mapping,
5175 		      struct writeback_control *wbc)
5176 {
5177 	struct inode *inode = mapping->host;
5178 	const bool data_reloc = btrfs_is_data_reloc_root(BTRFS_I(inode)->root);
5179 	const bool zoned = btrfs_is_zoned(BTRFS_I(inode)->root->fs_info);
5180 	int ret = 0;
5181 	struct extent_page_data epd = {
5182 		.bio_ctrl = { 0 },
5183 		.extent_locked = 0,
5184 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
5185 	};
5186 
5187 	/*
5188 	 * Allow only a single thread to do the reloc work in zoned mode to
5189 	 * protect the write pointer updates.
5190 	 */
5191 	if (data_reloc && zoned)
5192 		btrfs_inode_lock(inode, 0);
5193 	ret = extent_write_cache_pages(mapping, wbc, &epd);
5194 	if (data_reloc && zoned)
5195 		btrfs_inode_unlock(inode, 0);
5196 	ASSERT(ret <= 0);
5197 	if (ret < 0) {
5198 		end_write_bio(&epd, ret);
5199 		return ret;
5200 	}
5201 	ret = flush_write_bio(&epd);
5202 	return ret;
5203 }
5204 
5205 void extent_readahead(struct readahead_control *rac)
5206 {
5207 	struct btrfs_bio_ctrl bio_ctrl = { 0 };
5208 	struct page *pagepool[16];
5209 	struct extent_map *em_cached = NULL;
5210 	u64 prev_em_start = (u64)-1;
5211 	int nr;
5212 
5213 	while ((nr = readahead_page_batch(rac, pagepool))) {
5214 		u64 contig_start = readahead_pos(rac);
5215 		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
5216 
5217 		contiguous_readpages(pagepool, nr, contig_start, contig_end,
5218 				&em_cached, &bio_ctrl, &prev_em_start);
5219 	}
5220 
5221 	if (em_cached)
5222 		free_extent_map(em_cached);
5223 
5224 	if (bio_ctrl.bio) {
5225 		if (submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags))
5226 			return;
5227 	}
5228 }
5229 
5230 /*
5231  * basic invalidatepage code, this waits on any locked or writeback
5232  * ranges corresponding to the page, and then deletes any extent state
5233  * records from the tree
5234  */
5235 int extent_invalidatepage(struct extent_io_tree *tree,
5236 			  struct page *page, unsigned long offset)
5237 {
5238 	struct extent_state *cached_state = NULL;
5239 	u64 start = page_offset(page);
5240 	u64 end = start + PAGE_SIZE - 1;
5241 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
5242 
5243 	/* This function is only called for the btree inode */
5244 	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
5245 
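	/*
	 * Skip any partially invalidated leading block: e.g. with a 4K
	 * blocksize, offset = 100 gives ALIGN(100, 4096) = 4096, so 'start'
	 * moves past the first block; offset = 0 leaves 'start' at the page
	 * start and the whole page is covered.
	 */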
5246 	start += ALIGN(offset, blocksize);
5247 	if (start > end)
5248 		return 0;
5249 
5250 	lock_extent_bits(tree, start, end, &cached_state);
5251 	wait_on_page_writeback(page);
5252 
5253 	/*
5254 	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
5255 	 * so here we only need to unlock the extent range to free any
5256 	 * existing extent state.
5257 	 */
5258 	unlock_extent_cached(tree, start, end, &cached_state);
5259 	return 0;
5260 }
5261 
5262 /*
5263  * a helper for releasepage, this tests for areas of the page that
5264  * are locked or under IO and drops the related state bits if it is safe
5265  * to drop the page.
5266  */
5267 static int try_release_extent_state(struct extent_io_tree *tree,
5268 				    struct page *page, gfp_t mask)
5269 {
5270 	u64 start = page_offset(page);
5271 	u64 end = start + PAGE_SIZE - 1;
5272 	int ret = 1;
5273 
5274 	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
5275 		ret = 0;
5276 	} else {
5277 		/*
5278 		 * At this point we can safely clear everything except the
5279 		 * locked bit, the nodatasum bit and the delalloc new bit.
5280 		 * The delalloc new bit will be cleared by ordered extent
5281 		 * completion.
5282 		 */
5283 		ret = __clear_extent_bit(tree, start, end,
5284 			 ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
5285 			 0, 0, NULL, mask, NULL);
5286 
5287 		/* If __clear_extent_bit() failed due to -ENOMEM,
5288 		 * we can't allow the release to continue.
5289 		 */
5290 		if (ret < 0)
5291 			ret = 0;
5292 		else
5293 			ret = 1;
5294 	}
5295 	return ret;
5296 }
5297 
5298 /*
5299  * a helper for releasepage.  As long as there are no locked extents
5300  * in the range corresponding to the page, both state records and extent
5301  * map records are removed
5302  */
5303 int try_release_extent_mapping(struct page *page, gfp_t mask)
5304 {
5305 	struct extent_map *em;
5306 	u64 start = page_offset(page);
5307 	u64 end = start + PAGE_SIZE - 1;
5308 	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
5309 	struct extent_io_tree *tree = &btrfs_inode->io_tree;
5310 	struct extent_map_tree *map = &btrfs_inode->extent_tree;
5311 
5312 	if (gfpflags_allow_blocking(mask) &&
5313 	    page->mapping->host->i_size > SZ_16M) {
5314 		u64 len;
5315 		while (start <= end) {
5316 			struct btrfs_fs_info *fs_info;
5317 			u64 cur_gen;
5318 
5319 			len = end - start + 1;
5320 			write_lock(&map->lock);
5321 			em = lookup_extent_mapping(map, start, len);
5322 			if (!em) {
5323 				write_unlock(&map->lock);
5324 				break;
5325 			}
5326 			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
5327 			    em->start != start) {
5328 				write_unlock(&map->lock);
5329 				free_extent_map(em);
5330 				break;
5331 			}
5332 			if (test_range_bit(tree, em->start,
5333 					   extent_map_end(em) - 1,
5334 					   EXTENT_LOCKED, 0, NULL))
5335 				goto next;
5336 			/*
5337 			 * If it's not in the list of modified extents, used
5338 			 * by a fast fsync, we can remove it. If it's being
5339 			 * logged we can safely remove it since fsync took an
5340 			 * extra reference on the em.
5341 			 */
5342 			if (list_empty(&em->list) ||
5343 			    test_bit(EXTENT_FLAG_LOGGING, &em->flags))
5344 				goto remove_em;
5345 			/*
5346 			 * If it's in the list of modified extents, remove it
5347 			 * only if its generation is older than the current one,
5348 			 * in which case we don't need it for a fast fsync.
5349 			 * Otherwise don't remove it, we could be racing with an
5350 			 * ongoing fast fsync that could miss the new extent.
5351 			 */
5352 			fs_info = btrfs_inode->root->fs_info;
5353 			spin_lock(&fs_info->trans_lock);
5354 			cur_gen = fs_info->generation;
5355 			spin_unlock(&fs_info->trans_lock);
5356 			if (em->generation >= cur_gen)
5357 				goto next;
5358 remove_em:
5359 			/*
5360 			 * We only remove extent maps that are not in the list of
5361 			 * modified extents or that are in the list but with a
5362 			 * generation lower than the current generation, so there
5363 			 * is no need to set the full fsync flag on the inode (it
5364 			 * hurts the fsync performance for workloads with a data
5365 			 * size that exceeds or is close to the system's memory).
5366 			 */
5367 			remove_extent_mapping(map, em);
5368 			/* once for the rb tree */
5369 			free_extent_map(em);
5370 next:
5371 			start = extent_map_end(em);
5372 			write_unlock(&map->lock);
5373 
5374 			/* once for us */
5375 			free_extent_map(em);
5376 
5377 			cond_resched(); /* Allow large-extent preemption. */
5378 		}
5379 	}
5380 	return try_release_extent_state(tree, page, mask);
5381 }
5382 
5383 /*
5384  * helper function for fiemap, which doesn't want to see any holes.
5385  * This maps until we find something past 'last'
5386  */
5387 static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
5388 						u64 offset, u64 last)
5389 {
5390 	u64 sectorsize = btrfs_inode_sectorsize(inode);
5391 	struct extent_map *em;
5392 	u64 len;
5393 
5394 	if (offset >= last)
5395 		return NULL;
5396 
5397 	while (1) {
5398 		len = last - offset;
5399 		if (len == 0)
5400 			break;
5401 		len = ALIGN(len, sectorsize);
5402 		em = btrfs_get_extent_fiemap(inode, offset, len);
5403 		if (IS_ERR_OR_NULL(em))
5404 			return em;
5405 
5406 		/* if this isn't a hole return it */
5407 		if (em->block_start != EXTENT_MAP_HOLE)
5408 			return em;
5409 
5410 		/* this is a hole, advance to the next extent */
5411 		offset = extent_map_end(em);
5412 		free_extent_map(em);
5413 		if (offset >= last)
5414 			break;
5415 	}
5416 	return NULL;
5417 }
5418 
5419 /*
5420  * Cache of the previous fiemap extent.
5421  *
5422  * Used for merging fiemap extents.
5423  */
5424 struct fiemap_cache {
5425 	u64 offset;
5426 	u64 phys;
5427 	u64 len;
5428 	u32 flags;
5429 	bool cached;
5430 };
5431 
5432 /*
5433  * Helper to submit a fiemap extent.
5434  *
5435  * Will try to merge the current fiemap extent, specified by @offset, @phys,
5436  * @len and @flags, with the cached one.
5437  * Only when the merge fails will the cached extent be submitted as a
5438  * fiemap extent.
5439  *
5440  * Return value is the same as fiemap_fill_next_extent().
5441  */
5442 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
5443 				struct fiemap_cache *cache,
5444 				u64 offset, u64 phys, u64 len, u32 flags)
5445 {
5446 	int ret = 0;
5447 
5448 	if (!cache->cached)
5449 		goto assign;
5450 
5451 	/*
5452 	 * Sanity check: extent_fiemap() should have ensured that the new
5453 	 * fiemap extent won't overlap with the cached one.
5454 	 * Not recoverable.
5455 	 *
5456 	 * NOTE: Physical addresses can overlap, due to compression.
5457 	 */
5458 	if (cache->offset + cache->len > offset) {
5459 		WARN_ON(1);
5460 		return -EINVAL;
5461 	}
5462 
5463 	/*
5464 	 * Only merge fiemap extents if:
5465 	 * 1) Their logical addresses are contiguous
5466 	 *
5467 	 * 2) Their physical addresses are contiguous,
5468 	 *    so truly compressed (physical size smaller than logical size)
5469 	 *    extents won't get merged with each other
5470 	 *
5471 	 * 3) They share the same flags except FIEMAP_EXTENT_LAST,
5472 	 *    so a regular extent won't get merged with a prealloc extent
5473 	 */
5474 	if (cache->offset + cache->len  == offset &&
5475 	    cache->phys + cache->len == phys  &&
5476 	    (cache->flags & ~FIEMAP_EXTENT_LAST) ==
5477 			(flags & ~FIEMAP_EXTENT_LAST)) {
5478 		cache->len += len;
5479 		cache->flags |= flags;
5480 		goto try_submit_last;
5481 	}
5482 
5483 	/* Not mergeable, need to submit cached one */
5484 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5485 				      cache->len, cache->flags);
5486 	cache->cached = false;
5487 	if (ret)
5488 		return ret;
5489 assign:
5490 	cache->cached = true;
5491 	cache->offset = offset;
5492 	cache->phys = phys;
5493 	cache->len = len;
5494 	cache->flags = flags;
5495 try_submit_last:
5496 	if (cache->flags & FIEMAP_EXTENT_LAST) {
5497 		ret = fiemap_fill_next_extent(fieinfo, cache->offset,
5498 				cache->phys, cache->len, cache->flags);
5499 		cache->cached = false;
5500 	}
5501 	return ret;
5502 }
5503 
5504 /*
5505  * Emit last fiemap cache
5506  *
5507  * The last fiemap cache may still be cached in the following case:
5508  * 0		      4k		    8k
5509  * |<- Fiemap range ->|
5510  * |<------------  First extent ----------->|
5511  *
5512  * In this case, the first extent range will be cached but not emitted.
5513  * So we must emit it before ending extent_fiemap().
5514  */
5515 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
5516 				  struct fiemap_cache *cache)
5517 {
5518 	int ret;
5519 
5520 	if (!cache->cached)
5521 		return 0;
5522 
5523 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5524 				      cache->len, cache->flags);
5525 	cache->cached = false;
5526 	if (ret > 0)
5527 		ret = 0;
5528 	return ret;
5529 }
5530 
5531 int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
5532 		  u64 start, u64 len)
5533 {
5534 	int ret = 0;
5535 	u64 off;
5536 	u64 max = start + len;
5537 	u32 flags = 0;
5538 	u32 found_type;
5539 	u64 last;
5540 	u64 last_for_get_extent = 0;
5541 	u64 disko = 0;
5542 	u64 isize = i_size_read(&inode->vfs_inode);
5543 	struct btrfs_key found_key;
5544 	struct extent_map *em = NULL;
5545 	struct extent_state *cached_state = NULL;
5546 	struct btrfs_path *path;
5547 	struct btrfs_root *root = inode->root;
5548 	struct fiemap_cache cache = { 0 };
5549 	struct ulist *roots;
5550 	struct ulist *tmp_ulist;
5551 	int end = 0;
5552 	u64 em_start = 0;
5553 	u64 em_len = 0;
5554 	u64 em_end = 0;
5555 
5556 	if (len == 0)
5557 		return -EINVAL;
5558 
5559 	path = btrfs_alloc_path();
5560 	if (!path)
5561 		return -ENOMEM;
5562 
5563 	roots = ulist_alloc(GFP_KERNEL);
5564 	tmp_ulist = ulist_alloc(GFP_KERNEL);
5565 	if (!roots || !tmp_ulist) {
5566 		ret = -ENOMEM;
5567 		goto out_free_ulist;
5568 	}
5569 
5570 	/*
5571 	 * We can't initialize 'off' to 'start', as this could miss extents due
5572 	 * to extent item merging.
5573 	 */
5574 	off = 0;
5575 	start = round_down(start, btrfs_inode_sectorsize(inode));
5576 	len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
5577 
5578 	/*
5579 	 * lookup the last file extent.  We're not using i_size here
5580 	 * because there might be preallocation past i_size
5581 	 */
5582 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
5583 				       0);
5584 	if (ret < 0) {
5585 		goto out_free_ulist;
5586 	} else {
5587 		WARN_ON(!ret);
5588 		if (ret == 1)
5589 			ret = 0;
5590 	}
5591 
5592 	path->slots[0]--;
5593 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
5594 	found_type = found_key.type;
5595 
5596 	/* No extents, but there might be delalloc bits */
5597 	if (found_key.objectid != btrfs_ino(inode) ||
5598 	    found_type != BTRFS_EXTENT_DATA_KEY) {
5599 		/* have to trust i_size as the end */
5600 		last = (u64)-1;
5601 		last_for_get_extent = isize;
5602 	} else {
5603 		/*
5604 		 * Remember the start of the last extent.  There are a
5605 		 * bunch of different factors that go into the length of the
5606 		 * extent, so it's much less complex to remember where it started.
5607 		 */
5608 		last = found_key.offset;
5609 		last_for_get_extent = last + 1;
5610 	}
5611 	btrfs_release_path(path);
5612 
5613 	/*
5614 	 * We might have some extents allocated but more delalloc past those
5615 	 * extents.  So, we trust isize unless the start of the last extent is
5616 	 * beyond isize.
5617 	 */
5618 	if (last < isize) {
5619 		last = (u64)-1;
5620 		last_for_get_extent = isize;
5621 	}
5622 
5623 	lock_extent_bits(&inode->io_tree, start, start + len - 1,
5624 			 &cached_state);
5625 
5626 	em = get_extent_skip_holes(inode, start, last_for_get_extent);
5627 	if (!em)
5628 		goto out;
5629 	if (IS_ERR(em)) {
5630 		ret = PTR_ERR(em);
5631 		goto out;
5632 	}
5633 
5634 	while (!end) {
5635 		u64 offset_in_extent = 0;
5636 
5637 		/* break if the extent we found is outside the range */
5638 		if (em->start >= max || extent_map_end(em) < off)
5639 			break;
5640 
5641 		/*
5642 		 * get_extent may return an extent that starts before our
5643 		 * requested range.  We have to make sure the ranges
5644 		 * we return to fiemap always move forward and don't
5645 		 * overlap, so adjust the offsets here
5646 		 */
5647 		em_start = max(em->start, off);
5648 
5649 		/*
5650 		 * Record the offset from the start of the extent
5651 		 * for adjusting the disk offset below.  Only do this if the
5652 		 * extent isn't compressed, since our in-ram offset may be past
5653 		 * what we have actually allocated on disk.
5654 		 */
5655 		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5656 			offset_in_extent = em_start - em->start;
5657 		em_end = extent_map_end(em);
5658 		em_len = em_end - em_start;
5659 		flags = 0;
5660 		if (em->block_start < EXTENT_MAP_LAST_BYTE)
5661 			disko = em->block_start + offset_in_extent;
5662 		else
5663 			disko = 0;
5664 
5665 		/*
5666 		 * bump off for our next call to get_extent
5667 		 */
5668 		off = extent_map_end(em);
5669 		if (off >= max)
5670 			end = 1;
5671 
5672 		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
5673 			end = 1;
5674 			flags |= FIEMAP_EXTENT_LAST;
5675 		} else if (em->block_start == EXTENT_MAP_INLINE) {
5676 			flags |= (FIEMAP_EXTENT_DATA_INLINE |
5677 				  FIEMAP_EXTENT_NOT_ALIGNED);
5678 		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
5679 			flags |= (FIEMAP_EXTENT_DELALLOC |
5680 				  FIEMAP_EXTENT_UNKNOWN);
5681 		} else if (fieinfo->fi_extents_max) {
5682 			u64 bytenr = em->block_start -
5683 				(em->start - em->orig_start);
5684 
5685 			/*
5686 			 * As btrfs supports shared space, this information
5687 			 * can be exported to userspace tools via
5688 			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
5689 			 * then we're just getting a count and we can skip the
5690 			 * lookup stuff.
5691 			 */
5692 			ret = btrfs_check_shared(root, btrfs_ino(inode),
5693 						 bytenr, roots, tmp_ulist);
5694 			if (ret < 0)
5695 				goto out_free;
5696 			if (ret)
5697 				flags |= FIEMAP_EXTENT_SHARED;
5698 			ret = 0;
5699 		}
5700 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5701 			flags |= FIEMAP_EXTENT_ENCODED;
5702 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5703 			flags |= FIEMAP_EXTENT_UNWRITTEN;
5704 
5705 		free_extent_map(em);
5706 		em = NULL;
5707 		if ((em_start >= last) || em_len == (u64)-1 ||
5708 		   (last == (u64)-1 && isize <= em_end)) {
5709 			flags |= FIEMAP_EXTENT_LAST;
5710 			end = 1;
5711 		}
5712 
5713 		/* now scan forward to see if this is really the last extent. */
5714 		em = get_extent_skip_holes(inode, off, last_for_get_extent);
5715 		if (IS_ERR(em)) {
5716 			ret = PTR_ERR(em);
5717 			goto out;
5718 		}
5719 		if (!em) {
5720 			flags |= FIEMAP_EXTENT_LAST;
5721 			end = 1;
5722 		}
5723 		ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
5724 					   em_len, flags);
5725 		if (ret) {
5726 			if (ret == 1)
5727 				ret = 0;
5728 			goto out_free;
5729 		}
5730 	}
5731 out_free:
5732 	if (!ret)
5733 		ret = emit_last_fiemap_cache(fieinfo, &cache);
5734 	free_extent_map(em);
5735 out:
5736 	unlock_extent_cached(&inode->io_tree, start, start + len - 1,
5737 			     &cached_state);
5738 
5739 out_free_ulist:
5740 	btrfs_free_path(path);
5741 	ulist_free(roots);
5742 	ulist_free(tmp_ulist);
5743 	return ret;
5744 }
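
/*
 * For context, a minimal userspace consumer of this code path: the FIEMAP
 * ioctl.  A hedged sketch using only the stable uapi in <linux/fiemap.h>
 * and <linux/fs.h>; error handling is reduced to a bare minimum.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		struct fiemap *fm;
 *		unsigned int i;
 *		int fd;
 *
 *		if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
 *			return 1;
 *		fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
 *		fm->fm_length = FIEMAP_MAX_OFFSET;	// map the whole file
 *		fm->fm_extent_count = 32;
 *		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
 *			return 1;
 *		for (i = 0; i < fm->fm_mapped_extents; i++)
 *			printf("logical %llu physical %llu len %llu flags 0x%x\n",
 *			       (unsigned long long)fm->fm_extents[i].fe_logical,
 *			       (unsigned long long)fm->fm_extents[i].fe_physical,
 *			       (unsigned long long)fm->fm_extents[i].fe_length,
 *			       fm->fm_extents[i].fe_flags);
 *		return 0;
 *	}
 */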
5745 
5746 static void __free_extent_buffer(struct extent_buffer *eb)
5747 {
5748 	kmem_cache_free(extent_buffer_cache, eb);
5749 }
5750 
5751 int extent_buffer_under_io(const struct extent_buffer *eb)
5752 {
5753 	return (atomic_read(&eb->io_pages) ||
5754 		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
5755 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
5756 }
5757 
5758 static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
5759 {
5760 	struct btrfs_subpage *subpage;
5761 
5762 	lockdep_assert_held(&page->mapping->private_lock);
5763 
5764 	if (PagePrivate(page)) {
5765 		subpage = (struct btrfs_subpage *)page->private;
5766 		if (atomic_read(&subpage->eb_refs))
5767 			return true;
5768 		/*
5769 		 * Even if there are no eb refs here, we may still have an
5770 		 * end_page_read() call relying on page::private.
5771 		 */
5772 		if (atomic_read(&subpage->readers))
5773 			return true;
5774 	}
5775 	return false;
5776 }
5777 
5778 static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
5779 {
5780 	struct btrfs_fs_info *fs_info = eb->fs_info;
5781 	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5782 
5783 	/*
5784 	 * For mapped eb, we're going to change the page private, which should
5785 	 * be done under the private_lock.
5786 	 */
5787 	if (mapped)
5788 		spin_lock(&page->mapping->private_lock);
5789 
5790 	if (!PagePrivate(page)) {
5791 		if (mapped)
5792 			spin_unlock(&page->mapping->private_lock);
5793 		return;
5794 	}
5795 
5796 	if (fs_info->sectorsize == PAGE_SIZE) {
5797 		/*
5798 		 * We do this since we'll remove the pages after we've
5799 		 * removed the eb from the radix tree, so we could race
5800 		 * and have this page now attached to the new eb.  So
5801 		 * only clear page_private if it's still connected to
5802 		 * this eb.
5803 		 */
5804 		if (PagePrivate(page) &&
5805 		    page->private == (unsigned long)eb) {
5806 			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
5807 			BUG_ON(PageDirty(page));
5808 			BUG_ON(PageWriteback(page));
5809 			/*
5810 			 * We need to make sure the page hasn't been attached
5811 			 * to a new eb.
5812 			 */
5813 			detach_page_private(page);
5814 		}
5815 		if (mapped)
5816 			spin_unlock(&page->mapping->private_lock);
5817 		return;
5818 	}
5819 
5820 	/*
5821 	 * For subpage, we can have a dummy eb with page private.  In this case,
5822 	 * we can directly detach the private, as such a page is only attached
5823 	 * to one dummy eb, with no sharing.
5824 	 */
5825 	if (!mapped) {
5826 		btrfs_detach_subpage(fs_info, page);
5827 		return;
5828 	}
5829 
5830 	btrfs_page_dec_eb_refs(fs_info, page);
5831 
5832 	/*
5833 	 * We can only detach the page private if there are no other ebs in the
5834 	 * page range and no unfinished IO.
5835 	 */
5836 	if (!page_range_has_eb(fs_info, page))
5837 		btrfs_detach_subpage(fs_info, page);
5838 
5839 	spin_unlock(&page->mapping->private_lock);
5840 }
5841 
5842 /* Release all pages attached to the extent buffer */
5843 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
5844 {
5845 	int i;
5846 	int num_pages;
5847 
5848 	ASSERT(!extent_buffer_under_io(eb));
5849 
5850 	num_pages = num_extent_pages(eb);
5851 	for (i = 0; i < num_pages; i++) {
5852 		struct page *page = eb->pages[i];
5853 
5854 		if (!page)
5855 			continue;
5856 
5857 		detach_extent_buffer_page(eb, page);
5858 
5859 		/* One for when we allocated the page */
5860 		put_page(page);
5861 	}
5862 }
5863 
5864 /*
5865  * Helper for releasing the extent buffer.
5866  */
5867 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
5868 {
5869 	btrfs_release_extent_buffer_pages(eb);
5870 	btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
5871 	__free_extent_buffer(eb);
5872 }
5873 
5874 static struct extent_buffer *
5875 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
5876 		      unsigned long len)
5877 {
5878 	struct extent_buffer *eb = NULL;
5879 
5880 	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
5881 	eb->start = start;
5882 	eb->len = len;
5883 	eb->fs_info = fs_info;
5884 	eb->bflags = 0;
5885 	init_rwsem(&eb->lock);
5886 
5887 	btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
5888 			     &fs_info->allocated_ebs);
5889 	INIT_LIST_HEAD(&eb->release_list);
5890 
5891 	spin_lock_init(&eb->refs_lock);
5892 	atomic_set(&eb->refs, 1);
5893 	atomic_set(&eb->io_pages, 0);
5894 
5895 	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
5896 
5897 	return eb;
5898 }
5899 
5900 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
5901 {
5902 	int i;
5903 	struct page *p;
5904 	struct extent_buffer *new;
5905 	int num_pages = num_extent_pages(src);
5906 
5907 	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
5908 	if (new == NULL)
5909 		return NULL;
5910 
5911 	/*
5912 	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
5913 	 * btrfs_release_extent_buffer() has different behavior for an
5914 	 * UNMAPPED subpage extent buffer.
5915 	 */
5916 	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
5917 
5918 	for (i = 0; i < num_pages; i++) {
5919 		int ret;
5920 
5921 		p = alloc_page(GFP_NOFS);
5922 		if (!p) {
5923 			btrfs_release_extent_buffer(new);
5924 			return NULL;
5925 		}
5926 		ret = attach_extent_buffer_page(new, p, NULL);
5927 		if (ret < 0) {
5928 			put_page(p);
5929 			btrfs_release_extent_buffer(new);
5930 			return NULL;
5931 		}
5932 		WARN_ON(PageDirty(p));
5933 		new->pages[i] = p;
5934 		copy_page(page_address(p), page_address(src->pages[i]));
5935 	}
5936 	set_extent_buffer_uptodate(new);
5937 
5938 	return new;
5939 }
5940 
5941 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5942 						  u64 start, unsigned long len)
5943 {
5944 	struct extent_buffer *eb;
5945 	int num_pages;
5946 	int i;
5947 
5948 	eb = __alloc_extent_buffer(fs_info, start, len);
5949 	if (!eb)
5950 		return NULL;
5951 
5952 	num_pages = num_extent_pages(eb);
5953 	for (i = 0; i < num_pages; i++) {
5954 		int ret;
5955 
5956 		eb->pages[i] = alloc_page(GFP_NOFS);
5957 		if (!eb->pages[i])
5958 			goto err;
5959 		ret = attach_extent_buffer_page(eb, eb->pages[i], NULL);
5960 		if (ret < 0)
5961 			goto err;
5962 	}
5963 	set_extent_buffer_uptodate(eb);
5964 	btrfs_set_header_nritems(eb, 0);
5965 	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5966 
5967 	return eb;
5968 err:
5969 	for (; i > 0; i--) {
5970 		detach_extent_buffer_page(eb, eb->pages[i - 1]);
5971 		__free_page(eb->pages[i - 1]);
5972 	}
5973 	__free_extent_buffer(eb);
5974 	return NULL;
5975 }
5976 
5977 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5978 						u64 start)
5979 {
5980 	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
5981 }
5982 
5983 static void check_buffer_tree_ref(struct extent_buffer *eb)
5984 {
5985 	int refs;
5986 	/*
5987 	 * The TREE_REF bit is first set when the extent_buffer is added
5988 	 * to the radix tree. It is also reset, if unset, when a new reference
5989 	 * to the radix tree. It is also set again, if it was cleared, when a
5990 	 * new reference is created by find_extent_buffer.
5991 	 * It is only cleared in two cases: freeing the last non-tree
5992 	 * reference to the extent_buffer when its STALE bit is set or
5993 	 * calling releasepage when the tree reference is the only reference.
5994 	 *
5995 	 * In both cases, care is taken to ensure that the extent_buffer's
5996 	 * pages are not under io. However, releasepage can be concurrently
5997 	 * called with creating new references, which is prone to race
5998 	 * conditions between the calls to check_buffer_tree_ref in those
5999 	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
6000 	 *
6001 	 * The actual lifetime of the extent_buffer in the radix tree is
6002 	 * adequately protected by the refcount, but the TREE_REF bit and
6003 	 * its corresponding reference are not. To protect against this
6004 	 * class of races, we call check_buffer_tree_ref from the codepaths
6005 	 * which trigger io after they set eb->io_pages. Note that once io is
6006 	 * initiated, TREE_REF can no longer be cleared, so that is the
6007 	 * moment at which any such race is best fixed.
6008 	 */
6009 	refs = atomic_read(&eb->refs);
6010 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6011 		return;
6012 
6013 	spin_lock(&eb->refs_lock);
6014 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6015 		atomic_inc(&eb->refs);
6016 	spin_unlock(&eb->refs_lock);
6017 }
6018 
6019 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
6020 		struct page *accessed)
6021 {
6022 	int num_pages, i;
6023 
6024 	check_buffer_tree_ref(eb);
6025 
6026 	num_pages = num_extent_pages(eb);
6027 	for (i = 0; i < num_pages; i++) {
6028 		struct page *p = eb->pages[i];
6029 
6030 		if (p != accessed)
6031 			mark_page_accessed(p);
6032 	}
6033 }
6034 
6035 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
6036 					 u64 start)
6037 {
6038 	struct extent_buffer *eb;
6039 
6040 	eb = find_extent_buffer_nolock(fs_info, start);
6041 	if (!eb)
6042 		return NULL;
6043 	/*
6044 	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
6045 	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
6046 	 * another task running free_extent_buffer() might have seen that flag
6047 	 * set, with eb->refs == 2, the buffer not under IO (dirty and
6048 	 * writeback flags not set) and still in the tree (flag
6049 	 * EXTENT_BUFFER_TREE_REF set), and therefore be in the process of
6050 	 * decrementing the extent buffer's reference count twice.  So here we
6051 	 * could race and increment the eb's reference count, clear its stale
6052 	 * flag, mark it as dirty and drop our reference before the other task
6053 	 * finishes executing free_extent_buffer, which would later result in
6054 	 * an attempt to free an extent buffer that is dirty.
6055 	 */
6056 	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
6057 		spin_lock(&eb->refs_lock);
6058 		spin_unlock(&eb->refs_lock);
6059 	}
6060 	mark_extent_buffer_accessed(eb, NULL);
6061 	return eb;
6062 }
6063 
6064 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
6065 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
6066 					u64 start)
6067 {
6068 	struct extent_buffer *eb, *exists = NULL;
6069 	int ret;
6070 
6071 	eb = find_extent_buffer(fs_info, start);
6072 	if (eb)
6073 		return eb;
6074 	eb = alloc_dummy_extent_buffer(fs_info, start);
6075 	if (!eb)
6076 		return ERR_PTR(-ENOMEM);
6077 	eb->fs_info = fs_info;
6078 again:
6079 	ret = radix_tree_preload(GFP_NOFS);
6080 	if (ret) {
6081 		exists = ERR_PTR(ret);
6082 		goto free_eb;
6083 	}
6084 	spin_lock(&fs_info->buffer_lock);
6085 	ret = radix_tree_insert(&fs_info->buffer_radix,
6086 				start >> fs_info->sectorsize_bits, eb);
6087 	spin_unlock(&fs_info->buffer_lock);
6088 	radix_tree_preload_end();
6089 	if (ret == -EEXIST) {
6090 		exists = find_extent_buffer(fs_info, start);
6091 		if (exists)
6092 			goto free_eb;
6093 		else
6094 			goto again;
6095 	}
6096 	check_buffer_tree_ref(eb);
6097 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
6098 
6099 	return eb;
6100 free_eb:
6101 	btrfs_release_extent_buffer(eb);
6102 	return exists;
6103 }
6104 #endif
6105 
6106 static struct extent_buffer *grab_extent_buffer(
6107 		struct btrfs_fs_info *fs_info, struct page *page)
6108 {
6109 	struct extent_buffer *exists;
6110 
6111 	/*
6112 	 * For subpage case, we completely rely on radix tree to ensure we
6113 	 * don't try to insert two ebs for the same bytenr.  So here we always
6114 	 * return NULL and just continue.
6115 	 */
6116 	if (fs_info->sectorsize < PAGE_SIZE)
6117 		return NULL;
6118 
6119 	/* Page not yet attached to an extent buffer */
6120 	if (!PagePrivate(page))
6121 		return NULL;
6122 
6123 	/*
6124 	 * We could have already allocated an eb for this page and attached one,
6125 	 * so let's see if we can get a ref on the existing eb.  If we can, we
6126 	 * know it's good and we can just return that one; otherwise we know we
6127 	 * can just overwrite page->private.
6128 	 */
6129 	exists = (struct extent_buffer *)page->private;
6130 	if (atomic_inc_not_zero(&exists->refs))
6131 		return exists;
6132 
6133 	WARN_ON(PageDirty(page));
6134 	detach_page_private(page);
6135 	return NULL;
6136 }
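
/*
 * The atomic_inc_not_zero() call above only takes a reference while the
 * count is still non-zero.  A standalone sketch of that pattern with C11
 * atomics (illustrative, not the kernel implementation):
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static bool ref_get_not_zero(atomic_int *refs)
 *	{
 *		int old = atomic_load(refs);
 *
 *		while (old != 0) {
 *			// on failure, old is reloaded and we retry
 *			if (atomic_compare_exchange_weak(refs, &old, old + 1))
 *				return true;	// got a reference
 *		}
 *		return false;	// object is already being freed
 *	}
 */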
6137 
6138 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
6139 					  u64 start, u64 owner_root, int level)
6140 {
6141 	unsigned long len = fs_info->nodesize;
6142 	int num_pages;
6143 	int i;
6144 	unsigned long index = start >> PAGE_SHIFT;
6145 	struct extent_buffer *eb;
6146 	struct extent_buffer *exists = NULL;
6147 	struct page *p;
6148 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
6149 	int uptodate = 1;
6150 	int ret;
6151 
6152 	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
6153 		btrfs_err(fs_info, "bad tree block start %llu", start);
6154 		return ERR_PTR(-EINVAL);
6155 	}
6156 
6157 #if BITS_PER_LONG == 32
6158 	if (start >= MAX_LFS_FILESIZE) {
6159 		btrfs_err_rl(fs_info,
6160 		"extent buffer %llu is beyond 32bit page cache limit", start);
6161 		btrfs_err_32bit_limit(fs_info);
6162 		return ERR_PTR(-EOVERFLOW);
6163 	}
6164 	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
6165 		btrfs_warn_32bit_limit(fs_info);
6166 #endif
6167 
6168 	if (fs_info->sectorsize < PAGE_SIZE &&
6169 	    offset_in_page(start) + len > PAGE_SIZE) {
6170 		btrfs_err(fs_info,
6171 		"tree block crosses page boundary, start %llu nodesize %lu",
6172 			  start, len);
6173 		return ERR_PTR(-EINVAL);
6174 	}
6175 
6176 	eb = find_extent_buffer(fs_info, start);
6177 	if (eb)
6178 		return eb;
6179 
6180 	eb = __alloc_extent_buffer(fs_info, start, len);
6181 	if (!eb)
6182 		return ERR_PTR(-ENOMEM);
6183 	btrfs_set_buffer_lockdep_class(owner_root, eb, level);
6184 
6185 	num_pages = num_extent_pages(eb);
6186 	for (i = 0; i < num_pages; i++, index++) {
6187 		struct btrfs_subpage *prealloc = NULL;
6188 
6189 		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
6190 		if (!p) {
6191 			exists = ERR_PTR(-ENOMEM);
6192 			goto free_eb;
6193 		}
6194 
6195 		/*
6196 		 * Preallocate page->private for the subpage case, so that we
6197 		 * won't allocate memory with private_lock held.  The memory
6198 		 * will be freed by attach_extent_buffer_page() or freed
6199 		 * manually if we exit earlier.
6200 		 *
6201 		 * Although we have ensured one subpage eb can only have one
6202 		 * page, it may change in the future for 16K page size
6203 		 * support, so we still preallocate the memory in the loop.
6204 		 */
6205 		if (fs_info->sectorsize < PAGE_SIZE) {
6206 			prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
6207 			if (IS_ERR(prealloc)) {
6208 				ret = PTR_ERR(prealloc);
6209 				unlock_page(p);
6210 				put_page(p);
6211 				exists = ERR_PTR(ret);
6212 				goto free_eb;
6213 			}
6214 		}
6215 
6216 		spin_lock(&mapping->private_lock);
6217 		exists = grab_extent_buffer(fs_info, p);
6218 		if (exists) {
6219 			spin_unlock(&mapping->private_lock);
6220 			unlock_page(p);
6221 			put_page(p);
6222 			mark_extent_buffer_accessed(exists, p);
6223 			btrfs_free_subpage(prealloc);
6224 			goto free_eb;
6225 		}
6226 		/* Should not fail, as we have preallocated the memory */
6227 		ret = attach_extent_buffer_page(eb, p, prealloc);
6228 		ASSERT(!ret);
6229 		/*
6230 		 * This informs that we have an extra eb under allocation, so that
6231 		 * detach_extent_buffer_page() won't release the page private
6232 		 * when the eb hasn't yet been inserted into the radix tree.
6233 		 *
6234 		 * The ref will be decreased when the eb releases the page, in
6235 		 * detach_extent_buffer_page(), so no special handling is needed
6236 		 * in the error path.
6237 		 */
6238 		btrfs_page_inc_eb_refs(fs_info, p);
6239 		spin_unlock(&mapping->private_lock);
6240 
6241 		WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
6242 		eb->pages[i] = p;
6243 		if (!PageUptodate(p))
6244 			uptodate = 0;
6245 
6246 		/*
6247 		 * We can't unlock the pages just yet since the extent buffer
6248 		 * hasn't been properly inserted in the radix tree; this
6249 		 * opens a race with btree_releasepage, which can free a page
6250 		 * while we are still filling in all pages for the buffer, and
6251 		 * we could crash.
6252 		 */
6253 	}
6254 	if (uptodate)
6255 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6256 again:
6257 	ret = radix_tree_preload(GFP_NOFS);
6258 	if (ret) {
6259 		exists = ERR_PTR(ret);
6260 		goto free_eb;
6261 	}
6262 
6263 	spin_lock(&fs_info->buffer_lock);
6264 	ret = radix_tree_insert(&fs_info->buffer_radix,
6265 				start >> fs_info->sectorsize_bits, eb);
6266 	spin_unlock(&fs_info->buffer_lock);
6267 	radix_tree_preload_end();
6268 	if (ret == -EEXIST) {
6269 		exists = find_extent_buffer(fs_info, start);
6270 		if (exists)
6271 			goto free_eb;
6272 		else
6273 			goto again;
6274 	}
6275 	/* add one reference for the tree */
6276 	check_buffer_tree_ref(eb);
6277 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
6278 
6279 	/*
6280 	 * Now it's safe to unlock the pages because any calls to
6281 	 * btree_releasepage will correctly detect that a page belongs to a
6282 	 * live buffer and won't free them prematurely.
6283 	 */
6284 	for (i = 0; i < num_pages; i++)
6285 		unlock_page(eb->pages[i]);
6286 	return eb;
6287 
6288 free_eb:
6289 	WARN_ON(!atomic_dec_and_test(&eb->refs));
6290 	for (i = 0; i < num_pages; i++) {
6291 		if (eb->pages[i])
6292 			unlock_page(eb->pages[i]);
6293 	}
6294 
6295 	btrfs_release_extent_buffer(eb);
6296 	return exists;
6297 }
6298 
6299 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
6300 {
6301 	struct extent_buffer *eb =
6302 			container_of(head, struct extent_buffer, rcu_head);
6303 
6304 	__free_extent_buffer(eb);
6305 }
6306 
6307 static int release_extent_buffer(struct extent_buffer *eb)
6308 	__releases(&eb->refs_lock)
6309 {
6310 	lockdep_assert_held(&eb->refs_lock);
6311 
6312 	WARN_ON(atomic_read(&eb->refs) == 0);
6313 	if (atomic_dec_and_test(&eb->refs)) {
6314 		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
6315 			struct btrfs_fs_info *fs_info = eb->fs_info;
6316 
6317 			spin_unlock(&eb->refs_lock);
6318 
6319 			spin_lock(&fs_info->buffer_lock);
6320 			radix_tree_delete(&fs_info->buffer_radix,
6321 					  eb->start >> fs_info->sectorsize_bits);
6322 			spin_unlock(&fs_info->buffer_lock);
6323 		} else {
6324 			spin_unlock(&eb->refs_lock);
6325 		}
6326 
6327 		btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
6328 		/* Should be safe to release our pages at this point */
6329 		btrfs_release_extent_buffer_pages(eb);
6330 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
6331 		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
6332 			__free_extent_buffer(eb);
6333 			return 1;
6334 		}
6335 #endif
6336 		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
6337 		return 1;
6338 	}
6339 	spin_unlock(&eb->refs_lock);
6340 
6341 	return 0;
6342 }
6343 
6344 void free_extent_buffer(struct extent_buffer *eb)
6345 {
6346 	int refs;
6347 	int old;
6348 	if (!eb)
6349 		return;
6350 
6351 	while (1) {
6352 		refs = atomic_read(&eb->refs);
6353 		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
6354 		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
6355 			refs == 1))
6356 			break;
6357 		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
6358 		if (old == refs)
6359 			return;
6360 	}
6361 
6362 	spin_lock(&eb->refs_lock);
6363 	if (atomic_read(&eb->refs) == 2 &&
6364 	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
6365 	    !extent_buffer_under_io(eb) &&
6366 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6367 		atomic_dec(&eb->refs);
6368 
6369 	/*
6370 	 * I know this is terrible, but it's temporary until we stop tracking
6371 	 * the uptodate bits and such for the extent buffers.
6372 	 */
6373 	release_extent_buffer(eb);
6374 }
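
/*
 * The loop above is a bounded refcount decrement: a plain atomic cmpxchg
 * while refs stay above a threshold, falling back to the locked slow path
 * once the threshold is reached.  A standalone C11 sketch of just that
 * fast path (illustrative, not the kernel implementation):
 *
 *	#include <stdatomic.h>
 *
 *	static int ref_put_above(atomic_int *refs, int threshold)
 *	{
 *		int old = atomic_load(refs);
 *
 *		while (old > threshold) {
 *			// on failure, old is reloaded and we retry
 *			if (atomic_compare_exchange_weak(refs, &old, old - 1))
 *				return 1;	// fast path: plain decrement
 *		}
 *		return 0;	// slow path: caller must take refs_lock
 *	}
 */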
6375 
6376 void free_extent_buffer_stale(struct extent_buffer *eb)
6377 {
6378 	if (!eb)
6379 		return;
6380 
6381 	spin_lock(&eb->refs_lock);
6382 	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
6383 
6384 	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
6385 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6386 		atomic_dec(&eb->refs);
6387 	release_extent_buffer(eb);
6388 }
6389 
6390 static void btree_clear_page_dirty(struct page *page)
6391 {
6392 	ASSERT(PageDirty(page));
6393 	ASSERT(PageLocked(page));
6394 	clear_page_dirty_for_io(page);
6395 	xa_lock_irq(&page->mapping->i_pages);
6396 	if (!PageDirty(page))
6397 		__xa_clear_mark(&page->mapping->i_pages,
6398 				page_index(page), PAGECACHE_TAG_DIRTY);
6399 	xa_unlock_irq(&page->mapping->i_pages);
6400 }
6401 
6402 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
6403 {
6404 	struct btrfs_fs_info *fs_info = eb->fs_info;
6405 	struct page *page = eb->pages[0];
6406 	bool last;
6407 
6408 	/* btree_clear_page_dirty() needs page locked */
6409 	lock_page(page);
6410 	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
6411 						  eb->len);
6412 	if (last)
6413 		btree_clear_page_dirty(page);
6414 	unlock_page(page);
6415 	WARN_ON(atomic_read(&eb->refs) == 0);
6416 }
6417 
6418 void clear_extent_buffer_dirty(const struct extent_buffer *eb)
6419 {
6420 	int i;
6421 	int num_pages;
6422 	struct page *page;
6423 
6424 	if (eb->fs_info->sectorsize < PAGE_SIZE)
6425 		return clear_subpage_extent_buffer_dirty(eb);
6426 
6427 	num_pages = num_extent_pages(eb);
6428 
6429 	for (i = 0; i < num_pages; i++) {
6430 		page = eb->pages[i];
6431 		if (!PageDirty(page))
6432 			continue;
6433 		lock_page(page);
6434 		btree_clear_page_dirty(page);
6435 		ClearPageError(page);
6436 		unlock_page(page);
6437 	}
6438 	WARN_ON(atomic_read(&eb->refs) == 0);
6439 }
6440 
6441 bool set_extent_buffer_dirty(struct extent_buffer *eb)
6442 {
6443 	int i;
6444 	int num_pages;
6445 	bool was_dirty;
6446 
6447 	check_buffer_tree_ref(eb);
6448 
6449 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
6450 
6451 	num_pages = num_extent_pages(eb);
6452 	WARN_ON(atomic_read(&eb->refs) == 0);
6453 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
6454 
6455 	if (!was_dirty) {
6456 		bool subpage = eb->fs_info->sectorsize < PAGE_SIZE;
6457 
6458 		/*
6459 		 * For subpage case, we can have other extent buffers in the
6460 		 * same page, and in clear_subpage_extent_buffer_dirty() we
6461 		 * have to clear page dirty without subpage lock held.
6462 		 * This can cause a race where our page's dirty bit is cleared
6463 		 * right after we set it.
6464 		 *
6465 		 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
6466 		 * its page for other reasons, so we can use the page lock to
6467 		 * prevent the above race.
6468 		 */
6469 		if (subpage)
6470 			lock_page(eb->pages[0]);
6471 		for (i = 0; i < num_pages; i++)
6472 			btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
6473 					     eb->start, eb->len);
6474 		if (subpage)
6475 			unlock_page(eb->pages[0]);
6476 	}
6477 #ifdef CONFIG_BTRFS_DEBUG
6478 	for (i = 0; i < num_pages; i++)
6479 		ASSERT(PageDirty(eb->pages[i]));
6480 #endif
6481 
6482 	return was_dirty;
6483 }
6484 
6485 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
6486 {
6487 	struct btrfs_fs_info *fs_info = eb->fs_info;
6488 	struct page *page;
6489 	int num_pages;
6490 	int i;
6491 
6492 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6493 	num_pages = num_extent_pages(eb);
6494 	for (i = 0; i < num_pages; i++) {
6495 		page = eb->pages[i];
6496 		if (page)
6497 			btrfs_page_clear_uptodate(fs_info, page,
6498 						  eb->start, eb->len);
6499 	}
6500 }
6501 
6502 void set_extent_buffer_uptodate(struct extent_buffer *eb)
6503 {
6504 	struct btrfs_fs_info *fs_info = eb->fs_info;
6505 	struct page *page;
6506 	int num_pages;
6507 	int i;
6508 
6509 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6510 	num_pages = num_extent_pages(eb);
6511 	for (i = 0; i < num_pages; i++) {
6512 		page = eb->pages[i];
6513 		btrfs_page_set_uptodate(fs_info, page, eb->start, eb->len);
6514 	}
6515 }
6516 
6517 static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
6518 				      int mirror_num)
6519 {
6520 	struct btrfs_fs_info *fs_info = eb->fs_info;
6521 	struct extent_io_tree *io_tree;
6522 	struct page *page = eb->pages[0];
6523 	struct btrfs_bio_ctrl bio_ctrl = { 0 };
6524 	int ret = 0;
6525 
6526 	ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
6527 	ASSERT(PagePrivate(page));
6528 	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
6529 
6530 	if (wait == WAIT_NONE) {
6531 		if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
6532 			return -EAGAIN;
6533 	} else {
6534 		ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
6535 		if (ret < 0)
6536 			return ret;
6537 	}
6538 
6539 	ret = 0;
6540 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
6541 	    PageUptodate(page) ||
6542 	    btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
6543 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6544 		unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
6545 		return ret;
6546 	}
6547 
6548 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
6549 	eb->read_mirror = 0;
6550 	atomic_set(&eb->io_pages, 1);
6551 	check_buffer_tree_ref(eb);
6552 	btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
6553 
6554 	btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
6555 	ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, &bio_ctrl,
6556 				 page, eb->start, eb->len,
6557 				 eb->start - page_offset(page),
6558 				 end_bio_extent_readpage, mirror_num, 0,
6559 				 true);
6560 	if (ret) {
6561 		/*
6562 		 * In the endio function, if we hit something wrong we will
6563 		 * increase io_pages, so here we need to decrease it on the
6564 		 * error path.
6565 		 */
6566 		atomic_dec(&eb->io_pages);
6567 	}
6568 	if (bio_ctrl.bio) {
6569 		int tmp;
6570 
6571 		tmp = submit_one_bio(bio_ctrl.bio, mirror_num, 0);
6572 		bio_ctrl.bio = NULL;
6573 		if (tmp < 0)
6574 			return tmp;
6575 	}
6576 	if (ret || wait != WAIT_COMPLETE)
6577 		return ret;
6578 
6579 	wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, EXTENT_LOCKED);
6580 	if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
6581 		ret = -EIO;
6582 	return ret;
6583 }
6584 
6585 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
6586 {
6587 	int i;
6588 	struct page *page;
6589 	int err;
6590 	int ret = 0;
6591 	int locked_pages = 0;
6592 	int all_uptodate = 1;
6593 	int num_pages;
6594 	unsigned long num_reads = 0;
6595 	struct btrfs_bio_ctrl bio_ctrl = { 0 };
6596 
6597 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
6598 		return 0;
6599 
6600 	if (eb->fs_info->sectorsize < PAGE_SIZE)
6601 		return read_extent_buffer_subpage(eb, wait, mirror_num);
6602 
6603 	num_pages = num_extent_pages(eb);
6604 	for (i = 0; i < num_pages; i++) {
6605 		page = eb->pages[i];
6606 		if (wait == WAIT_NONE) {
6607 			/*
6608 			 * WAIT_NONE is only utilized by readahead. If we can't
6609 			 * acquire the lock atomically it means either the eb
6610 			 * is being read or is under modification.
6611 			 * Either way the eb will be or has been cached, so
6612 			 * readahead can exit safely.
6613 			 */
6614 			if (!trylock_page(page))
6615 				goto unlock_exit;
6616 		} else {
6617 			lock_page(page);
6618 		}
6619 		locked_pages++;
6620 	}
6621 	/*
6622 	 * We need to lock all the pages first, to make sure that
6623 	 * the uptodate bit of our pages won't be affected by
6624 	 * clear_extent_buffer_uptodate().
6625 	 */
6626 	for (i = 0; i < num_pages; i++) {
6627 		page = eb->pages[i];
6628 		if (!PageUptodate(page)) {
6629 			num_reads++;
6630 			all_uptodate = 0;
6631 		}
6632 	}
6633 
6634 	if (all_uptodate) {
6635 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6636 		goto unlock_exit;
6637 	}
6638 
6639 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
6640 	eb->read_mirror = 0;
6641 	atomic_set(&eb->io_pages, num_reads);
6642 	/*
6643 	 * It is possible for releasepage to clear the TREE_REF bit before we
6644 	 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
6645 	 */
6646 	check_buffer_tree_ref(eb);
6647 	for (i = 0; i < num_pages; i++) {
6648 		page = eb->pages[i];
6649 
6650 		if (!PageUptodate(page)) {
6651 			if (ret) {
6652 				atomic_dec(&eb->io_pages);
6653 				unlock_page(page);
6654 				continue;
6655 			}
6656 
6657 			ClearPageError(page);
6658 			err = submit_extent_page(REQ_OP_READ | REQ_META, NULL,
6659 					 &bio_ctrl, page, page_offset(page),
6660 					 PAGE_SIZE, 0, end_bio_extent_readpage,
6661 					 mirror_num, 0, false);
6662 			if (err) {
6663 				/*
6664 				 * We failed to submit the bio, so it's the
6665 				 * caller's responsibility to perform cleanup,
6666 				 * i.e. unlock the page and set the error bit.
6667 				 */
6668 				ret = err;
6669 				SetPageError(page);
6670 				unlock_page(page);
6671 				atomic_dec(&eb->io_pages);
6672 			}
6673 		} else {
6674 			unlock_page(page);
6675 		}
6676 	}
6677 
6678 	if (bio_ctrl.bio) {
6679 		err = submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.bio_flags);
6680 		bio_ctrl.bio = NULL;
6681 		if (err)
6682 			return err;
6683 	}
6684 
6685 	if (ret || wait != WAIT_COMPLETE)
6686 		return ret;
6687 
6688 	for (i = 0; i < num_pages; i++) {
6689 		page = eb->pages[i];
6690 		wait_on_page_locked(page);
6691 		if (!PageUptodate(page))
6692 			ret = -EIO;
6693 	}
6694 
6695 	return ret;
6696 
6697 unlock_exit:
6698 	while (locked_pages > 0) {
6699 		locked_pages--;
6700 		page = eb->pages[locked_pages];
6701 		unlock_page(page);
6702 	}
6703 	return ret;
6704 }
6705 
6706 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
6707 			    unsigned long len)
6708 {
6709 	btrfs_warn(eb->fs_info,
6710 		"access to eb bytenr %llu len %lu out of range start %lu len %lu",
6711 		eb->start, eb->len, start, len);
6712 	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
6713 
6714 	return true;
6715 }
6716 
6717 /*
6718  * Check if the [start, start + len) range is valid before reading/writing
6719  * the eb.
6720  * NOTE: @start and @len are offsets inside the eb, not logical addresses.
6721  *
6722  * The caller must not touch dst/src memory if this function returns an error.
6723  */
6724 static inline int check_eb_range(const struct extent_buffer *eb,
6725 				 unsigned long start, unsigned long len)
6726 {
6727 	unsigned long offset;
6728 
6729 	/* start, start + len should not go beyond eb->len nor overflow */
6730 	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
6731 		return report_eb_range(eb, start, len);
6732 
6733 	return false;
6734 }
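
/*
 * check_add_overflow() above is typically backed by the compiler's
 * __builtin_add_overflow().  A standalone sketch of the same overflow-safe
 * range test (eb_len is a hypothetical value, not the real eb->len):
 *
 *	#include <assert.h>
 *	#include <limits.h>
 *
 *	int main(void)
 *	{
 *		const unsigned long eb_len = 4096;
 *		unsigned long end;
 *
 *		// in range: no overflow and the end stays within the buffer
 *		assert(!__builtin_add_overflow(10UL, 20UL, &end) &&
 *		       end <= eb_len);
 *
 *		// overflow: ULONG_MAX + 1 wraps, which the builtin reports
 *		assert(__builtin_add_overflow(ULONG_MAX, 1UL, &end));
 *		return 0;
 *	}
 */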
6735 
6736 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
6737 			unsigned long start, unsigned long len)
6738 {
6739 	size_t cur;
6740 	size_t offset;
6741 	struct page *page;
6742 	char *kaddr;
6743 	char *dst = (char *)dstv;
6744 	unsigned long i = get_eb_page_index(start);
6745 
6746 	if (check_eb_range(eb, start, len))
6747 		return;
6748 
6749 	offset = get_eb_offset_in_page(eb, start);
6750 
6751 	while (len > 0) {
6752 		page = eb->pages[i];
6753 
6754 		cur = min(len, (PAGE_SIZE - offset));
6755 		kaddr = page_address(page);
6756 		memcpy(dst, kaddr + offset, cur);
6757 
6758 		dst += cur;
6759 		len -= cur;
6760 		offset = 0;
6761 		i++;
6762 	}
6763 }
6764 
6765 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
6766 				       void __user *dstv,
6767 				       unsigned long start, unsigned long len)
6768 {
6769 	size_t cur;
6770 	size_t offset;
6771 	struct page *page;
6772 	char *kaddr;
6773 	char __user *dst = (char __user *)dstv;
6774 	unsigned long i = get_eb_page_index(start);
6775 	int ret = 0;
6776 
6777 	WARN_ON(start > eb->len);
6778 	WARN_ON(start + len > eb->start + eb->len);
6779 
6780 	offset = get_eb_offset_in_page(eb, start);
6781 
6782 	while (len > 0) {
6783 		page = eb->pages[i];
6784 
6785 		cur = min(len, (PAGE_SIZE - offset));
6786 		kaddr = page_address(page);
6787 		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
6788 			ret = -EFAULT;
6789 			break;
6790 		}
6791 
6792 		dst += cur;
6793 		len -= cur;
6794 		offset = 0;
6795 		i++;
6796 	}
6797 
6798 	return ret;
6799 }
6800 
6801 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
6802 			 unsigned long start, unsigned long len)
6803 {
6804 	size_t cur;
6805 	size_t offset;
6806 	struct page *page;
6807 	char *kaddr;
6808 	char *ptr = (char *)ptrv;
6809 	unsigned long i = get_eb_page_index(start);
6810 	int ret = 0;
6811 
6812 	if (check_eb_range(eb, start, len))
6813 		return -EINVAL;
6814 
6815 	offset = get_eb_offset_in_page(eb, start);
6816 
6817 	while (len > 0) {
6818 		page = eb->pages[i];
6819 
6820 		cur = min(len, (PAGE_SIZE - offset));
6821 
6822 		kaddr = page_address(page);
6823 		ret = memcmp(ptr, kaddr + offset, cur);
6824 		if (ret)
6825 			break;
6826 
6827 		ptr += cur;
6828 		len -= cur;
6829 		offset = 0;
6830 		i++;
6831 	}
6832 	return ret;
6833 }
6834 
6835 /*
6836  * Check that the extent buffer is uptodate.
6837  *
6838  * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
6839  * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
6840  */
6841 static void assert_eb_page_uptodate(const struct extent_buffer *eb,
6842 				    struct page *page)
6843 {
6844 	struct btrfs_fs_info *fs_info = eb->fs_info;
6845 
6846 	if (fs_info->sectorsize < PAGE_SIZE) {
6847 		bool uptodate;
6848 
6849 		uptodate = btrfs_subpage_test_uptodate(fs_info, page,
6850 						       eb->start, eb->len);
6851 		WARN_ON(!uptodate);
6852 	} else {
6853 		WARN_ON(!PageUptodate(page));
6854 	}
6855 }
6856 
6857 void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
6858 		const void *srcv)
6859 {
6860 	char *kaddr;
6861 
6862 	assert_eb_page_uptodate(eb, eb->pages[0]);
6863 	kaddr = page_address(eb->pages[0]) +
6864 		get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
6865 						   chunk_tree_uuid));
6866 	memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
6867 }
6868 
6869 void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
6870 {
6871 	char *kaddr;
6872 
6873 	assert_eb_page_uptodate(eb, eb->pages[0]);
6874 	kaddr = page_address(eb->pages[0]) +
6875 		get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
6876 	memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
6877 }
6878 
6879 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
6880 			 unsigned long start, unsigned long len)
6881 {
6882 	size_t cur;
6883 	size_t offset;
6884 	struct page *page;
6885 	char *kaddr;
6886 	char *src = (char *)srcv;
6887 	unsigned long i = get_eb_page_index(start);
6888 
6889 	WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
6890 
6891 	if (check_eb_range(eb, start, len))
6892 		return;
6893 
6894 	offset = get_eb_offset_in_page(eb, start);
6895 
6896 	while (len > 0) {
6897 		page = eb->pages[i];
6898 		assert_eb_page_uptodate(eb, page);
6899 
6900 		cur = min(len, PAGE_SIZE - offset);
6901 		kaddr = page_address(page);
6902 		memcpy(kaddr + offset, src, cur);
6903 
6904 		src += cur;
6905 		len -= cur;
6906 		offset = 0;
6907 		i++;
6908 	}
6909 }
6910 
6911 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
6912 		unsigned long len)
6913 {
6914 	size_t cur;
6915 	size_t offset;
6916 	struct page *page;
6917 	char *kaddr;
6918 	unsigned long i = get_eb_page_index(start);
6919 
6920 	if (check_eb_range(eb, start, len))
6921 		return;
6922 
6923 	offset = get_eb_offset_in_page(eb, start);
6924 
6925 	while (len > 0) {
6926 		page = eb->pages[i];
6927 		assert_eb_page_uptodate(eb, page);
6928 
6929 		cur = min(len, PAGE_SIZE - offset);
6930 		kaddr = page_address(page);
6931 		memset(kaddr + offset, 0, cur);
6932 
6933 		len -= cur;
6934 		offset = 0;
6935 		i++;
6936 	}
6937 }
6938 
6939 void copy_extent_buffer_full(const struct extent_buffer *dst,
6940 			     const struct extent_buffer *src)
6941 {
6942 	int i;
6943 	int num_pages;
6944 
6945 	ASSERT(dst->len == src->len);
6946 
6947 	if (dst->fs_info->sectorsize == PAGE_SIZE) {
6948 		num_pages = num_extent_pages(dst);
6949 		for (i = 0; i < num_pages; i++)
6950 			copy_page(page_address(dst->pages[i]),
6951 				  page_address(src->pages[i]));
6952 	} else {
6953 		size_t src_offset = get_eb_offset_in_page(src, 0);
6954 		size_t dst_offset = get_eb_offset_in_page(dst, 0);
6955 
6956 		ASSERT(src->fs_info->sectorsize < PAGE_SIZE);
6957 		memcpy(page_address(dst->pages[0]) + dst_offset,
6958 		       page_address(src->pages[0]) + src_offset,
6959 		       src->len);
6960 	}
6961 }
6962 
6963 void copy_extent_buffer(const struct extent_buffer *dst,
6964 			const struct extent_buffer *src,
6965 			unsigned long dst_offset, unsigned long src_offset,
6966 			unsigned long len)
6967 {
6968 	u64 dst_len = dst->len;
6969 	size_t cur;
6970 	size_t offset;
6971 	struct page *page;
6972 	char *kaddr;
6973 	unsigned long i = get_eb_page_index(dst_offset);
6974 
6975 	if (check_eb_range(dst, dst_offset, len) ||
6976 	    check_eb_range(src, src_offset, len))
6977 		return;
6978 
6979 	WARN_ON(src->len != dst_len);
6980 
6981 	offset = get_eb_offset_in_page(dst, dst_offset);
6982 
6983 	while (len > 0) {
6984 		page = dst->pages[i];
6985 		assert_eb_page_uptodate(dst, page);
6986 
6987 		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
6988 
6989 		kaddr = page_address(page);
6990 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
6991 
6992 		src_offset += cur;
6993 		len -= cur;
6994 		offset = 0;
6995 		i++;
6996 	}
6997 }
6998 
6999 /*
7000  * eb_bitmap_offset() - calculate the page and offset of the byte containing the
7001  * given bit number
7002  * @eb: the extent buffer
7003  * @start: offset of the bitmap item in the extent buffer
7004  * @nr: bit number
7005  * @page_index: return index of the page in the extent buffer that contains the
7006  * given bit number
7007  * @page_offset: return offset into the page given by page_index
7008  *
7009  * This helper hides the ugliness of finding the byte in an extent buffer which
7010  * contains a given bit.
7011  */
7012 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
7013 				    unsigned long start, unsigned long nr,
7014 				    unsigned long *page_index,
7015 				    size_t *page_offset)
7016 {
7017 	size_t byte_offset = BIT_BYTE(nr);
7018 	size_t offset;
7019 
7020 	/*
7021 	 * The byte we want is the offset of the extent buffer + the offset of
7022 	 * the bitmap item in the extent buffer + the offset of the byte in the
7023 	 * bitmap item.
7024 	 */
7025 	offset = start + offset_in_page(eb->start) + byte_offset;
7026 
7027 	*page_index = offset >> PAGE_SHIFT;
7028 	*page_offset = offset_in_page(offset);
7029 }
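
/*
 * A worked standalone example of the offset arithmetic above, assuming a
 * hypothetical 4K page and an eb that starts 256 bytes into its first page:
 *
 *	#include <assert.h>
 *	#include <stddef.h>
 *
 *	#define DEMO_PAGE_SIZE		4096UL
 *	#define DEMO_BIT_BYTE(nr)	((nr) / 8)
 *
 *	int main(void)
 *	{
 *		unsigned long eb_start = 16384 + 256;	// offset_in_page = 256
 *		unsigned long start = 100;	// bitmap offset inside the eb
 *		unsigned long nr = 70;		// lives in byte 8 of the bitmap
 *		size_t offset = start + (eb_start % DEMO_PAGE_SIZE) +
 *				DEMO_BIT_BYTE(nr);
 *
 *		assert(offset / DEMO_PAGE_SIZE == 0);	// page_index
 *		assert(offset % DEMO_PAGE_SIZE == 364);	// 100 + 256 + 8
 *		return 0;
 *	}
 */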
7030 
7031 /**
7032  * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
7033  * @eb: the extent buffer
7034  * @start: offset of the bitmap item in the extent buffer
7035  * @nr: bit number to test
7036  */
7037 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
7038 			   unsigned long nr)
7039 {
7040 	u8 *kaddr;
7041 	struct page *page;
7042 	unsigned long i;
7043 	size_t offset;
7044 
7045 	eb_bitmap_offset(eb, start, nr, &i, &offset);
7046 	page = eb->pages[i];
7047 	assert_eb_page_uptodate(eb, page);
7048 	kaddr = page_address(page);
7049 	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
7050 }
7051 
7052 /**
7053  * extent_buffer_bitmap_set - set an area of a bitmap
7054  * @eb: the extent buffer
7055  * @start: offset of the bitmap item in the extent buffer
7056  * @pos: bit number of the first bit
7057  * @len: number of bits to set
7058  */
7059 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
7060 			      unsigned long pos, unsigned long len)
7061 {
7062 	u8 *kaddr;
7063 	struct page *page;
7064 	unsigned long i;
7065 	size_t offset;
7066 	const unsigned int size = pos + len;
7067 	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
7068 	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
7069 
7070 	eb_bitmap_offset(eb, start, pos, &i, &offset);
7071 	page = eb->pages[i];
7072 	assert_eb_page_uptodate(eb, page);
7073 	kaddr = page_address(page);
7074 
7075 	while (len >= bits_to_set) {
7076 		kaddr[offset] |= mask_to_set;
7077 		len -= bits_to_set;
7078 		bits_to_set = BITS_PER_BYTE;
7079 		mask_to_set = ~0;
7080 		if (++offset >= PAGE_SIZE && len > 0) {
7081 			offset = 0;
7082 			page = eb->pages[++i];
7083 			assert_eb_page_uptodate(eb, page);
7084 			kaddr = page_address(page);
7085 		}
7086 	}
7087 	if (len) {
7088 		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
7089 		kaddr[offset] |= mask_to_set;
7090 	}
7091 }
7092 
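/*
 * The first/last byte masks above follow the kernel's little-endian bitmap
 * convention: the first mask keeps bits from 'pos' upward, the last mask
 * keeps bits below 'size'.  A standalone sketch with local reimplementations
 * (not the kernel macros), setting bits [3, 10) -- the tail of byte 0 and
 * the head of byte 1:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define FIRST_BYTE_MASK(start)	((uint8_t)(0xff << ((start) & 7)))
 *	#define LAST_BYTE_MASK(nbits)	((uint8_t)(0xff >> (-(nbits) & 7)))
 *
 *	int main(void)
 *	{
 *		assert(FIRST_BYTE_MASK(3) == 0xf8);	// bits 3..7 of byte 0
 *		assert(LAST_BYTE_MASK(10) == 0x03);	// bits 0..1 of byte 1
 *		return 0;
 *	}
 */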
7093 
7094 /**
7095  * extent_buffer_bitmap_clear - clear an area of a bitmap
7096  * @eb: the extent buffer
7097  * @start: offset of the bitmap item in the extent buffer
7098  * @pos: bit number of the first bit
7099  * @len: number of bits to clear
7100  */
7101 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
7102 				unsigned long start, unsigned long pos,
7103 				unsigned long len)
7104 {
7105 	u8 *kaddr;
7106 	struct page *page;
7107 	unsigned long i;
7108 	size_t offset;
7109 	const unsigned int size = pos + len;
7110 	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
7111 	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
7112 
7113 	eb_bitmap_offset(eb, start, pos, &i, &offset);
7114 	page = eb->pages[i];
7115 	assert_eb_page_uptodate(eb, page);
7116 	kaddr = page_address(page);
7117 
7118 	while (len >= bits_to_clear) {
7119 		kaddr[offset] &= ~mask_to_clear;
7120 		len -= bits_to_clear;
7121 		bits_to_clear = BITS_PER_BYTE;
7122 		mask_to_clear = ~0;
7123 		if (++offset >= PAGE_SIZE && len > 0) {
7124 			offset = 0;
7125 			page = eb->pages[++i];
7126 			assert_eb_page_uptodate(eb, page);
7127 			kaddr = page_address(page);
7128 		}
7129 	}
7130 	if (len) {
7131 		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
7132 		kaddr[offset] &= ~mask_to_clear;
7133 	}
7134 }
7135 
7136 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
7137 {
7138 	unsigned long distance = (src > dst) ? src - dst : dst - src;
7139 	return distance < len;
7140 }
7141 
7142 static void copy_pages(struct page *dst_page, struct page *src_page,
7143 		       unsigned long dst_off, unsigned long src_off,
7144 		       unsigned long len)
7145 {
7146 	char *dst_kaddr = page_address(dst_page);
7147 	char *src_kaddr;
7148 	int must_memmove = 0;
7149 
7150 	if (dst_page != src_page) {
7151 		src_kaddr = page_address(src_page);
7152 	} else {
7153 		src_kaddr = dst_kaddr;
7154 		if (areas_overlap(src_off, dst_off, len))
7155 			must_memmove = 1;
7156 	}
7157 
7158 	if (must_memmove)
7159 		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
7160 	else
7161 		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
7162 }
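
/*
 * A standalone demonstration of the overlap predicate above and why it
 * forces memmove() for copies within one page:
 *
 *	#include <assert.h>
 *	#include <string.h>
 *
 *	static int overlaps(unsigned long src, unsigned long dst,
 *			    unsigned long len)
 *	{
 *		unsigned long distance = src > dst ? src - dst : dst - src;
 *
 *		return distance < len;
 *	}
 *
 *	int main(void)
 *	{
 *		char buf[8] = "abcdef";
 *
 *		assert(overlaps(0, 2, 4));	// [0,4) and [2,6) overlap
 *		assert(!overlaps(0, 4, 4));	// [0,4) and [4,8) only touch
 *
 *		memmove(buf + 2, buf, 4);	// overlapping: memmove needed
 *		assert(memcmp(buf, "ababcd", 6) == 0);
 *		return 0;
 *	}
 */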
7163 
7164 void memcpy_extent_buffer(const struct extent_buffer *dst,
7165 			  unsigned long dst_offset, unsigned long src_offset,
7166 			  unsigned long len)
7167 {
7168 	size_t cur;
7169 	size_t dst_off_in_page;
7170 	size_t src_off_in_page;
7171 	unsigned long dst_i;
7172 	unsigned long src_i;
7173 
7174 	if (check_eb_range(dst, dst_offset, len) ||
7175 	    check_eb_range(dst, src_offset, len))
7176 		return;
7177 
7178 	while (len > 0) {
7179 		dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
7180 		src_off_in_page = get_eb_offset_in_page(dst, src_offset);
7181 
7182 		dst_i = get_eb_page_index(dst_offset);
7183 		src_i = get_eb_page_index(src_offset);
7184 
7185 		cur = min(len, (unsigned long)(PAGE_SIZE -
7186 					       src_off_in_page));
7187 		cur = min_t(unsigned long, cur,
7188 			(unsigned long)(PAGE_SIZE - dst_off_in_page));
7189 
7190 		copy_pages(dst->pages[dst_i], dst->pages[src_i],
7191 			   dst_off_in_page, src_off_in_page, cur);
7192 
7193 		src_offset += cur;
7194 		dst_offset += cur;
7195 		len -= cur;
7196 	}
7197 }
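
/*
 * Usage sketch (editorial): copying a non-overlapping region within one
 * extent buffer.  Both ranges must pass check_eb_range(); the offsets
 * here are illustrative only.
 */
static void __maybe_unused example_memcpy_extent_buffer(struct extent_buffer *eb)
{
	/* Copy 64 bytes from offset 0 to offset 128 of the same eb. */
	memcpy_extent_buffer(eb, 128, 0, 64);
}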
7198 
7199 void memmove_extent_buffer(const struct extent_buffer *dst,
7200 			   unsigned long dst_offset, unsigned long src_offset,
7201 			   unsigned long len)
7202 {
7203 	size_t cur;
7204 	size_t dst_off_in_page;
7205 	size_t src_off_in_page;
7206 	unsigned long dst_end = dst_offset + len - 1;
7207 	unsigned long src_end = src_offset + len - 1;
7208 	unsigned long dst_i;
7209 	unsigned long src_i;
7210 
7211 	if (check_eb_range(dst, dst_offset, len) ||
7212 	    check_eb_range(dst, src_offset, len))
7213 		return;
7214 	if (dst_offset < src_offset) {
7215 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
7216 		return;
7217 	}
7218 	while (len > 0) {
7219 		dst_i = get_eb_page_index(dst_end);
7220 		src_i = get_eb_page_index(src_end);
7221 
7222 		dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
7223 		src_off_in_page = get_eb_offset_in_page(dst, src_end);
7224 
7225 		cur = min_t(unsigned long, len, src_off_in_page + 1);
7226 		cur = min(cur, dst_off_in_page + 1);
7227 		copy_pages(dst->pages[dst_i], dst->pages[src_i],
7228 			   dst_off_in_page - cur + 1,
7229 			   src_off_in_page - cur + 1, cur);
7230 
7231 		dst_end -= cur;
7232 		src_end -= cur;
7233 		len -= cur;
7234 	}
7235 }
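
/*
 * Usage sketch (editorial): an overlapping, forward move.  Because
 * dst_offset > src_offset, memmove_extent_buffer() copies back to front,
 * page by page, so source bytes are read before they are overwritten.
 */
static void __maybe_unused example_memmove_extent_buffer(struct extent_buffer *eb)
{
	/* Shift 256 bytes at offset 0 forward by 16 bytes. */
	memmove_extent_buffer(eb, 16, 0, 256);
}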
7236 
7237 #define GANG_LOOKUP_SIZE	16
7238 static struct extent_buffer *get_next_extent_buffer(
7239 		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
7240 {
7241 	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
7242 	struct extent_buffer *found = NULL;
7243 	u64 page_start = page_offset(page);
7244 	u64 cur = page_start;
7245 
7246 	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
7247 	lockdep_assert_held(&fs_info->buffer_lock);
7248 
7249 	while (cur < page_start + PAGE_SIZE) {
7250 		int ret;
7251 		int i;
7252 
7253 		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
7254 				(void **)gang, cur >> fs_info->sectorsize_bits,
7255 				min_t(unsigned int, GANG_LOOKUP_SIZE,
7256 				      PAGE_SIZE / fs_info->nodesize));
7257 		if (ret == 0)
7258 			goto out;
7259 		for (i = 0; i < ret; i++) {
7260 			/* Already beyond page end */
7261 			if (gang[i]->start >= page_start + PAGE_SIZE)
7262 				goto out;
7263 			/* Found one */
7264 			if (gang[i]->start >= bytenr) {
7265 				found = gang[i];
7266 				goto out;
7267 			}
7268 		}
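		/* Restart the lookup right past the last eb in this batch. */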
7269 		cur = gang[ret - 1]->start + gang[ret - 1]->len;
7270 	}
7271 out:
7272 	return found;
7273 }
7274 
7275 static int try_release_subpage_extent_buffer(struct page *page)
7276 {
7277 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
7278 	u64 cur = page_offset(page);
7279 	const u64 end = page_offset(page) + PAGE_SIZE;
7280 	int ret;
7281 
7282 	while (cur < end) {
7283 		struct extent_buffer *eb = NULL;
7284 
7285 		/*
7286 		 * Unlike try_release_extent_buffer(), which uses page->private
7287 		 * to grab the buffer, in the subpage case we rely on the radix
7288 		 * tree, so we need to ensure radix tree consistency.
7289 		 *
7290 		 * We also want an atomic snapshot of the radix tree, so take
7291 		 * the spinlock rather than using RCU.
7292 		 */
7293 		spin_lock(&fs_info->buffer_lock);
7294 		eb = get_next_extent_buffer(fs_info, page, cur);
7295 		if (!eb) {
7296 			/* No more ebs in the page range at or after cur */
7297 			spin_unlock(&fs_info->buffer_lock);
7298 			break;
7299 		}
7300 		cur = eb->start + eb->len;
7301 
7302 		/*
7303 		 * As in try_release_extent_buffer(), take refs_lock to ensure
7304 		 * the eb won't disappear out from under us.
7305 		 */
7306 		spin_lock(&eb->refs_lock);
7307 		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
7308 			spin_unlock(&eb->refs_lock);
7309 			spin_unlock(&fs_info->buffer_lock);
7310 			break;
7311 		}
7312 		spin_unlock(&fs_info->buffer_lock);
7313 
7314 		/*
7315 		 * If the tree ref isn't set then we know the ref on this eb is
7316 		 * a real ref, so just stop here; this eb will likely be freed
7317 		 * soon anyway.
7318 		 */
7319 		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
7320 			spin_unlock(&eb->refs_lock);
7321 			break;
7322 		}
7323 
7324 		/*
7325 		 * We don't care about the return value here; we always check
7326 		 * the page private at the end.  Note that
7327 		 * release_extent_buffer() releases the refs_lock for us.
7328 		 */
7329 		release_extent_buffer(eb);
7330 	}
7331 	/*
7332 	 * Finally, check whether page private has been cleared: if all ebs in
7333 	 * the page have been released, page private should be cleared by now.
7334 	 */
7335 	spin_lock(&page->mapping->private_lock);
7336 	if (!PagePrivate(page))
7337 		ret = 1;
7338 	else
7339 		ret = 0;
7340 	spin_unlock(&page->mapping->private_lock);
7341 	return ret;
7342 }
7344 
7345 int try_release_extent_buffer(struct page *page)
7346 {
7347 	struct extent_buffer *eb;
7348 
7349 	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
7350 		return try_release_subpage_extent_buffer(page);
7351 
7352 	/*
7353 	 * We need to make sure nobody is changing page->private, as we rely
7354 	 * on it as the pointer to the extent buffer.
7355 	 */
7356 	spin_lock(&page->mapping->private_lock);
7357 	if (!PagePrivate(page)) {
7358 		spin_unlock(&page->mapping->private_lock);
7359 		return 1;
7360 	}
7361 
7362 	eb = (struct extent_buffer *)page->private;
7363 	BUG_ON(!eb);
7364 
7365 	/*
7366 	 * This is a little awful, but should be OK: we need to make sure
7367 	 * the eb doesn't disappear out from under us while we're looking at
7368 	 * this page.
7369 	 */
7370 	spin_lock(&eb->refs_lock);
7371 	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
7372 		spin_unlock(&eb->refs_lock);
7373 		spin_unlock(&page->mapping->private_lock);
7374 		return 0;
7375 	}
7376 	spin_unlock(&page->mapping->private_lock);
7377 
7378 	/*
7379 	 * If the tree ref isn't set then we know the ref on this eb is a real
7380 	 * ref, so just return; this page will likely be freed soon anyway.
7381 	 */
7382 	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
7383 		spin_unlock(&eb->refs_lock);
7384 		return 0;
7385 	}
7386 
7387 	return release_extent_buffer(eb);
7388 }
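
/*
 * Caller sketch (editorial): try_release_extent_buffer() does the real
 * work behind the btree releasepage callback.  Simplified, the callback
 * in disk-io.c boils down to something like this:
 */
static int __maybe_unused example_btree_releasepage(struct page *page,
						    gfp_t gfp_flags)
{
	/* Pages under writeback or still dirty cannot be released. */
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return try_release_extent_buffer(page);
}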
7389 
7390 /*
7391  * btrfs_readahead_tree_block - attempt to readahead a child block
7392  * @fs_info:	the fs_info
7393  * @bytenr:	bytenr to read
7394  * @owner_root: objectid of the root that owns this eb
7395  * @gen:	generation for the uptodate check, can be 0
7396  * @level:	level for the eb
7397  *
7398  * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
7399  * normal uptodate check of the eb, without checking the generation.  If we have
7400  * to read the block we will not block on anything.
7401  */
7402 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
7403 				u64 bytenr, u64 owner_root, u64 gen, int level)
7404 {
7405 	struct extent_buffer *eb;
7406 	int ret;
7407 
7408 	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
7409 	if (IS_ERR(eb))
7410 		return;
7411 
7412 	if (btrfs_buffer_uptodate(eb, gen, 1)) {
7413 		free_extent_buffer(eb);
7414 		return;
7415 	}
7416 
7417 	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0);
7418 	if (ret < 0)
7419 		free_extent_buffer_stale(eb);
7420 	else
7421 		free_extent_buffer(eb);
7422 }
7423 
7424 /*
7425  * btrfs_readahead_node_child - readahead a node's child block
7426  * @node:	parent node we're reading from
7427  * @slot:	slot in the parent node for the child we want to read
7428  *
7429  * A helper for btrfs_readahead_tree_block(); we simply read the bytenr
7430  * pointed to by the slot in the node provided.
7431  */
7432 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
7433 {
7434 	btrfs_readahead_tree_block(node->fs_info,
7435 				   btrfs_node_blockptr(node, slot),
7436 				   btrfs_header_owner(node),
7437 				   btrfs_node_ptr_generation(node, slot),
7438 				   btrfs_header_level(node) - 1);
7439 }
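
/*
 * Usage sketch (editorial): a tree walk can kick off readahead for every
 * child of an interior node before descending, a simplified version of
 * what the search readahead logic does.
 */
static void __maybe_unused example_readahead_all_children(struct extent_buffer *node)
{
	int nritems = btrfs_header_nritems(node);
	int slot;

	/* Leaves carry item data, not child block pointers. */
	if (btrfs_header_level(node) == 0)
		return;
	for (slot = 0; slot < nritems; slot++)
		btrfs_readahead_node_child(node, slot);
}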
7440