xref: /openbmc/linux/fs/btrfs/extent_io.c (revision b4bc93bd76d4da32600795cd323c971f00a2e788)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/writeback.h>
13 #include <linux/pagevec.h>
14 #include <linux/prefetch.h>
15 #include <linux/fsverity.h>
16 #include "misc.h"
17 #include "extent_io.h"
18 #include "extent-io-tree.h"
19 #include "extent_map.h"
20 #include "ctree.h"
21 #include "btrfs_inode.h"
22 #include "volumes.h"
23 #include "check-integrity.h"
24 #include "locking.h"
25 #include "rcu-string.h"
26 #include "backref.h"
27 #include "disk-io.h"
28 #include "subpage.h"
29 #include "zoned.h"
30 #include "block-group.h"
31 
32 static struct kmem_cache *extent_state_cache;
33 static struct kmem_cache *extent_buffer_cache;
34 static struct bio_set btrfs_bioset;
35 
36 static inline bool extent_state_in_tree(const struct extent_state *state)
37 {
38 	return !RB_EMPTY_NODE(&state->rb_node);
39 }
40 
41 #ifdef CONFIG_BTRFS_DEBUG
42 static LIST_HEAD(states);
43 static DEFINE_SPINLOCK(leak_lock);
44 
45 static inline void btrfs_leak_debug_add(spinlock_t *lock,
46 					struct list_head *new,
47 					struct list_head *head)
48 {
49 	unsigned long flags;
50 
51 	spin_lock_irqsave(lock, flags);
52 	list_add(new, head);
53 	spin_unlock_irqrestore(lock, flags);
54 }
55 
56 static inline void btrfs_leak_debug_del(spinlock_t *lock,
57 					struct list_head *entry)
58 {
59 	unsigned long flags;
60 
61 	spin_lock_irqsave(lock, flags);
62 	list_del(entry);
63 	spin_unlock_irqrestore(lock, flags);
64 }
65 
66 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
67 {
68 	struct extent_buffer *eb;
69 	unsigned long flags;
70 
71 	/*
72 	 * If we didn't get into open_ctree our allocated_ebs will not be
73 	 * initialized, so just skip this.
74 	 */
75 	if (!fs_info->allocated_ebs.next)
76 		return;
77 
78 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
79 	while (!list_empty(&fs_info->allocated_ebs)) {
80 		eb = list_first_entry(&fs_info->allocated_ebs,
81 				      struct extent_buffer, leak_list);
82 		pr_err(
83 	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
84 		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
85 		       btrfs_header_owner(eb));
86 		list_del(&eb->leak_list);
87 		kmem_cache_free(extent_buffer_cache, eb);
88 	}
89 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
90 }
91 
92 static inline void btrfs_extent_state_leak_debug_check(void)
93 {
94 	struct extent_state *state;
95 
96 	while (!list_empty(&states)) {
97 		state = list_entry(states.next, struct extent_state, leak_list);
98 		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
99 		       state->start, state->end, state->state,
100 		       extent_state_in_tree(state),
101 		       refcount_read(&state->refs));
102 		list_del(&state->leak_list);
103 		kmem_cache_free(extent_state_cache, state);
104 	}
105 }
106 
107 #define btrfs_debug_check_extent_io_range(tree, start, end)		\
108 	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
109 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
110 		struct extent_io_tree *tree, u64 start, u64 end)
111 {
112 	struct inode *inode = tree->private_data;
113 	u64 isize;
114 
115 	if (!inode || !is_data_inode(inode))
116 		return;
117 
118 	isize = i_size_read(inode);
119 	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
120 		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
121 		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
122 			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
123 	}
124 }
125 #else
126 #define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
127 #define btrfs_leak_debug_del(lock, entry)	do {} while (0)
128 #define btrfs_extent_state_leak_debug_check()	do {} while (0)
129 #define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
130 #endif
131 
132 struct tree_entry {
133 	u64 start;
134 	u64 end;
135 	struct rb_node rb_node;
136 };
137 
138 struct extent_page_data {
139 	struct btrfs_bio_ctrl bio_ctrl;
140 	/* tells writepage not to lock the state bits for this range
141 	 * it still does the unlocking
142 	 */
143 	unsigned int extent_locked:1;
144 
145 	/* tells the submit_bio code to use REQ_SYNC */
146 	unsigned int sync_io:1;
147 };
148 
149 static int add_extent_changeset(struct extent_state *state, u32 bits,
150 				 struct extent_changeset *changeset,
151 				 int set)
152 {
153 	int ret;
154 
155 	if (!changeset)
156 		return 0;
157 	if (set && (state->state & bits) == bits)
158 		return 0;
159 	if (!set && (state->state & bits) == 0)
160 		return 0;
161 	changeset->bytes_changed += state->end - state->start + 1;
162 	ret = ulist_add(&changeset->range_changed, state->start, state->end,
163 			GFP_ATOMIC);
164 	return ret;
165 }
166 
167 int __must_check submit_one_bio(struct bio *bio, int mirror_num,
168 				unsigned long bio_flags)
169 {
170 	blk_status_t ret = 0;
171 	struct extent_io_tree *tree = bio->bi_private;
172 
173 	bio->bi_private = NULL;
174 
175 	/* Caller should ensure the bio has at least some range added */
176 	ASSERT(bio->bi_iter.bi_size);
177 	if (is_data_inode(tree->private_data))
178 		ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
179 					    bio_flags);
180 	else
181 		ret = btrfs_submit_metadata_bio(tree->private_data, bio,
182 						mirror_num, bio_flags);
183 
184 	return blk_status_to_errno(ret);
185 }
186 
187 /* Cleanup unsubmitted bios */
188 static void end_write_bio(struct extent_page_data *epd, int ret)
189 {
190 	struct bio *bio = epd->bio_ctrl.bio;
191 
192 	if (bio) {
193 		bio->bi_status = errno_to_blk_status(ret);
194 		bio_endio(bio);
195 		epd->bio_ctrl.bio = NULL;
196 	}
197 }
198 
199 /*
200  * Submit bio from extent page data via submit_one_bio
201  *
202  * Return 0 if everything is OK.
203  * Return <0 for error.
204  */
205 static int __must_check flush_write_bio(struct extent_page_data *epd)
206 {
207 	int ret = 0;
208 	struct bio *bio = epd->bio_ctrl.bio;
209 
210 	if (bio) {
211 		ret = submit_one_bio(bio, 0, 0);
212 		/*
213 		 * Clean up of epd->bio is handled by its endio function.
214 		 * And endio is either triggered by successful bio execution
215 		 * or the error handler of submit bio hook.
216 		 * So at this point, no matter what happened, we don't need
217 		 * to clean up epd->bio.
218 		 */
219 		epd->bio_ctrl.bio = NULL;
220 	}
221 	return ret;
222 }
223 
224 int __init extent_state_cache_init(void)
225 {
226 	extent_state_cache = kmem_cache_create("btrfs_extent_state",
227 			sizeof(struct extent_state), 0,
228 			SLAB_MEM_SPREAD, NULL);
229 	if (!extent_state_cache)
230 		return -ENOMEM;
231 	return 0;
232 }
233 
234 int __init extent_io_init(void)
235 {
236 	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
237 			sizeof(struct extent_buffer), 0,
238 			SLAB_MEM_SPREAD, NULL);
239 	if (!extent_buffer_cache)
240 		return -ENOMEM;
241 
242 	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
243 			offsetof(struct btrfs_bio, bio),
244 			BIOSET_NEED_BVECS))
245 		goto free_buffer_cache;
246 
247 	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
248 		goto free_bioset;
249 
250 	return 0;
251 
252 free_bioset:
253 	bioset_exit(&btrfs_bioset);
254 
255 free_buffer_cache:
256 	kmem_cache_destroy(extent_buffer_cache);
257 	extent_buffer_cache = NULL;
258 	return -ENOMEM;
259 }
260 
261 void __cold extent_state_cache_exit(void)
262 {
263 	btrfs_extent_state_leak_debug_check();
264 	kmem_cache_destroy(extent_state_cache);
265 }
266 
267 void __cold extent_io_exit(void)
268 {
269 	/*
270 	 * Make sure all delayed rcu free are flushed before we
271 	 * destroy caches.
272 	 */
273 	rcu_barrier();
274 	kmem_cache_destroy(extent_buffer_cache);
275 	bioset_exit(&btrfs_bioset);
276 }
277 
278 /*
279  * For the file_extent_tree, we want to hold the inode lock when we lookup and
280  * update the disk_i_size, but lockdep will complain because our io_tree we hold
281  * the tree lock and get the inode lock when setting delalloc.  These two things
282  * are unrelated, so make a class for the file_extent_tree so we don't get the
283  * two locking patterns mixed up.
284  */
285 static struct lock_class_key file_extent_tree_class;
286 
287 void extent_io_tree_init(struct btrfs_fs_info *fs_info,
288 			 struct extent_io_tree *tree, unsigned int owner,
289 			 void *private_data)
290 {
291 	tree->fs_info = fs_info;
292 	tree->state = RB_ROOT;
293 	tree->dirty_bytes = 0;
294 	spin_lock_init(&tree->lock);
295 	tree->private_data = private_data;
296 	tree->owner = owner;
297 	if (owner == IO_TREE_INODE_FILE_EXTENT)
298 		lockdep_set_class(&tree->lock, &file_extent_tree_class);
299 }
300 
301 void extent_io_tree_release(struct extent_io_tree *tree)
302 {
303 	spin_lock(&tree->lock);
304 	/*
305 	 * Do a single barrier for the waitqueue_active check here, the state
306 	 * of the waitqueue should not change once extent_io_tree_release is
307 	 * called.
308 	 */
309 	smp_mb();
310 	while (!RB_EMPTY_ROOT(&tree->state)) {
311 		struct rb_node *node;
312 		struct extent_state *state;
313 
314 		node = rb_first(&tree->state);
315 		state = rb_entry(node, struct extent_state, rb_node);
316 		rb_erase(&state->rb_node, &tree->state);
317 		RB_CLEAR_NODE(&state->rb_node);
318 		/*
319 		 * btree io trees aren't supposed to have tasks waiting for
320 		 * changes in the flags of extent states ever.
321 		 */
322 		ASSERT(!waitqueue_active(&state->wq));
323 		free_extent_state(state);
324 
325 		cond_resched_lock(&tree->lock);
326 	}
327 	spin_unlock(&tree->lock);
328 }
329 
330 static struct extent_state *alloc_extent_state(gfp_t mask)
331 {
332 	struct extent_state *state;
333 
334 	/*
335 	 * The given mask might be not appropriate for the slab allocator,
336 	 * drop the unsupported bits
337 	 */
338 	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
339 	state = kmem_cache_alloc(extent_state_cache, mask);
340 	if (!state)
341 		return state;
342 	state->state = 0;
343 	state->failrec = NULL;
344 	RB_CLEAR_NODE(&state->rb_node);
345 	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
346 	refcount_set(&state->refs, 1);
347 	init_waitqueue_head(&state->wq);
348 	trace_alloc_extent_state(state, mask, _RET_IP_);
349 	return state;
350 }
351 
352 void free_extent_state(struct extent_state *state)
353 {
354 	if (!state)
355 		return;
356 	if (refcount_dec_and_test(&state->refs)) {
357 		WARN_ON(extent_state_in_tree(state));
358 		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
359 		trace_free_extent_state(state, _RET_IP_);
360 		kmem_cache_free(extent_state_cache, state);
361 	}
362 }
363 
364 static struct rb_node *tree_insert(struct rb_root *root,
365 				   struct rb_node *search_start,
366 				   u64 offset,
367 				   struct rb_node *node,
368 				   struct rb_node ***p_in,
369 				   struct rb_node **parent_in)
370 {
371 	struct rb_node **p;
372 	struct rb_node *parent = NULL;
373 	struct tree_entry *entry;
374 
375 	if (p_in && parent_in) {
376 		p = *p_in;
377 		parent = *parent_in;
378 		goto do_insert;
379 	}
380 
381 	p = search_start ? &search_start : &root->rb_node;
382 	while (*p) {
383 		parent = *p;
384 		entry = rb_entry(parent, struct tree_entry, rb_node);
385 
386 		if (offset < entry->start)
387 			p = &(*p)->rb_left;
388 		else if (offset > entry->end)
389 			p = &(*p)->rb_right;
390 		else
391 			return parent;
392 	}
393 
394 do_insert:
395 	rb_link_node(node, parent, p);
396 	rb_insert_color(node, root);
397 	return NULL;
398 }
399 
400 /**
401  * Search @tree for an entry that contains @offset. Such entry would have
402  * entry->start <= offset && entry->end >= offset.
403  *
404  * @tree:       the tree to search
405  * @offset:     offset that should fall within an entry in @tree
406  * @next_ret:   pointer to the first entry whose range ends after @offset
407  * @prev_ret:   pointer to the first entry whose range begins before @offset
408  * @p_ret:      pointer where new node should be anchored (used when inserting an
409  *	        entry in the tree)
410  * @parent_ret: points to entry which would have been the parent of the entry,
411  *               containing @offset
412  *
413  * This function returns a pointer to the entry that contains @offset byte
414  * address. If no such entry exists, then NULL is returned and the other
415  * pointer arguments to the function are filled, otherwise the found entry is
416  * returned and other pointers are left untouched.
417  */
418 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
419 				      struct rb_node **next_ret,
420 				      struct rb_node **prev_ret,
421 				      struct rb_node ***p_ret,
422 				      struct rb_node **parent_ret)
423 {
424 	struct rb_root *root = &tree->state;
425 	struct rb_node **n = &root->rb_node;
426 	struct rb_node *prev = NULL;
427 	struct rb_node *orig_prev = NULL;
428 	struct tree_entry *entry;
429 	struct tree_entry *prev_entry = NULL;
430 
431 	while (*n) {
432 		prev = *n;
433 		entry = rb_entry(prev, struct tree_entry, rb_node);
434 		prev_entry = entry;
435 
436 		if (offset < entry->start)
437 			n = &(*n)->rb_left;
438 		else if (offset > entry->end)
439 			n = &(*n)->rb_right;
440 		else
441 			return *n;
442 	}
443 
444 	if (p_ret)
445 		*p_ret = n;
446 	if (parent_ret)
447 		*parent_ret = prev;
448 
449 	if (next_ret) {
450 		orig_prev = prev;
451 		while (prev && offset > prev_entry->end) {
452 			prev = rb_next(prev);
453 			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
454 		}
455 		*next_ret = prev;
456 		prev = orig_prev;
457 	}
458 
459 	if (prev_ret) {
460 		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
461 		while (prev && offset < prev_entry->start) {
462 			prev = rb_prev(prev);
463 			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
464 		}
465 		*prev_ret = prev;
466 	}
467 	return NULL;
468 }
469 
470 static inline struct rb_node *
471 tree_search_for_insert(struct extent_io_tree *tree,
472 		       u64 offset,
473 		       struct rb_node ***p_ret,
474 		       struct rb_node **parent_ret)
475 {
476 	struct rb_node *next= NULL;
477 	struct rb_node *ret;
478 
479 	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
480 	if (!ret)
481 		return next;
482 	return ret;
483 }
484 
485 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
486 					  u64 offset)
487 {
488 	return tree_search_for_insert(tree, offset, NULL, NULL);
489 }
490 
491 /*
492  * utility function to look for merge candidates inside a given range.
493  * Any extents with matching state are merged together into a single
494  * extent in the tree.  Extents with EXTENT_IO in their state field
495  * are not merged because the end_io handlers need to be able to do
496  * operations on them without sleeping (or doing allocations/splits).
497  *
498  * This should be called with the tree lock held.
499  */
500 static void merge_state(struct extent_io_tree *tree,
501 		        struct extent_state *state)
502 {
503 	struct extent_state *other;
504 	struct rb_node *other_node;
505 
506 	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
507 		return;
508 
509 	other_node = rb_prev(&state->rb_node);
510 	if (other_node) {
511 		other = rb_entry(other_node, struct extent_state, rb_node);
512 		if (other->end == state->start - 1 &&
513 		    other->state == state->state) {
514 			if (tree->private_data &&
515 			    is_data_inode(tree->private_data))
516 				btrfs_merge_delalloc_extent(tree->private_data,
517 							    state, other);
518 			state->start = other->start;
519 			rb_erase(&other->rb_node, &tree->state);
520 			RB_CLEAR_NODE(&other->rb_node);
521 			free_extent_state(other);
522 		}
523 	}
524 	other_node = rb_next(&state->rb_node);
525 	if (other_node) {
526 		other = rb_entry(other_node, struct extent_state, rb_node);
527 		if (other->start == state->end + 1 &&
528 		    other->state == state->state) {
529 			if (tree->private_data &&
530 			    is_data_inode(tree->private_data))
531 				btrfs_merge_delalloc_extent(tree->private_data,
532 							    state, other);
533 			state->end = other->end;
534 			rb_erase(&other->rb_node, &tree->state);
535 			RB_CLEAR_NODE(&other->rb_node);
536 			free_extent_state(other);
537 		}
538 	}
539 }
540 
541 static void set_state_bits(struct extent_io_tree *tree,
542 			   struct extent_state *state, u32 *bits,
543 			   struct extent_changeset *changeset);
544 
545 /*
546  * insert an extent_state struct into the tree.  'bits' are set on the
547  * struct before it is inserted.
548  *
549  * This may return -EEXIST if the extent is already there, in which case the
550  * state struct is freed.
551  *
552  * The tree lock is not taken internally.  This is a utility function and
553  * probably isn't what you want to call (see set/clear_extent_bit).
554  */
555 static int insert_state(struct extent_io_tree *tree,
556 			struct extent_state *state, u64 start, u64 end,
557 			struct rb_node ***p,
558 			struct rb_node **parent,
559 			u32 *bits, struct extent_changeset *changeset)
560 {
561 	struct rb_node *node;
562 
563 	if (end < start) {
564 		btrfs_err(tree->fs_info,
565 			"insert state: end < start %llu %llu", end, start);
566 		WARN_ON(1);
567 	}
568 	state->start = start;
569 	state->end = end;
570 
571 	set_state_bits(tree, state, bits, changeset);
572 
573 	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
574 	if (node) {
575 		struct extent_state *found;
576 		found = rb_entry(node, struct extent_state, rb_node);
577 		btrfs_err(tree->fs_info,
578 		       "found node %llu %llu on insert of %llu %llu",
579 		       found->start, found->end, start, end);
580 		return -EEXIST;
581 	}
582 	merge_state(tree, state);
583 	return 0;
584 }
585 
586 /*
587  * split a given extent state struct in two, inserting the preallocated
588  * struct 'prealloc' as the newly created second half.  'split' indicates an
589  * offset inside 'orig' where it should be split.
590  *
591  * Before calling,
592  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
593  * are two extent state structs in the tree:
594  * prealloc: [orig->start, split - 1]
595  * orig: [ split, orig->end ]
596  *
597  * The tree locks are not taken by this function. They need to be held
598  * by the caller.
599  */
600 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
601 		       struct extent_state *prealloc, u64 split)
602 {
603 	struct rb_node *node;
604 
605 	if (tree->private_data && is_data_inode(tree->private_data))
606 		btrfs_split_delalloc_extent(tree->private_data, orig, split);
607 
608 	prealloc->start = orig->start;
609 	prealloc->end = split - 1;
610 	prealloc->state = orig->state;
611 	orig->start = split;
612 
613 	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
614 			   &prealloc->rb_node, NULL, NULL);
615 	if (node) {
616 		free_extent_state(prealloc);
617 		return -EEXIST;
618 	}
619 	return 0;
620 }
621 
622 static struct extent_state *next_state(struct extent_state *state)
623 {
624 	struct rb_node *next = rb_next(&state->rb_node);
625 	if (next)
626 		return rb_entry(next, struct extent_state, rb_node);
627 	else
628 		return NULL;
629 }
630 
631 /*
632  * utility function to clear some bits in an extent state struct.
633  * it will optionally wake up anyone waiting on this state (wake == 1).
634  *
635  * If no bits are set on the state struct after clearing things, the
636  * struct is freed and removed from the tree
637  */
638 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
639 					    struct extent_state *state,
640 					    u32 *bits, int wake,
641 					    struct extent_changeset *changeset)
642 {
643 	struct extent_state *next;
644 	u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
645 	int ret;
646 
647 	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
648 		u64 range = state->end - state->start + 1;
649 		WARN_ON(range > tree->dirty_bytes);
650 		tree->dirty_bytes -= range;
651 	}
652 
653 	if (tree->private_data && is_data_inode(tree->private_data))
654 		btrfs_clear_delalloc_extent(tree->private_data, state, bits);
655 
656 	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
657 	BUG_ON(ret < 0);
658 	state->state &= ~bits_to_clear;
659 	if (wake)
660 		wake_up(&state->wq);
661 	if (state->state == 0) {
662 		next = next_state(state);
663 		if (extent_state_in_tree(state)) {
664 			rb_erase(&state->rb_node, &tree->state);
665 			RB_CLEAR_NODE(&state->rb_node);
666 			free_extent_state(state);
667 		} else {
668 			WARN_ON(1);
669 		}
670 	} else {
671 		merge_state(tree, state);
672 		next = next_state(state);
673 	}
674 	return next;
675 }
676 
677 static struct extent_state *
678 alloc_extent_state_atomic(struct extent_state *prealloc)
679 {
680 	if (!prealloc)
681 		prealloc = alloc_extent_state(GFP_ATOMIC);
682 
683 	return prealloc;
684 }
685 
686 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
687 {
688 	btrfs_panic(tree->fs_info, err,
689 	"locking error: extent tree was modified by another thread while locked");
690 }
691 
692 /*
693  * clear some bits on a range in the tree.  This may require splitting
694  * or inserting elements in the tree, so the gfp mask is used to
695  * indicate which allocations or sleeping are allowed.
696  *
697  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
698  * the given range from the tree regardless of state (ie for truncate).
699  *
700  * the range [start, end] is inclusive.
701  *
702  * This takes the tree lock, and returns 0 on success and < 0 on error.
703  */
704 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
705 		       u32 bits, int wake, int delete,
706 		       struct extent_state **cached_state,
707 		       gfp_t mask, struct extent_changeset *changeset)
708 {
709 	struct extent_state *state;
710 	struct extent_state *cached;
711 	struct extent_state *prealloc = NULL;
712 	struct rb_node *node;
713 	u64 last_end;
714 	int err;
715 	int clear = 0;
716 
717 	btrfs_debug_check_extent_io_range(tree, start, end);
718 	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
719 
720 	if (bits & EXTENT_DELALLOC)
721 		bits |= EXTENT_NORESERVE;
722 
723 	if (delete)
724 		bits |= ~EXTENT_CTLBITS;
725 
726 	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
727 		clear = 1;
728 again:
729 	if (!prealloc && gfpflags_allow_blocking(mask)) {
730 		/*
731 		 * Don't care for allocation failure here because we might end
732 		 * up not needing the pre-allocated extent state at all, which
733 		 * is the case if we only have in the tree extent states that
734 		 * cover our input range and don't cover too any other range.
735 		 * If we end up needing a new extent state we allocate it later.
736 		 */
737 		prealloc = alloc_extent_state(mask);
738 	}
739 
740 	spin_lock(&tree->lock);
741 	if (cached_state) {
742 		cached = *cached_state;
743 
744 		if (clear) {
745 			*cached_state = NULL;
746 			cached_state = NULL;
747 		}
748 
749 		if (cached && extent_state_in_tree(cached) &&
750 		    cached->start <= start && cached->end > start) {
751 			if (clear)
752 				refcount_dec(&cached->refs);
753 			state = cached;
754 			goto hit_next;
755 		}
756 		if (clear)
757 			free_extent_state(cached);
758 	}
759 	/*
760 	 * this search will find the extents that end after
761 	 * our range starts
762 	 */
763 	node = tree_search(tree, start);
764 	if (!node)
765 		goto out;
766 	state = rb_entry(node, struct extent_state, rb_node);
767 hit_next:
768 	if (state->start > end)
769 		goto out;
770 	WARN_ON(state->end < start);
771 	last_end = state->end;
772 
773 	/* the state doesn't have the wanted bits, go ahead */
774 	if (!(state->state & bits)) {
775 		state = next_state(state);
776 		goto next;
777 	}
778 
779 	/*
780 	 *     | ---- desired range ---- |
781 	 *  | state | or
782 	 *  | ------------- state -------------- |
783 	 *
784 	 * We need to split the extent we found, and may flip
785 	 * bits on second half.
786 	 *
787 	 * If the extent we found extends past our range, we
788 	 * just split and search again.  It'll get split again
789 	 * the next time though.
790 	 *
791 	 * If the extent we found is inside our range, we clear
792 	 * the desired bit on it.
793 	 */
794 
795 	if (state->start < start) {
796 		prealloc = alloc_extent_state_atomic(prealloc);
797 		BUG_ON(!prealloc);
798 		err = split_state(tree, state, prealloc, start);
799 		if (err)
800 			extent_io_tree_panic(tree, err);
801 
802 		prealloc = NULL;
803 		if (err)
804 			goto out;
805 		if (state->end <= end) {
806 			state = clear_state_bit(tree, state, &bits, wake,
807 						changeset);
808 			goto next;
809 		}
810 		goto search_again;
811 	}
812 	/*
813 	 * | ---- desired range ---- |
814 	 *                        | state |
815 	 * We need to split the extent, and clear the bit
816 	 * on the first half
817 	 */
818 	if (state->start <= end && state->end > end) {
819 		prealloc = alloc_extent_state_atomic(prealloc);
820 		BUG_ON(!prealloc);
821 		err = split_state(tree, state, prealloc, end + 1);
822 		if (err)
823 			extent_io_tree_panic(tree, err);
824 
825 		if (wake)
826 			wake_up(&state->wq);
827 
828 		clear_state_bit(tree, prealloc, &bits, wake, changeset);
829 
830 		prealloc = NULL;
831 		goto out;
832 	}
833 
834 	state = clear_state_bit(tree, state, &bits, wake, changeset);
835 next:
836 	if (last_end == (u64)-1)
837 		goto out;
838 	start = last_end + 1;
839 	if (start <= end && state && !need_resched())
840 		goto hit_next;
841 
842 search_again:
843 	if (start > end)
844 		goto out;
845 	spin_unlock(&tree->lock);
846 	if (gfpflags_allow_blocking(mask))
847 		cond_resched();
848 	goto again;
849 
850 out:
851 	spin_unlock(&tree->lock);
852 	if (prealloc)
853 		free_extent_state(prealloc);
854 
855 	return 0;
856 
857 }
858 
859 static void wait_on_state(struct extent_io_tree *tree,
860 			  struct extent_state *state)
861 		__releases(tree->lock)
862 		__acquires(tree->lock)
863 {
864 	DEFINE_WAIT(wait);
865 	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
866 	spin_unlock(&tree->lock);
867 	schedule();
868 	spin_lock(&tree->lock);
869 	finish_wait(&state->wq, &wait);
870 }
871 
872 /*
873  * waits for one or more bits to clear on a range in the state tree.
874  * The range [start, end] is inclusive.
875  * The tree lock is taken by this function
876  */
877 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
878 			    u32 bits)
879 {
880 	struct extent_state *state;
881 	struct rb_node *node;
882 
883 	btrfs_debug_check_extent_io_range(tree, start, end);
884 
885 	spin_lock(&tree->lock);
886 again:
887 	while (1) {
888 		/*
889 		 * this search will find all the extents that end after
890 		 * our range starts
891 		 */
892 		node = tree_search(tree, start);
893 process_node:
894 		if (!node)
895 			break;
896 
897 		state = rb_entry(node, struct extent_state, rb_node);
898 
899 		if (state->start > end)
900 			goto out;
901 
902 		if (state->state & bits) {
903 			start = state->start;
904 			refcount_inc(&state->refs);
905 			wait_on_state(tree, state);
906 			free_extent_state(state);
907 			goto again;
908 		}
909 		start = state->end + 1;
910 
911 		if (start > end)
912 			break;
913 
914 		if (!cond_resched_lock(&tree->lock)) {
915 			node = rb_next(node);
916 			goto process_node;
917 		}
918 	}
919 out:
920 	spin_unlock(&tree->lock);
921 }
922 
923 static void set_state_bits(struct extent_io_tree *tree,
924 			   struct extent_state *state,
925 			   u32 *bits, struct extent_changeset *changeset)
926 {
927 	u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
928 	int ret;
929 
930 	if (tree->private_data && is_data_inode(tree->private_data))
931 		btrfs_set_delalloc_extent(tree->private_data, state, bits);
932 
933 	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
934 		u64 range = state->end - state->start + 1;
935 		tree->dirty_bytes += range;
936 	}
937 	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
938 	BUG_ON(ret < 0);
939 	state->state |= bits_to_set;
940 }
941 
942 static void cache_state_if_flags(struct extent_state *state,
943 				 struct extent_state **cached_ptr,
944 				 unsigned flags)
945 {
946 	if (cached_ptr && !(*cached_ptr)) {
947 		if (!flags || (state->state & flags)) {
948 			*cached_ptr = state;
949 			refcount_inc(&state->refs);
950 		}
951 	}
952 }
953 
954 static void cache_state(struct extent_state *state,
955 			struct extent_state **cached_ptr)
956 {
957 	return cache_state_if_flags(state, cached_ptr,
958 				    EXTENT_LOCKED | EXTENT_BOUNDARY);
959 }
960 
961 /*
962  * set some bits on a range in the tree.  This may require allocations or
963  * sleeping, so the gfp mask is used to indicate what is allowed.
964  *
965  * If any of the exclusive bits are set, this will fail with -EEXIST if some
966  * part of the range already has the desired bits set.  The start of the
967  * existing range is returned in failed_start in this case.
968  *
969  * [start, end] is inclusive This takes the tree lock.
970  */
971 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
972 		   u32 exclusive_bits, u64 *failed_start,
973 		   struct extent_state **cached_state, gfp_t mask,
974 		   struct extent_changeset *changeset)
975 {
976 	struct extent_state *state;
977 	struct extent_state *prealloc = NULL;
978 	struct rb_node *node;
979 	struct rb_node **p;
980 	struct rb_node *parent;
981 	int err = 0;
982 	u64 last_start;
983 	u64 last_end;
984 
985 	btrfs_debug_check_extent_io_range(tree, start, end);
986 	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
987 
988 	if (exclusive_bits)
989 		ASSERT(failed_start);
990 	else
991 		ASSERT(failed_start == NULL);
992 again:
993 	if (!prealloc && gfpflags_allow_blocking(mask)) {
994 		/*
995 		 * Don't care for allocation failure here because we might end
996 		 * up not needing the pre-allocated extent state at all, which
997 		 * is the case if we only have in the tree extent states that
998 		 * cover our input range and don't cover too any other range.
999 		 * If we end up needing a new extent state we allocate it later.
1000 		 */
1001 		prealloc = alloc_extent_state(mask);
1002 	}
1003 
1004 	spin_lock(&tree->lock);
1005 	if (cached_state && *cached_state) {
1006 		state = *cached_state;
1007 		if (state->start <= start && state->end > start &&
1008 		    extent_state_in_tree(state)) {
1009 			node = &state->rb_node;
1010 			goto hit_next;
1011 		}
1012 	}
1013 	/*
1014 	 * this search will find all the extents that end after
1015 	 * our range starts.
1016 	 */
1017 	node = tree_search_for_insert(tree, start, &p, &parent);
1018 	if (!node) {
1019 		prealloc = alloc_extent_state_atomic(prealloc);
1020 		BUG_ON(!prealloc);
1021 		err = insert_state(tree, prealloc, start, end,
1022 				   &p, &parent, &bits, changeset);
1023 		if (err)
1024 			extent_io_tree_panic(tree, err);
1025 
1026 		cache_state(prealloc, cached_state);
1027 		prealloc = NULL;
1028 		goto out;
1029 	}
1030 	state = rb_entry(node, struct extent_state, rb_node);
1031 hit_next:
1032 	last_start = state->start;
1033 	last_end = state->end;
1034 
1035 	/*
1036 	 * | ---- desired range ---- |
1037 	 * | state |
1038 	 *
1039 	 * Just lock what we found and keep going
1040 	 */
1041 	if (state->start == start && state->end <= end) {
1042 		if (state->state & exclusive_bits) {
1043 			*failed_start = state->start;
1044 			err = -EEXIST;
1045 			goto out;
1046 		}
1047 
1048 		set_state_bits(tree, state, &bits, changeset);
1049 		cache_state(state, cached_state);
1050 		merge_state(tree, state);
1051 		if (last_end == (u64)-1)
1052 			goto out;
1053 		start = last_end + 1;
1054 		state = next_state(state);
1055 		if (start < end && state && state->start == start &&
1056 		    !need_resched())
1057 			goto hit_next;
1058 		goto search_again;
1059 	}
1060 
1061 	/*
1062 	 *     | ---- desired range ---- |
1063 	 * | state |
1064 	 *   or
1065 	 * | ------------- state -------------- |
1066 	 *
1067 	 * We need to split the extent we found, and may flip bits on
1068 	 * second half.
1069 	 *
1070 	 * If the extent we found extends past our
1071 	 * range, we just split and search again.  It'll get split
1072 	 * again the next time though.
1073 	 *
1074 	 * If the extent we found is inside our range, we set the
1075 	 * desired bit on it.
1076 	 */
1077 	if (state->start < start) {
1078 		if (state->state & exclusive_bits) {
1079 			*failed_start = start;
1080 			err = -EEXIST;
1081 			goto out;
1082 		}
1083 
1084 		/*
1085 		 * If this extent already has all the bits we want set, then
1086 		 * skip it, not necessary to split it or do anything with it.
1087 		 */
1088 		if ((state->state & bits) == bits) {
1089 			start = state->end + 1;
1090 			cache_state(state, cached_state);
1091 			goto search_again;
1092 		}
1093 
1094 		prealloc = alloc_extent_state_atomic(prealloc);
1095 		BUG_ON(!prealloc);
1096 		err = split_state(tree, state, prealloc, start);
1097 		if (err)
1098 			extent_io_tree_panic(tree, err);
1099 
1100 		prealloc = NULL;
1101 		if (err)
1102 			goto out;
1103 		if (state->end <= end) {
1104 			set_state_bits(tree, state, &bits, changeset);
1105 			cache_state(state, cached_state);
1106 			merge_state(tree, state);
1107 			if (last_end == (u64)-1)
1108 				goto out;
1109 			start = last_end + 1;
1110 			state = next_state(state);
1111 			if (start < end && state && state->start == start &&
1112 			    !need_resched())
1113 				goto hit_next;
1114 		}
1115 		goto search_again;
1116 	}
1117 	/*
1118 	 * | ---- desired range ---- |
1119 	 *     | state | or               | state |
1120 	 *
1121 	 * There's a hole, we need to insert something in it and
1122 	 * ignore the extent we found.
1123 	 */
1124 	if (state->start > start) {
1125 		u64 this_end;
1126 		if (end < last_start)
1127 			this_end = end;
1128 		else
1129 			this_end = last_start - 1;
1130 
1131 		prealloc = alloc_extent_state_atomic(prealloc);
1132 		BUG_ON(!prealloc);
1133 
1134 		/*
1135 		 * Avoid to free 'prealloc' if it can be merged with
1136 		 * the later extent.
1137 		 */
1138 		err = insert_state(tree, prealloc, start, this_end,
1139 				   NULL, NULL, &bits, changeset);
1140 		if (err)
1141 			extent_io_tree_panic(tree, err);
1142 
1143 		cache_state(prealloc, cached_state);
1144 		prealloc = NULL;
1145 		start = this_end + 1;
1146 		goto search_again;
1147 	}
1148 	/*
1149 	 * | ---- desired range ---- |
1150 	 *                        | state |
1151 	 * We need to split the extent, and set the bit
1152 	 * on the first half
1153 	 */
1154 	if (state->start <= end && state->end > end) {
1155 		if (state->state & exclusive_bits) {
1156 			*failed_start = start;
1157 			err = -EEXIST;
1158 			goto out;
1159 		}
1160 
1161 		prealloc = alloc_extent_state_atomic(prealloc);
1162 		BUG_ON(!prealloc);
1163 		err = split_state(tree, state, prealloc, end + 1);
1164 		if (err)
1165 			extent_io_tree_panic(tree, err);
1166 
1167 		set_state_bits(tree, prealloc, &bits, changeset);
1168 		cache_state(prealloc, cached_state);
1169 		merge_state(tree, prealloc);
1170 		prealloc = NULL;
1171 		goto out;
1172 	}
1173 
1174 search_again:
1175 	if (start > end)
1176 		goto out;
1177 	spin_unlock(&tree->lock);
1178 	if (gfpflags_allow_blocking(mask))
1179 		cond_resched();
1180 	goto again;
1181 
1182 out:
1183 	spin_unlock(&tree->lock);
1184 	if (prealloc)
1185 		free_extent_state(prealloc);
1186 
1187 	return err;
1188 
1189 }
1190 
1191 /**
1192  * convert_extent_bit - convert all bits in a given range from one bit to
1193  * 			another
1194  * @tree:	the io tree to search
1195  * @start:	the start offset in bytes
1196  * @end:	the end offset in bytes (inclusive)
1197  * @bits:	the bits to set in this range
1198  * @clear_bits:	the bits to clear in this range
1199  * @cached_state:	state that we're going to cache
1200  *
1201  * This will go through and set bits for the given range.  If any states exist
1202  * already in this range they are set with the given bit and cleared of the
1203  * clear_bits.  This is only meant to be used by things that are mergeable, ie
1204  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1205  * boundary bits like LOCK.
1206  *
1207  * All allocations are done with GFP_NOFS.
1208  */
1209 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1210 		       u32 bits, u32 clear_bits,
1211 		       struct extent_state **cached_state)
1212 {
1213 	struct extent_state *state;
1214 	struct extent_state *prealloc = NULL;
1215 	struct rb_node *node;
1216 	struct rb_node **p;
1217 	struct rb_node *parent;
1218 	int err = 0;
1219 	u64 last_start;
1220 	u64 last_end;
1221 	bool first_iteration = true;
1222 
1223 	btrfs_debug_check_extent_io_range(tree, start, end);
1224 	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1225 				       clear_bits);
1226 
1227 again:
1228 	if (!prealloc) {
1229 		/*
1230 		 * Best effort, don't worry if extent state allocation fails
1231 		 * here for the first iteration. We might have a cached state
1232 		 * that matches exactly the target range, in which case no
1233 		 * extent state allocations are needed. We'll only know this
1234 		 * after locking the tree.
1235 		 */
1236 		prealloc = alloc_extent_state(GFP_NOFS);
1237 		if (!prealloc && !first_iteration)
1238 			return -ENOMEM;
1239 	}
1240 
1241 	spin_lock(&tree->lock);
1242 	if (cached_state && *cached_state) {
1243 		state = *cached_state;
1244 		if (state->start <= start && state->end > start &&
1245 		    extent_state_in_tree(state)) {
1246 			node = &state->rb_node;
1247 			goto hit_next;
1248 		}
1249 	}
1250 
1251 	/*
1252 	 * this search will find all the extents that end after
1253 	 * our range starts.
1254 	 */
1255 	node = tree_search_for_insert(tree, start, &p, &parent);
1256 	if (!node) {
1257 		prealloc = alloc_extent_state_atomic(prealloc);
1258 		if (!prealloc) {
1259 			err = -ENOMEM;
1260 			goto out;
1261 		}
1262 		err = insert_state(tree, prealloc, start, end,
1263 				   &p, &parent, &bits, NULL);
1264 		if (err)
1265 			extent_io_tree_panic(tree, err);
1266 		cache_state(prealloc, cached_state);
1267 		prealloc = NULL;
1268 		goto out;
1269 	}
1270 	state = rb_entry(node, struct extent_state, rb_node);
1271 hit_next:
1272 	last_start = state->start;
1273 	last_end = state->end;
1274 
1275 	/*
1276 	 * | ---- desired range ---- |
1277 	 * | state |
1278 	 *
1279 	 * Just lock what we found and keep going
1280 	 */
1281 	if (state->start == start && state->end <= end) {
1282 		set_state_bits(tree, state, &bits, NULL);
1283 		cache_state(state, cached_state);
1284 		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1285 		if (last_end == (u64)-1)
1286 			goto out;
1287 		start = last_end + 1;
1288 		if (start < end && state && state->start == start &&
1289 		    !need_resched())
1290 			goto hit_next;
1291 		goto search_again;
1292 	}
1293 
1294 	/*
1295 	 *     | ---- desired range ---- |
1296 	 * | state |
1297 	 *   or
1298 	 * | ------------- state -------------- |
1299 	 *
1300 	 * We need to split the extent we found, and may flip bits on
1301 	 * second half.
1302 	 *
1303 	 * If the extent we found extends past our
1304 	 * range, we just split and search again.  It'll get split
1305 	 * again the next time though.
1306 	 *
1307 	 * If the extent we found is inside our range, we set the
1308 	 * desired bit on it.
1309 	 */
1310 	if (state->start < start) {
1311 		prealloc = alloc_extent_state_atomic(prealloc);
1312 		if (!prealloc) {
1313 			err = -ENOMEM;
1314 			goto out;
1315 		}
1316 		err = split_state(tree, state, prealloc, start);
1317 		if (err)
1318 			extent_io_tree_panic(tree, err);
1319 		prealloc = NULL;
1320 		if (err)
1321 			goto out;
1322 		if (state->end <= end) {
1323 			set_state_bits(tree, state, &bits, NULL);
1324 			cache_state(state, cached_state);
1325 			state = clear_state_bit(tree, state, &clear_bits, 0,
1326 						NULL);
1327 			if (last_end == (u64)-1)
1328 				goto out;
1329 			start = last_end + 1;
1330 			if (start < end && state && state->start == start &&
1331 			    !need_resched())
1332 				goto hit_next;
1333 		}
1334 		goto search_again;
1335 	}
1336 	/*
1337 	 * | ---- desired range ---- |
1338 	 *     | state | or               | state |
1339 	 *
1340 	 * There's a hole, we need to insert something in it and
1341 	 * ignore the extent we found.
1342 	 */
1343 	if (state->start > start) {
1344 		u64 this_end;
1345 		if (end < last_start)
1346 			this_end = end;
1347 		else
1348 			this_end = last_start - 1;
1349 
1350 		prealloc = alloc_extent_state_atomic(prealloc);
1351 		if (!prealloc) {
1352 			err = -ENOMEM;
1353 			goto out;
1354 		}
1355 
1356 		/*
1357 		 * Avoid to free 'prealloc' if it can be merged with
1358 		 * the later extent.
1359 		 */
1360 		err = insert_state(tree, prealloc, start, this_end,
1361 				   NULL, NULL, &bits, NULL);
1362 		if (err)
1363 			extent_io_tree_panic(tree, err);
1364 		cache_state(prealloc, cached_state);
1365 		prealloc = NULL;
1366 		start = this_end + 1;
1367 		goto search_again;
1368 	}
1369 	/*
1370 	 * | ---- desired range ---- |
1371 	 *                        | state |
1372 	 * We need to split the extent, and set the bit
1373 	 * on the first half
1374 	 */
1375 	if (state->start <= end && state->end > end) {
1376 		prealloc = alloc_extent_state_atomic(prealloc);
1377 		if (!prealloc) {
1378 			err = -ENOMEM;
1379 			goto out;
1380 		}
1381 
1382 		err = split_state(tree, state, prealloc, end + 1);
1383 		if (err)
1384 			extent_io_tree_panic(tree, err);
1385 
1386 		set_state_bits(tree, prealloc, &bits, NULL);
1387 		cache_state(prealloc, cached_state);
1388 		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1389 		prealloc = NULL;
1390 		goto out;
1391 	}
1392 
1393 search_again:
1394 	if (start > end)
1395 		goto out;
1396 	spin_unlock(&tree->lock);
1397 	cond_resched();
1398 	first_iteration = false;
1399 	goto again;
1400 
1401 out:
1402 	spin_unlock(&tree->lock);
1403 	if (prealloc)
1404 		free_extent_state(prealloc);
1405 
1406 	return err;
1407 }
1408 
1409 /* wrappers around set/clear extent bit */
1410 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1411 			   u32 bits, struct extent_changeset *changeset)
1412 {
1413 	/*
1414 	 * We don't support EXTENT_LOCKED yet, as current changeset will
1415 	 * record any bits changed, so for EXTENT_LOCKED case, it will
1416 	 * either fail with -EEXIST or changeset will record the whole
1417 	 * range.
1418 	 */
1419 	BUG_ON(bits & EXTENT_LOCKED);
1420 
1421 	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1422 			      changeset);
1423 }
1424 
1425 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1426 			   u32 bits)
1427 {
1428 	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1429 			      GFP_NOWAIT, NULL);
1430 }
1431 
1432 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1433 		     u32 bits, int wake, int delete,
1434 		     struct extent_state **cached)
1435 {
1436 	return __clear_extent_bit(tree, start, end, bits, wake, delete,
1437 				  cached, GFP_NOFS, NULL);
1438 }
1439 
1440 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1441 		u32 bits, struct extent_changeset *changeset)
1442 {
1443 	/*
1444 	 * Don't support EXTENT_LOCKED case, same reason as
1445 	 * set_record_extent_bits().
1446 	 */
1447 	BUG_ON(bits & EXTENT_LOCKED);
1448 
1449 	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1450 				  changeset);
1451 }
1452 
1453 /*
1454  * either insert or lock state struct between start and end use mask to tell
1455  * us if waiting is desired.
1456  */
1457 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1458 		     struct extent_state **cached_state)
1459 {
1460 	int err;
1461 	u64 failed_start;
1462 
1463 	while (1) {
1464 		err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
1465 				     EXTENT_LOCKED, &failed_start,
1466 				     cached_state, GFP_NOFS, NULL);
1467 		if (err == -EEXIST) {
1468 			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1469 			start = failed_start;
1470 		} else
1471 			break;
1472 		WARN_ON(start > end);
1473 	}
1474 	return err;
1475 }
1476 
1477 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1478 {
1479 	int err;
1480 	u64 failed_start;
1481 
1482 	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1483 			     &failed_start, NULL, GFP_NOFS, NULL);
1484 	if (err == -EEXIST) {
1485 		if (failed_start > start)
1486 			clear_extent_bit(tree, start, failed_start - 1,
1487 					 EXTENT_LOCKED, 1, 0, NULL);
1488 		return 0;
1489 	}
1490 	return 1;
1491 }
1492 
1493 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1494 {
1495 	unsigned long index = start >> PAGE_SHIFT;
1496 	unsigned long end_index = end >> PAGE_SHIFT;
1497 	struct page *page;
1498 
1499 	while (index <= end_index) {
1500 		page = find_get_page(inode->i_mapping, index);
1501 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1502 		clear_page_dirty_for_io(page);
1503 		put_page(page);
1504 		index++;
1505 	}
1506 }
1507 
1508 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1509 {
1510 	struct address_space *mapping = inode->i_mapping;
1511 	unsigned long index = start >> PAGE_SHIFT;
1512 	unsigned long end_index = end >> PAGE_SHIFT;
1513 	struct folio *folio;
1514 
1515 	while (index <= end_index) {
1516 		folio = filemap_get_folio(mapping, index);
1517 		filemap_dirty_folio(mapping, folio);
1518 		folio_account_redirty(folio);
1519 		index += folio_nr_pages(folio);
1520 		folio_put(folio);
1521 	}
1522 }
1523 
1524 /* find the first state struct with 'bits' set after 'start', and
1525  * return it.  tree->lock must be held.  NULL will returned if
1526  * nothing was found after 'start'
1527  */
1528 static struct extent_state *
1529 find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
1530 {
1531 	struct rb_node *node;
1532 	struct extent_state *state;
1533 
1534 	/*
1535 	 * this search will find all the extents that end after
1536 	 * our range starts.
1537 	 */
1538 	node = tree_search(tree, start);
1539 	if (!node)
1540 		goto out;
1541 
1542 	while (1) {
1543 		state = rb_entry(node, struct extent_state, rb_node);
1544 		if (state->end >= start && (state->state & bits))
1545 			return state;
1546 
1547 		node = rb_next(node);
1548 		if (!node)
1549 			break;
1550 	}
1551 out:
1552 	return NULL;
1553 }
1554 
1555 /*
1556  * Find the first offset in the io tree with one or more @bits set.
1557  *
1558  * Note: If there are multiple bits set in @bits, any of them will match.
1559  *
1560  * Return 0 if we find something, and update @start_ret and @end_ret.
1561  * Return 1 if we found nothing.
1562  */
1563 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1564 			  u64 *start_ret, u64 *end_ret, u32 bits,
1565 			  struct extent_state **cached_state)
1566 {
1567 	struct extent_state *state;
1568 	int ret = 1;
1569 
1570 	spin_lock(&tree->lock);
1571 	if (cached_state && *cached_state) {
1572 		state = *cached_state;
1573 		if (state->end == start - 1 && extent_state_in_tree(state)) {
1574 			while ((state = next_state(state)) != NULL) {
1575 				if (state->state & bits)
1576 					goto got_it;
1577 			}
1578 			free_extent_state(*cached_state);
1579 			*cached_state = NULL;
1580 			goto out;
1581 		}
1582 		free_extent_state(*cached_state);
1583 		*cached_state = NULL;
1584 	}
1585 
1586 	state = find_first_extent_bit_state(tree, start, bits);
1587 got_it:
1588 	if (state) {
1589 		cache_state_if_flags(state, cached_state, 0);
1590 		*start_ret = state->start;
1591 		*end_ret = state->end;
1592 		ret = 0;
1593 	}
1594 out:
1595 	spin_unlock(&tree->lock);
1596 	return ret;
1597 }
1598 
1599 /**
1600  * Find a contiguous area of bits
1601  *
1602  * @tree:      io tree to check
1603  * @start:     offset to start the search from
1604  * @start_ret: the first offset we found with the bits set
1605  * @end_ret:   the final contiguous range of the bits that were set
1606  * @bits:      bits to look for
1607  *
1608  * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
1609  * to set bits appropriately, and then merge them again.  During this time it
1610  * will drop the tree->lock, so use this helper if you want to find the actual
1611  * contiguous area for given bits.  We will search to the first bit we find, and
1612  * then walk down the tree until we find a non-contiguous area.  The area
1613  * returned will be the full contiguous area with the bits set.
1614  */
1615 int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
1616 			       u64 *start_ret, u64 *end_ret, u32 bits)
1617 {
1618 	struct extent_state *state;
1619 	int ret = 1;
1620 
1621 	spin_lock(&tree->lock);
1622 	state = find_first_extent_bit_state(tree, start, bits);
1623 	if (state) {
1624 		*start_ret = state->start;
1625 		*end_ret = state->end;
1626 		while ((state = next_state(state)) != NULL) {
1627 			if (state->start > (*end_ret + 1))
1628 				break;
1629 			*end_ret = state->end;
1630 		}
1631 		ret = 0;
1632 	}
1633 	spin_unlock(&tree->lock);
1634 	return ret;
1635 }
1636 
1637 /**
1638  * Find the first range that has @bits not set. This range could start before
1639  * @start.
1640  *
1641  * @tree:      the tree to search
1642  * @start:     offset at/after which the found extent should start
1643  * @start_ret: records the beginning of the range
1644  * @end_ret:   records the end of the range (inclusive)
1645  * @bits:      the set of bits which must be unset
1646  *
1647  * Since unallocated range is also considered one which doesn't have the bits
1648  * set it's possible that @end_ret contains -1, this happens in case the range
1649  * spans (last_range_end, end of device]. In this case it's up to the caller to
1650  * trim @end_ret to the appropriate size.
1651  */
1652 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1653 				 u64 *start_ret, u64 *end_ret, u32 bits)
1654 {
1655 	struct extent_state *state;
1656 	struct rb_node *node, *prev = NULL, *next;
1657 
1658 	spin_lock(&tree->lock);
1659 
1660 	/* Find first extent with bits cleared */
1661 	while (1) {
1662 		node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1663 		if (!node && !next && !prev) {
1664 			/*
1665 			 * Tree is completely empty, send full range and let
1666 			 * caller deal with it
1667 			 */
1668 			*start_ret = 0;
1669 			*end_ret = -1;
1670 			goto out;
1671 		} else if (!node && !next) {
1672 			/*
1673 			 * We are past the last allocated chunk, set start at
1674 			 * the end of the last extent.
1675 			 */
1676 			state = rb_entry(prev, struct extent_state, rb_node);
1677 			*start_ret = state->end + 1;
1678 			*end_ret = -1;
1679 			goto out;
1680 		} else if (!node) {
1681 			node = next;
1682 		}
1683 		/*
1684 		 * At this point 'node' either contains 'start' or start is
1685 		 * before 'node'
1686 		 */
1687 		state = rb_entry(node, struct extent_state, rb_node);
1688 
1689 		if (in_range(start, state->start, state->end - state->start + 1)) {
1690 			if (state->state & bits) {
1691 				/*
1692 				 * |--range with bits sets--|
1693 				 *    |
1694 				 *    start
1695 				 */
1696 				start = state->end + 1;
1697 			} else {
1698 				/*
1699 				 * 'start' falls within a range that doesn't
1700 				 * have the bits set, so take its start as
1701 				 * the beginning of the desired range
1702 				 *
1703 				 * |--range with bits cleared----|
1704 				 *      |
1705 				 *      start
1706 				 */
1707 				*start_ret = state->start;
1708 				break;
1709 			}
1710 		} else {
1711 			/*
1712 			 * |---prev range---|---hole/unset---|---node range---|
1713 			 *                          |
1714 			 *                        start
1715 			 *
1716 			 *                        or
1717 			 *
1718 			 * |---hole/unset--||--first node--|
1719 			 * 0   |
1720 			 *    start
1721 			 */
1722 			if (prev) {
1723 				state = rb_entry(prev, struct extent_state,
1724 						 rb_node);
1725 				*start_ret = state->end + 1;
1726 			} else {
1727 				*start_ret = 0;
1728 			}
1729 			break;
1730 		}
1731 	}
1732 
1733 	/*
1734 	 * Find the longest stretch from start until an entry which has the
1735 	 * bits set
1736 	 */
1737 	while (1) {
1738 		state = rb_entry(node, struct extent_state, rb_node);
1739 		if (state->end >= start && !(state->state & bits)) {
1740 			*end_ret = state->end;
1741 		} else {
1742 			*end_ret = state->start - 1;
1743 			break;
1744 		}
1745 
1746 		node = rb_next(node);
1747 		if (!node)
1748 			break;
1749 	}
1750 out:
1751 	spin_unlock(&tree->lock);
1752 }
1753 
1754 /*
1755  * find a contiguous range of bytes in the file marked as delalloc, not
1756  * more than 'max_bytes'.  start and end are used to return the range,
1757  *
1758  * true is returned if we find something, false if nothing was in the tree
1759  */
1760 bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1761 			       u64 *end, u64 max_bytes,
1762 			       struct extent_state **cached_state)
1763 {
1764 	struct rb_node *node;
1765 	struct extent_state *state;
1766 	u64 cur_start = *start;
1767 	bool found = false;
1768 	u64 total_bytes = 0;
1769 
1770 	spin_lock(&tree->lock);
1771 
1772 	/*
1773 	 * this search will find all the extents that end after
1774 	 * our range starts.
1775 	 */
1776 	node = tree_search(tree, cur_start);
1777 	if (!node) {
1778 		*end = (u64)-1;
1779 		goto out;
1780 	}
1781 
1782 	while (1) {
1783 		state = rb_entry(node, struct extent_state, rb_node);
1784 		if (found && (state->start != cur_start ||
1785 			      (state->state & EXTENT_BOUNDARY))) {
1786 			goto out;
1787 		}
1788 		if (!(state->state & EXTENT_DELALLOC)) {
1789 			if (!found)
1790 				*end = state->end;
1791 			goto out;
1792 		}
1793 		if (!found) {
1794 			*start = state->start;
1795 			*cached_state = state;
1796 			refcount_inc(&state->refs);
1797 		}
1798 		found = true;
1799 		*end = state->end;
1800 		cur_start = state->end + 1;
1801 		node = rb_next(node);
1802 		total_bytes += state->end - state->start + 1;
1803 		if (total_bytes >= max_bytes)
1804 			break;
1805 		if (!node)
1806 			break;
1807 	}
1808 out:
1809 	spin_unlock(&tree->lock);
1810 	return found;
1811 }
1812 
1813 /*
1814  * Process one page for __process_pages_contig().
1815  *
1816  * Return >0 if we hit @page == @locked_page.
1817  * Return 0 if we updated the page status.
1818  * Return -EGAIN if the we need to try again.
1819  * (For PAGE_LOCK case but got dirty page or page not belong to mapping)
1820  */
1821 static int process_one_page(struct btrfs_fs_info *fs_info,
1822 			    struct address_space *mapping,
1823 			    struct page *page, struct page *locked_page,
1824 			    unsigned long page_ops, u64 start, u64 end)
1825 {
1826 	u32 len;
1827 
1828 	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
1829 	len = end + 1 - start;
1830 
1831 	if (page_ops & PAGE_SET_ORDERED)
1832 		btrfs_page_clamp_set_ordered(fs_info, page, start, len);
1833 	if (page_ops & PAGE_SET_ERROR)
1834 		btrfs_page_clamp_set_error(fs_info, page, start, len);
1835 	if (page_ops & PAGE_START_WRITEBACK) {
1836 		btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
1837 		btrfs_page_clamp_set_writeback(fs_info, page, start, len);
1838 	}
1839 	if (page_ops & PAGE_END_WRITEBACK)
1840 		btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
1841 
1842 	if (page == locked_page)
1843 		return 1;
1844 
1845 	if (page_ops & PAGE_LOCK) {
1846 		int ret;
1847 
1848 		ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
1849 		if (ret)
1850 			return ret;
1851 		if (!PageDirty(page) || page->mapping != mapping) {
1852 			btrfs_page_end_writer_lock(fs_info, page, start, len);
1853 			return -EAGAIN;
1854 		}
1855 	}
1856 	if (page_ops & PAGE_UNLOCK)
1857 		btrfs_page_end_writer_lock(fs_info, page, start, len);
1858 	return 0;
1859 }
1860 
1861 static int __process_pages_contig(struct address_space *mapping,
1862 				  struct page *locked_page,
1863 				  u64 start, u64 end, unsigned long page_ops,
1864 				  u64 *processed_end)
1865 {
1866 	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
1867 	pgoff_t start_index = start >> PAGE_SHIFT;
1868 	pgoff_t end_index = end >> PAGE_SHIFT;
1869 	pgoff_t index = start_index;
1870 	unsigned long nr_pages = end_index - start_index + 1;
1871 	unsigned long pages_processed = 0;
1872 	struct page *pages[16];
1873 	int err = 0;
1874 	int i;
1875 
1876 	if (page_ops & PAGE_LOCK) {
1877 		ASSERT(page_ops == PAGE_LOCK);
1878 		ASSERT(processed_end && *processed_end == start);
1879 	}
1880 
1881 	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1882 		mapping_set_error(mapping, -EIO);
1883 
1884 	while (nr_pages > 0) {
1885 		int found_pages;
1886 
1887 		found_pages = find_get_pages_contig(mapping, index,
1888 				     min_t(unsigned long,
1889 				     nr_pages, ARRAY_SIZE(pages)), pages);
1890 		if (found_pages == 0) {
1891 			/*
1892 			 * Only if we're going to lock these pages, we can find
1893 			 * nothing at @index.
1894 			 */
1895 			ASSERT(page_ops & PAGE_LOCK);
1896 			err = -EAGAIN;
1897 			goto out;
1898 		}
1899 
1900 		for (i = 0; i < found_pages; i++) {
1901 			int process_ret;
1902 
1903 			process_ret = process_one_page(fs_info, mapping,
1904 					pages[i], locked_page, page_ops,
1905 					start, end);
1906 			if (process_ret < 0) {
1907 				for (; i < found_pages; i++)
1908 					put_page(pages[i]);
1909 				err = -EAGAIN;
1910 				goto out;
1911 			}
1912 			put_page(pages[i]);
1913 			pages_processed++;
1914 		}
1915 		nr_pages -= found_pages;
1916 		index += found_pages;
1917 		cond_resched();
1918 	}
1919 out:
1920 	if (err && processed_end) {
1921 		/*
1922 		 * Update @processed_end. I know this is awful since it has
1923 		 * two different return value patterns (inclusive vs exclusive).
1924 		 *
1925 		 * But the exclusive pattern is necessary if @start is 0, or we
1926 		 * underflow and check against processed_end won't work as
1927 		 * expected.
1928 		 */
1929 		if (pages_processed)
1930 			*processed_end = min(end,
1931 			((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
1932 		else
1933 			*processed_end = start;
1934 	}
1935 	return err;
1936 }
1937 
1938 static noinline void __unlock_for_delalloc(struct inode *inode,
1939 					   struct page *locked_page,
1940 					   u64 start, u64 end)
1941 {
1942 	unsigned long index = start >> PAGE_SHIFT;
1943 	unsigned long end_index = end >> PAGE_SHIFT;
1944 
1945 	ASSERT(locked_page);
1946 	if (index == locked_page->index && end_index == index)
1947 		return;
1948 
1949 	__process_pages_contig(inode->i_mapping, locked_page, start, end,
1950 			       PAGE_UNLOCK, NULL);
1951 }
1952 
1953 static noinline int lock_delalloc_pages(struct inode *inode,
1954 					struct page *locked_page,
1955 					u64 delalloc_start,
1956 					u64 delalloc_end)
1957 {
1958 	unsigned long index = delalloc_start >> PAGE_SHIFT;
1959 	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1960 	u64 processed_end = delalloc_start;
1961 	int ret;
1962 
1963 	ASSERT(locked_page);
1964 	if (index == locked_page->index && index == end_index)
1965 		return 0;
1966 
1967 	ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
1968 				     delalloc_end, PAGE_LOCK, &processed_end);
1969 	if (ret == -EAGAIN && processed_end > delalloc_start)
1970 		__unlock_for_delalloc(inode, locked_page, delalloc_start,
1971 				      processed_end);
1972 	return ret;
1973 }
1974 
1975 /*
1976  * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1977  * more than @max_bytes.
1978  *
1979  * @start:	The original start bytenr to search.
1980  *		Will store the extent range start bytenr.
1981  * @end:	The original end bytenr of the search range
1982  *		Will store the extent range end bytenr.
1983  *
1984  * Return true if we find a delalloc range which starts inside the original
1985  * range, and @start/@end will store the delalloc range start/end.
1986  *
1987  * Return false if we can't find any delalloc range which starts inside the
1988  * original range, and @start/@end will be the non-delalloc range start/end.
1989  */
1990 EXPORT_FOR_TESTS
1991 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
1992 				    struct page *locked_page, u64 *start,
1993 				    u64 *end)
1994 {
1995 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1996 	const u64 orig_start = *start;
1997 	const u64 orig_end = *end;
1998 	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
1999 	u64 delalloc_start;
2000 	u64 delalloc_end;
2001 	bool found;
2002 	struct extent_state *cached_state = NULL;
2003 	int ret;
2004 	int loops = 0;
2005 
2006 	/* Caller should pass a valid @end to indicate the search range end */
2007 	ASSERT(orig_end > orig_start);
2008 
2009 	/* The range should at least cover part of the page */
2010 	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
2011 		 orig_end <= page_offset(locked_page)));
2012 again:
2013 	/* step one, find a bunch of delalloc bytes starting at start */
2014 	delalloc_start = *start;
2015 	delalloc_end = 0;
2016 	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
2017 					  max_bytes, &cached_state);
2018 	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
2019 		*start = delalloc_start;
2020 
2021 		/* @delalloc_end can be -1, never go beyond @orig_end */
2022 		*end = min(delalloc_end, orig_end);
2023 		free_extent_state(cached_state);
2024 		return false;
2025 	}
2026 
2027 	/*
2028 	 * start comes from the offset of locked_page.  We have to lock
2029 	 * pages in order, so we can't process delalloc bytes before
2030 	 * locked_page
2031 	 */
2032 	if (delalloc_start < *start)
2033 		delalloc_start = *start;
2034 
2035 	/*
2036 	 * make sure to limit the number of pages we try to lock down
2037 	 */
2038 	if (delalloc_end + 1 - delalloc_start > max_bytes)
2039 		delalloc_end = delalloc_start + max_bytes - 1;
2040 
2041 	/* step two, lock all the pages after the page that has start */
2042 	ret = lock_delalloc_pages(inode, locked_page,
2043 				  delalloc_start, delalloc_end);
2044 	ASSERT(!ret || ret == -EAGAIN);
2045 	if (ret == -EAGAIN) {
2046 		/* some of the pages are gone, lets avoid looping by
2047 		 * shortening the size of the delalloc range we're searching
2048 		 */
2049 		free_extent_state(cached_state);
2050 		cached_state = NULL;
2051 		if (!loops) {
2052 			max_bytes = PAGE_SIZE;
2053 			loops = 1;
2054 			goto again;
2055 		} else {
2056 			found = false;
2057 			goto out_failed;
2058 		}
2059 	}
2060 
2061 	/* step three, lock the state bits for the whole range */
2062 	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
2063 
2064 	/* then test to make sure it is all still delalloc */
2065 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
2066 			     EXTENT_DELALLOC, 1, cached_state);
2067 	if (!ret) {
2068 		unlock_extent_cached(tree, delalloc_start, delalloc_end,
2069 				     &cached_state);
2070 		__unlock_for_delalloc(inode, locked_page,
2071 			      delalloc_start, delalloc_end);
2072 		cond_resched();
2073 		goto again;
2074 	}
2075 	free_extent_state(cached_state);
2076 	*start = delalloc_start;
2077 	*end = delalloc_end;
2078 out_failed:
2079 	return found;
2080 }
2081 
2082 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2083 				  struct page *locked_page,
2084 				  u32 clear_bits, unsigned long page_ops)
2085 {
2086 	clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
2087 
2088 	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
2089 			       start, end, page_ops, NULL);
2090 }
2091 
2092 /*
2093  * count the number of bytes in the tree that have a given bit(s)
2094  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
2095  * cached.  The total number found is returned.
2096  */
2097 u64 count_range_bits(struct extent_io_tree *tree,
2098 		     u64 *start, u64 search_end, u64 max_bytes,
2099 		     u32 bits, int contig)
2100 {
2101 	struct rb_node *node;
2102 	struct extent_state *state;
2103 	u64 cur_start = *start;
2104 	u64 total_bytes = 0;
2105 	u64 last = 0;
2106 	int found = 0;
2107 
2108 	if (WARN_ON(search_end <= cur_start))
2109 		return 0;
2110 
2111 	spin_lock(&tree->lock);
2112 	if (cur_start == 0 && bits == EXTENT_DIRTY) {
2113 		total_bytes = tree->dirty_bytes;
2114 		goto out;
2115 	}
2116 	/*
2117 	 * this search will find all the extents that end after
2118 	 * our range starts.
2119 	 */
2120 	node = tree_search(tree, cur_start);
2121 	if (!node)
2122 		goto out;
2123 
2124 	while (1) {
2125 		state = rb_entry(node, struct extent_state, rb_node);
2126 		if (state->start > search_end)
2127 			break;
2128 		if (contig && found && state->start > last + 1)
2129 			break;
2130 		if (state->end >= cur_start && (state->state & bits) == bits) {
2131 			total_bytes += min(search_end, state->end) + 1 -
2132 				       max(cur_start, state->start);
2133 			if (total_bytes >= max_bytes)
2134 				break;
2135 			if (!found) {
2136 				*start = max(cur_start, state->start);
2137 				found = 1;
2138 			}
2139 			last = state->end;
2140 		} else if (contig && found) {
2141 			break;
2142 		}
2143 		node = rb_next(node);
2144 		if (!node)
2145 			break;
2146 	}
2147 out:
2148 	spin_unlock(&tree->lock);
2149 	return total_bytes;
2150 }
2151 
2152 /*
2153  * set the private field for a given byte offset in the tree.  If there isn't
2154  * an extent_state there already, this does nothing.
2155  */
2156 int set_state_failrec(struct extent_io_tree *tree, u64 start,
2157 		      struct io_failure_record *failrec)
2158 {
2159 	struct rb_node *node;
2160 	struct extent_state *state;
2161 	int ret = 0;
2162 
2163 	spin_lock(&tree->lock);
2164 	/*
2165 	 * this search will find all the extents that end after
2166 	 * our range starts.
2167 	 */
2168 	node = tree_search(tree, start);
2169 	if (!node) {
2170 		ret = -ENOENT;
2171 		goto out;
2172 	}
2173 	state = rb_entry(node, struct extent_state, rb_node);
2174 	if (state->start != start) {
2175 		ret = -ENOENT;
2176 		goto out;
2177 	}
2178 	state->failrec = failrec;
2179 out:
2180 	spin_unlock(&tree->lock);
2181 	return ret;
2182 }
2183 
2184 struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
2185 {
2186 	struct rb_node *node;
2187 	struct extent_state *state;
2188 	struct io_failure_record *failrec;
2189 
2190 	spin_lock(&tree->lock);
2191 	/*
2192 	 * this search will find all the extents that end after
2193 	 * our range starts.
2194 	 */
2195 	node = tree_search(tree, start);
2196 	if (!node) {
2197 		failrec = ERR_PTR(-ENOENT);
2198 		goto out;
2199 	}
2200 	state = rb_entry(node, struct extent_state, rb_node);
2201 	if (state->start != start) {
2202 		failrec = ERR_PTR(-ENOENT);
2203 		goto out;
2204 	}
2205 
2206 	failrec = state->failrec;
2207 out:
2208 	spin_unlock(&tree->lock);
2209 	return failrec;
2210 }
2211 
2212 /*
2213  * searches a range in the state tree for a given mask.
2214  * If 'filled' == 1, this returns 1 only if every extent in the tree
2215  * has the bits set.  Otherwise, 1 is returned if any bit in the
2216  * range is found set.
2217  */
2218 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2219 		   u32 bits, int filled, struct extent_state *cached)
2220 {
2221 	struct extent_state *state = NULL;
2222 	struct rb_node *node;
2223 	int bitset = 0;
2224 
2225 	spin_lock(&tree->lock);
2226 	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2227 	    cached->end > start)
2228 		node = &cached->rb_node;
2229 	else
2230 		node = tree_search(tree, start);
2231 	while (node && start <= end) {
2232 		state = rb_entry(node, struct extent_state, rb_node);
2233 
2234 		if (filled && state->start > start) {
2235 			bitset = 0;
2236 			break;
2237 		}
2238 
2239 		if (state->start > end)
2240 			break;
2241 
2242 		if (state->state & bits) {
2243 			bitset = 1;
2244 			if (!filled)
2245 				break;
2246 		} else if (filled) {
2247 			bitset = 0;
2248 			break;
2249 		}
2250 
2251 		if (state->end == (u64)-1)
2252 			break;
2253 
2254 		start = state->end + 1;
2255 		if (start > end)
2256 			break;
2257 		node = rb_next(node);
2258 		if (!node) {
2259 			if (filled)
2260 				bitset = 0;
2261 			break;
2262 		}
2263 	}
2264 	spin_unlock(&tree->lock);
2265 	return bitset;
2266 }
2267 
2268 int free_io_failure(struct extent_io_tree *failure_tree,
2269 		    struct extent_io_tree *io_tree,
2270 		    struct io_failure_record *rec)
2271 {
2272 	int ret;
2273 	int err = 0;
2274 
2275 	set_state_failrec(failure_tree, rec->start, NULL);
2276 	ret = clear_extent_bits(failure_tree, rec->start,
2277 				rec->start + rec->len - 1,
2278 				EXTENT_LOCKED | EXTENT_DIRTY);
2279 	if (ret)
2280 		err = ret;
2281 
2282 	ret = clear_extent_bits(io_tree, rec->start,
2283 				rec->start + rec->len - 1,
2284 				EXTENT_DAMAGED);
2285 	if (ret && !err)
2286 		err = ret;
2287 
2288 	kfree(rec);
2289 	return err;
2290 }
2291 
2292 /*
2293  * this bypasses the standard btrfs submit functions deliberately, as
2294  * the standard behavior is to write all copies in a raid setup. here we only
2295  * want to write the one bad copy. so we do the mapping for ourselves and issue
2296  * submit_bio directly.
2297  * to avoid any synchronization issues, wait for the data after writing, which
2298  * actually prevents the read that triggered the error from finishing.
2299  * currently, there can be no more than two copies of every data bit. thus,
2300  * exactly one rewrite is required.
2301  */
2302 static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2303 			     u64 length, u64 logical, struct page *page,
2304 			     unsigned int pg_offset, int mirror_num)
2305 {
2306 	struct bio *bio;
2307 	struct btrfs_device *dev;
2308 	u64 map_length = 0;
2309 	u64 sector;
2310 	struct btrfs_io_context *bioc = NULL;
2311 	int ret;
2312 
2313 	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
2314 	BUG_ON(!mirror_num);
2315 
2316 	if (btrfs_repair_one_zone(fs_info, logical))
2317 		return 0;
2318 
2319 	bio = btrfs_bio_alloc(1);
2320 	bio->bi_iter.bi_size = 0;
2321 	map_length = length;
2322 
2323 	/*
2324 	 * Avoid races with device replace and make sure our bioc has devices
2325 	 * associated to its stripes that don't go away while we are doing the
2326 	 * read repair operation.
2327 	 */
2328 	btrfs_bio_counter_inc_blocked(fs_info);
2329 	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2330 		/*
2331 		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2332 		 * to update all raid stripes, but here we just want to correct
2333 		 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2334 		 * stripe's dev and sector.
2335 		 */
2336 		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2337 				      &map_length, &bioc, 0);
2338 		if (ret) {
2339 			btrfs_bio_counter_dec(fs_info);
2340 			bio_put(bio);
2341 			return -EIO;
2342 		}
2343 		ASSERT(bioc->mirror_num == 1);
2344 	} else {
2345 		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2346 				      &map_length, &bioc, mirror_num);
2347 		if (ret) {
2348 			btrfs_bio_counter_dec(fs_info);
2349 			bio_put(bio);
2350 			return -EIO;
2351 		}
2352 		BUG_ON(mirror_num != bioc->mirror_num);
2353 	}
2354 
2355 	sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
2356 	bio->bi_iter.bi_sector = sector;
2357 	dev = bioc->stripes[bioc->mirror_num - 1].dev;
2358 	btrfs_put_bioc(bioc);
2359 	if (!dev || !dev->bdev ||
2360 	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2361 		btrfs_bio_counter_dec(fs_info);
2362 		bio_put(bio);
2363 		return -EIO;
2364 	}
2365 	bio_set_dev(bio, dev->bdev);
2366 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2367 	bio_add_page(bio, page, length, pg_offset);
2368 
2369 	if (btrfsic_submit_bio_wait(bio)) {
2370 		/* try to remap that extent elsewhere? */
2371 		btrfs_bio_counter_dec(fs_info);
2372 		bio_put(bio);
2373 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2374 		return -EIO;
2375 	}
2376 
2377 	btrfs_info_rl_in_rcu(fs_info,
2378 		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
2379 				  ino, start,
2380 				  rcu_str_deref(dev->name), sector);
2381 	btrfs_bio_counter_dec(fs_info);
2382 	bio_put(bio);
2383 	return 0;
2384 }
2385 
2386 int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
2387 {
2388 	struct btrfs_fs_info *fs_info = eb->fs_info;
2389 	u64 start = eb->start;
2390 	int i, num_pages = num_extent_pages(eb);
2391 	int ret = 0;
2392 
2393 	if (sb_rdonly(fs_info->sb))
2394 		return -EROFS;
2395 
2396 	for (i = 0; i < num_pages; i++) {
2397 		struct page *p = eb->pages[i];
2398 
2399 		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2400 					start - page_offset(p), mirror_num);
2401 		if (ret)
2402 			break;
2403 		start += PAGE_SIZE;
2404 	}
2405 
2406 	return ret;
2407 }
2408 
2409 /*
2410  * each time an IO finishes, we do a fast check in the IO failure tree
2411  * to see if we need to process or clean up an io_failure_record
2412  */
2413 int clean_io_failure(struct btrfs_fs_info *fs_info,
2414 		     struct extent_io_tree *failure_tree,
2415 		     struct extent_io_tree *io_tree, u64 start,
2416 		     struct page *page, u64 ino, unsigned int pg_offset)
2417 {
2418 	u64 private;
2419 	struct io_failure_record *failrec;
2420 	struct extent_state *state;
2421 	int num_copies;
2422 	int ret;
2423 
2424 	private = 0;
2425 	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2426 			       EXTENT_DIRTY, 0);
2427 	if (!ret)
2428 		return 0;
2429 
2430 	failrec = get_state_failrec(failure_tree, start);
2431 	if (IS_ERR(failrec))
2432 		return 0;
2433 
2434 	BUG_ON(!failrec->this_mirror);
2435 
2436 	if (sb_rdonly(fs_info->sb))
2437 		goto out;
2438 
2439 	spin_lock(&io_tree->lock);
2440 	state = find_first_extent_bit_state(io_tree,
2441 					    failrec->start,
2442 					    EXTENT_LOCKED);
2443 	spin_unlock(&io_tree->lock);
2444 
2445 	if (state && state->start <= failrec->start &&
2446 	    state->end >= failrec->start + failrec->len - 1) {
2447 		num_copies = btrfs_num_copies(fs_info, failrec->logical,
2448 					      failrec->len);
2449 		if (num_copies > 1)  {
2450 			repair_io_failure(fs_info, ino, start, failrec->len,
2451 					  failrec->logical, page, pg_offset,
2452 					  failrec->failed_mirror);
2453 		}
2454 	}
2455 
2456 out:
2457 	free_io_failure(failure_tree, io_tree, failrec);
2458 
2459 	return 0;
2460 }
2461 
2462 /*
2463  * Can be called when
2464  * - hold extent lock
2465  * - under ordered extent
2466  * - the inode is freeing
2467  */
2468 void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2469 {
2470 	struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2471 	struct io_failure_record *failrec;
2472 	struct extent_state *state, *next;
2473 
2474 	if (RB_EMPTY_ROOT(&failure_tree->state))
2475 		return;
2476 
2477 	spin_lock(&failure_tree->lock);
2478 	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2479 	while (state) {
2480 		if (state->start > end)
2481 			break;
2482 
2483 		ASSERT(state->end <= end);
2484 
2485 		next = next_state(state);
2486 
2487 		failrec = state->failrec;
2488 		free_extent_state(state);
2489 		kfree(failrec);
2490 
2491 		state = next;
2492 	}
2493 	spin_unlock(&failure_tree->lock);
2494 }
2495 
2496 static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
2497 							     u64 start)
2498 {
2499 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2500 	struct io_failure_record *failrec;
2501 	struct extent_map *em;
2502 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2503 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2504 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2505 	const u32 sectorsize = fs_info->sectorsize;
2506 	int ret;
2507 	u64 logical;
2508 
2509 	failrec = get_state_failrec(failure_tree, start);
2510 	if (!IS_ERR(failrec)) {
2511 		btrfs_debug(fs_info,
2512 	"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
2513 			failrec->logical, failrec->start, failrec->len);
2514 		/*
2515 		 * when data can be on disk more than twice, add to failrec here
2516 		 * (e.g. with a list for failed_mirror) to make
2517 		 * clean_io_failure() clean all those errors at once.
2518 		 */
2519 
2520 		return failrec;
2521 	}
2522 
2523 	failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2524 	if (!failrec)
2525 		return ERR_PTR(-ENOMEM);
2526 
2527 	failrec->start = start;
2528 	failrec->len = sectorsize;
2529 	failrec->this_mirror = 0;
2530 	failrec->bio_flags = 0;
2531 
2532 	read_lock(&em_tree->lock);
2533 	em = lookup_extent_mapping(em_tree, start, failrec->len);
2534 	if (!em) {
2535 		read_unlock(&em_tree->lock);
2536 		kfree(failrec);
2537 		return ERR_PTR(-EIO);
2538 	}
2539 
2540 	if (em->start > start || em->start + em->len <= start) {
2541 		free_extent_map(em);
2542 		em = NULL;
2543 	}
2544 	read_unlock(&em_tree->lock);
2545 	if (!em) {
2546 		kfree(failrec);
2547 		return ERR_PTR(-EIO);
2548 	}
2549 
2550 	logical = start - em->start;
2551 	logical = em->block_start + logical;
2552 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2553 		logical = em->block_start;
2554 		failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2555 		extent_set_compress_type(&failrec->bio_flags, em->compress_type);
2556 	}
2557 
2558 	btrfs_debug(fs_info,
2559 		    "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2560 		    logical, start, failrec->len);
2561 
2562 	failrec->logical = logical;
2563 	free_extent_map(em);
2564 
2565 	/* Set the bits in the private failure tree */
2566 	ret = set_extent_bits(failure_tree, start, start + sectorsize - 1,
2567 			      EXTENT_LOCKED | EXTENT_DIRTY);
2568 	if (ret >= 0) {
2569 		ret = set_state_failrec(failure_tree, start, failrec);
2570 		/* Set the bits in the inode's tree */
2571 		ret = set_extent_bits(tree, start, start + sectorsize - 1,
2572 				      EXTENT_DAMAGED);
2573 	} else if (ret < 0) {
2574 		kfree(failrec);
2575 		return ERR_PTR(ret);
2576 	}
2577 
2578 	return failrec;
2579 }
2580 
2581 static bool btrfs_check_repairable(struct inode *inode,
2582 				   struct io_failure_record *failrec,
2583 				   int failed_mirror)
2584 {
2585 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2586 	int num_copies;
2587 
2588 	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2589 	if (num_copies == 1) {
2590 		/*
2591 		 * we only have a single copy of the data, so don't bother with
2592 		 * all the retry and error correction code that follows. no
2593 		 * matter what the error is, it is very likely to persist.
2594 		 */
2595 		btrfs_debug(fs_info,
2596 			"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2597 			num_copies, failrec->this_mirror, failed_mirror);
2598 		return false;
2599 	}
2600 
2601 	/* The failure record should only contain one sector */
2602 	ASSERT(failrec->len == fs_info->sectorsize);
2603 
2604 	/*
2605 	 * There are two premises:
2606 	 * a) deliver good data to the caller
2607 	 * b) correct the bad sectors on disk
2608 	 *
2609 	 * Since we're only doing repair for one sector, we only need to get
2610 	 * a good copy of the failed sector and if we succeed, we have setup
2611 	 * everything for repair_io_failure to do the rest for us.
2612 	 */
2613 	ASSERT(failed_mirror);
2614 	failrec->failed_mirror = failed_mirror;
2615 	failrec->this_mirror++;
2616 	if (failrec->this_mirror == failed_mirror)
2617 		failrec->this_mirror++;
2618 
2619 	if (failrec->this_mirror > num_copies) {
2620 		btrfs_debug(fs_info,
2621 			"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2622 			num_copies, failrec->this_mirror, failed_mirror);
2623 		return false;
2624 	}
2625 
2626 	return true;
2627 }
2628 
2629 int btrfs_repair_one_sector(struct inode *inode,
2630 			    struct bio *failed_bio, u32 bio_offset,
2631 			    struct page *page, unsigned int pgoff,
2632 			    u64 start, int failed_mirror,
2633 			    submit_bio_hook_t *submit_bio_hook)
2634 {
2635 	struct io_failure_record *failrec;
2636 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2637 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2638 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2639 	struct btrfs_bio *failed_bbio = btrfs_bio(failed_bio);
2640 	const int icsum = bio_offset >> fs_info->sectorsize_bits;
2641 	struct bio *repair_bio;
2642 	struct btrfs_bio *repair_bbio;
2643 
2644 	btrfs_debug(fs_info,
2645 		   "repair read error: read error at %llu", start);
2646 
2647 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2648 
2649 	failrec = btrfs_get_io_failure_record(inode, start);
2650 	if (IS_ERR(failrec))
2651 		return PTR_ERR(failrec);
2652 
2653 
2654 	if (!btrfs_check_repairable(inode, failrec, failed_mirror)) {
2655 		free_io_failure(failure_tree, tree, failrec);
2656 		return -EIO;
2657 	}
2658 
2659 	repair_bio = btrfs_bio_alloc(1);
2660 	repair_bbio = btrfs_bio(repair_bio);
2661 	repair_bio->bi_opf = REQ_OP_READ;
2662 	repair_bio->bi_end_io = failed_bio->bi_end_io;
2663 	repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
2664 	repair_bio->bi_private = failed_bio->bi_private;
2665 
2666 	if (failed_bbio->csum) {
2667 		const u32 csum_size = fs_info->csum_size;
2668 
2669 		repair_bbio->csum = repair_bbio->csum_inline;
2670 		memcpy(repair_bbio->csum,
2671 		       failed_bbio->csum + csum_size * icsum, csum_size);
2672 	}
2673 
2674 	bio_add_page(repair_bio, page, failrec->len, pgoff);
2675 	repair_bbio->iter = repair_bio->bi_iter;
2676 
2677 	btrfs_debug(btrfs_sb(inode->i_sb),
2678 		    "repair read error: submitting new read to mirror %d",
2679 		    failrec->this_mirror);
2680 
2681 	/*
2682 	 * At this point we have a bio, so any errors from submit_bio_hook()
2683 	 * will be handled by the endio on the repair_bio, so we can't return an
2684 	 * error here.
2685 	 */
2686 	submit_bio_hook(inode, repair_bio, failrec->this_mirror, failrec->bio_flags);
2687 	return BLK_STS_OK;
2688 }
2689 
2690 static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
2691 {
2692 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
2693 
2694 	ASSERT(page_offset(page) <= start &&
2695 	       start + len <= page_offset(page) + PAGE_SIZE);
2696 
2697 	if (uptodate) {
2698 		if (fsverity_active(page->mapping->host) &&
2699 		    !PageError(page) &&
2700 		    !PageUptodate(page) &&
2701 		    start < i_size_read(page->mapping->host) &&
2702 		    !fsverity_verify_page(page)) {
2703 			btrfs_page_set_error(fs_info, page, start, len);
2704 		} else {
2705 			btrfs_page_set_uptodate(fs_info, page, start, len);
2706 		}
2707 	} else {
2708 		btrfs_page_clear_uptodate(fs_info, page, start, len);
2709 		btrfs_page_set_error(fs_info, page, start, len);
2710 	}
2711 
2712 	if (fs_info->sectorsize == PAGE_SIZE)
2713 		unlock_page(page);
2714 	else
2715 		btrfs_subpage_end_reader(fs_info, page, start, len);
2716 }
2717 
2718 static blk_status_t submit_read_repair(struct inode *inode,
2719 				      struct bio *failed_bio, u32 bio_offset,
2720 				      struct page *page, unsigned int pgoff,
2721 				      u64 start, u64 end, int failed_mirror,
2722 				      unsigned int error_bitmap,
2723 				      submit_bio_hook_t *submit_bio_hook)
2724 {
2725 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2726 	const u32 sectorsize = fs_info->sectorsize;
2727 	const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
2728 	int error = 0;
2729 	int i;
2730 
2731 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2732 
2733 	/* We're here because we had some read errors or csum mismatch */
2734 	ASSERT(error_bitmap);
2735 
2736 	/*
2737 	 * We only get called on buffered IO, thus page must be mapped and bio
2738 	 * must not be cloned.
2739 	 */
2740 	ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED));
2741 
2742 	/* Iterate through all the sectors in the range */
2743 	for (i = 0; i < nr_bits; i++) {
2744 		const unsigned int offset = i * sectorsize;
2745 		struct extent_state *cached = NULL;
2746 		bool uptodate = false;
2747 		int ret;
2748 
2749 		if (!(error_bitmap & (1U << i))) {
2750 			/*
2751 			 * This sector has no error, just end the page read
2752 			 * and unlock the range.
2753 			 */
2754 			uptodate = true;
2755 			goto next;
2756 		}
2757 
2758 		ret = btrfs_repair_one_sector(inode, failed_bio,
2759 				bio_offset + offset,
2760 				page, pgoff + offset, start + offset,
2761 				failed_mirror, submit_bio_hook);
2762 		if (!ret) {
2763 			/*
2764 			 * We have submitted the read repair, the page release
2765 			 * will be handled by the endio function of the
2766 			 * submitted repair bio.
2767 			 * Thus we don't need to do any thing here.
2768 			 */
2769 			continue;
2770 		}
2771 		/*
2772 		 * Repair failed, just record the error but still continue.
2773 		 * Or the remaining sectors will not be properly unlocked.
2774 		 */
2775 		if (!error)
2776 			error = ret;
2777 next:
2778 		end_page_read(page, uptodate, start + offset, sectorsize);
2779 		if (uptodate)
2780 			set_extent_uptodate(&BTRFS_I(inode)->io_tree,
2781 					start + offset,
2782 					start + offset + sectorsize - 1,
2783 					&cached, GFP_ATOMIC);
2784 		unlock_extent_cached_atomic(&BTRFS_I(inode)->io_tree,
2785 				start + offset,
2786 				start + offset + sectorsize - 1,
2787 				&cached);
2788 	}
2789 	return errno_to_blk_status(error);
2790 }
2791 
2792 /* lots and lots of room for performance fixes in the end_bio funcs */
2793 
2794 void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2795 {
2796 	struct btrfs_inode *inode;
2797 	const bool uptodate = (err == 0);
2798 	int ret = 0;
2799 
2800 	ASSERT(page && page->mapping);
2801 	inode = BTRFS_I(page->mapping->host);
2802 	btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
2803 
2804 	if (!uptodate) {
2805 		const struct btrfs_fs_info *fs_info = inode->root->fs_info;
2806 		u32 len;
2807 
2808 		ASSERT(end + 1 - start <= U32_MAX);
2809 		len = end + 1 - start;
2810 
2811 		btrfs_page_clear_uptodate(fs_info, page, start, len);
2812 		btrfs_page_set_error(fs_info, page, start, len);
2813 		ret = err < 0 ? err : -EIO;
2814 		mapping_set_error(page->mapping, ret);
2815 	}
2816 }
2817 
2818 /*
2819  * after a writepage IO is done, we need to:
2820  * clear the uptodate bits on error
2821  * clear the writeback bits in the extent tree for this IO
2822  * end_page_writeback if the page has no more pending IO
2823  *
2824  * Scheduling is not allowed, so the extent state tree is expected
2825  * to have one and only one object corresponding to this IO.
2826  */
2827 static void end_bio_extent_writepage(struct bio *bio)
2828 {
2829 	int error = blk_status_to_errno(bio->bi_status);
2830 	struct bio_vec *bvec;
2831 	u64 start;
2832 	u64 end;
2833 	struct bvec_iter_all iter_all;
2834 	bool first_bvec = true;
2835 
2836 	ASSERT(!bio_flagged(bio, BIO_CLONED));
2837 	bio_for_each_segment_all(bvec, bio, iter_all) {
2838 		struct page *page = bvec->bv_page;
2839 		struct inode *inode = page->mapping->host;
2840 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2841 		const u32 sectorsize = fs_info->sectorsize;
2842 
2843 		/* Our read/write should always be sector aligned. */
2844 		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
2845 			btrfs_err(fs_info,
2846 		"partial page write in btrfs with offset %u and length %u",
2847 				  bvec->bv_offset, bvec->bv_len);
2848 		else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
2849 			btrfs_info(fs_info,
2850 		"incomplete page write with offset %u and length %u",
2851 				   bvec->bv_offset, bvec->bv_len);
2852 
2853 		start = page_offset(page) + bvec->bv_offset;
2854 		end = start + bvec->bv_len - 1;
2855 
2856 		if (first_bvec) {
2857 			btrfs_record_physical_zoned(inode, start, bio);
2858 			first_bvec = false;
2859 		}
2860 
2861 		end_extent_writepage(page, error, start, end);
2862 
2863 		btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
2864 	}
2865 
2866 	bio_put(bio);
2867 }
2868 
2869 /*
2870  * Record previously processed extent range
2871  *
2872  * For endio_readpage_release_extent() to handle a full extent range, reducing
2873  * the extent io operations.
2874  */
2875 struct processed_extent {
2876 	struct btrfs_inode *inode;
2877 	/* Start of the range in @inode */
2878 	u64 start;
2879 	/* End of the range in @inode */
2880 	u64 end;
2881 	bool uptodate;
2882 };
2883 
2884 /*
2885  * Try to release processed extent range
2886  *
2887  * May not release the extent range right now if the current range is
2888  * contiguous to processed extent.
2889  *
2890  * Will release processed extent when any of @inode, @uptodate, the range is
2891  * no longer contiguous to the processed range.
2892  *
2893  * Passing @inode == NULL will force processed extent to be released.
2894  */
2895 static void endio_readpage_release_extent(struct processed_extent *processed,
2896 			      struct btrfs_inode *inode, u64 start, u64 end,
2897 			      bool uptodate)
2898 {
2899 	struct extent_state *cached = NULL;
2900 	struct extent_io_tree *tree;
2901 
2902 	/* The first extent, initialize @processed */
2903 	if (!processed->inode)
2904 		goto update;
2905 
2906 	/*
2907 	 * Contiguous to processed extent, just uptodate the end.
2908 	 *
2909 	 * Several things to notice:
2910 	 *
2911 	 * - bio can be merged as long as on-disk bytenr is contiguous
2912 	 *   This means we can have page belonging to other inodes, thus need to
2913 	 *   check if the inode still matches.
2914 	 * - bvec can contain range beyond current page for multi-page bvec
2915 	 *   Thus we need to do processed->end + 1 >= start check
2916 	 */
2917 	if (processed->inode == inode && processed->uptodate == uptodate &&
2918 	    processed->end + 1 >= start && end >= processed->end) {
2919 		processed->end = end;
2920 		return;
2921 	}
2922 
2923 	tree = &processed->inode->io_tree;
2924 	/*
2925 	 * Now we don't have range contiguous to the processed range, release
2926 	 * the processed range now.
2927 	 */
2928 	if (processed->uptodate && tree->track_uptodate)
2929 		set_extent_uptodate(tree, processed->start, processed->end,
2930 				    &cached, GFP_ATOMIC);
2931 	unlock_extent_cached_atomic(tree, processed->start, processed->end,
2932 				    &cached);
2933 
2934 update:
2935 	/* Update processed to current range */
2936 	processed->inode = inode;
2937 	processed->start = start;
2938 	processed->end = end;
2939 	processed->uptodate = uptodate;
2940 }
2941 
2942 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
2943 {
2944 	ASSERT(PageLocked(page));
2945 	if (fs_info->sectorsize == PAGE_SIZE)
2946 		return;
2947 
2948 	ASSERT(PagePrivate(page));
2949 	btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
2950 }
2951 
2952 /*
2953  * Find extent buffer for a givne bytenr.
2954  *
2955  * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
2956  * in endio context.
2957  */
2958 static struct extent_buffer *find_extent_buffer_readpage(
2959 		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
2960 {
2961 	struct extent_buffer *eb;
2962 
2963 	/*
2964 	 * For regular sectorsize, we can use page->private to grab extent
2965 	 * buffer
2966 	 */
2967 	if (fs_info->sectorsize == PAGE_SIZE) {
2968 		ASSERT(PagePrivate(page) && page->private);
2969 		return (struct extent_buffer *)page->private;
2970 	}
2971 
2972 	/* For subpage case, we need to lookup buffer radix tree */
2973 	rcu_read_lock();
2974 	eb = radix_tree_lookup(&fs_info->buffer_radix,
2975 			       bytenr >> fs_info->sectorsize_bits);
2976 	rcu_read_unlock();
2977 	ASSERT(eb);
2978 	return eb;
2979 }
2980 
2981 /*
2982  * after a readpage IO is done, we need to:
2983  * clear the uptodate bits on error
2984  * set the uptodate bits if things worked
2985  * set the page up to date if all extents in the tree are uptodate
2986  * clear the lock bit in the extent tree
2987  * unlock the page if there are no other extents locked for it
2988  *
2989  * Scheduling is not allowed, so the extent state tree is expected
2990  * to have one and only one object corresponding to this IO.
2991  */
2992 static void end_bio_extent_readpage(struct bio *bio)
2993 {
2994 	struct bio_vec *bvec;
2995 	struct btrfs_bio *bbio = btrfs_bio(bio);
2996 	struct extent_io_tree *tree, *failure_tree;
2997 	struct processed_extent processed = { 0 };
2998 	/*
2999 	 * The offset to the beginning of a bio, since one bio can never be
3000 	 * larger than UINT_MAX, u32 here is enough.
3001 	 */
3002 	u32 bio_offset = 0;
3003 	int mirror;
3004 	int ret;
3005 	struct bvec_iter_all iter_all;
3006 
3007 	ASSERT(!bio_flagged(bio, BIO_CLONED));
3008 	bio_for_each_segment_all(bvec, bio, iter_all) {
3009 		bool uptodate = !bio->bi_status;
3010 		struct page *page = bvec->bv_page;
3011 		struct inode *inode = page->mapping->host;
3012 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3013 		const u32 sectorsize = fs_info->sectorsize;
3014 		unsigned int error_bitmap = (unsigned int)-1;
3015 		u64 start;
3016 		u64 end;
3017 		u32 len;
3018 
3019 		btrfs_debug(fs_info,
3020 			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
3021 			bio->bi_iter.bi_sector, bio->bi_status,
3022 			bbio->mirror_num);
3023 		tree = &BTRFS_I(inode)->io_tree;
3024 		failure_tree = &BTRFS_I(inode)->io_failure_tree;
3025 
3026 		/*
3027 		 * We always issue full-sector reads, but if some block in a
3028 		 * page fails to read, blk_update_request() will advance
3029 		 * bv_offset and adjust bv_len to compensate.  Print a warning
3030 		 * for unaligned offsets, and an error if they don't add up to
3031 		 * a full sector.
3032 		 */
3033 		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
3034 			btrfs_err(fs_info,
3035 		"partial page read in btrfs with offset %u and length %u",
3036 				  bvec->bv_offset, bvec->bv_len);
3037 		else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
3038 				     sectorsize))
3039 			btrfs_info(fs_info,
3040 		"incomplete page read with offset %u and length %u",
3041 				   bvec->bv_offset, bvec->bv_len);
3042 
3043 		start = page_offset(page) + bvec->bv_offset;
3044 		end = start + bvec->bv_len - 1;
3045 		len = bvec->bv_len;
3046 
3047 		mirror = bbio->mirror_num;
3048 		if (likely(uptodate)) {
3049 			if (is_data_inode(inode)) {
3050 				error_bitmap = btrfs_verify_data_csum(bbio,
3051 						bio_offset, page, start, end);
3052 				ret = error_bitmap;
3053 			} else {
3054 				ret = btrfs_validate_metadata_buffer(bbio,
3055 					page, start, end, mirror);
3056 			}
3057 			if (ret)
3058 				uptodate = false;
3059 			else
3060 				clean_io_failure(BTRFS_I(inode)->root->fs_info,
3061 						 failure_tree, tree, start,
3062 						 page,
3063 						 btrfs_ino(BTRFS_I(inode)), 0);
3064 		}
3065 
3066 		if (likely(uptodate))
3067 			goto readpage_ok;
3068 
3069 		if (is_data_inode(inode)) {
3070 			/*
3071 			 * If we failed to submit the IO at all we'll have a
3072 			 * mirror_num == 0, in which case we need to just mark
3073 			 * the page with an error and unlock it and carry on.
3074 			 */
3075 			if (mirror == 0)
3076 				goto readpage_ok;
3077 
3078 			/*
3079 			 * btrfs_submit_read_repair() will handle all the good
3080 			 * and bad sectors, we just continue to the next bvec.
3081 			 */
3082 			submit_read_repair(inode, bio, bio_offset, page,
3083 					   start - page_offset(page), start,
3084 					   end, mirror, error_bitmap,
3085 					   btrfs_submit_data_bio);
3086 
3087 			ASSERT(bio_offset + len > bio_offset);
3088 			bio_offset += len;
3089 			continue;
3090 		} else {
3091 			struct extent_buffer *eb;
3092 
3093 			eb = find_extent_buffer_readpage(fs_info, page, start);
3094 			set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3095 			eb->read_mirror = mirror;
3096 			atomic_dec(&eb->io_pages);
3097 		}
3098 readpage_ok:
3099 		if (likely(uptodate)) {
3100 			loff_t i_size = i_size_read(inode);
3101 			pgoff_t end_index = i_size >> PAGE_SHIFT;
3102 
3103 			/*
3104 			 * Zero out the remaining part if this range straddles
3105 			 * i_size.
3106 			 *
3107 			 * Here we should only zero the range inside the bvec,
3108 			 * not touch anything else.
3109 			 *
3110 			 * NOTE: i_size is exclusive while end is inclusive.
3111 			 */
3112 			if (page->index == end_index && i_size <= end) {
3113 				u32 zero_start = max(offset_in_page(i_size),
3114 						     offset_in_page(start));
3115 
3116 				zero_user_segment(page, zero_start,
3117 						  offset_in_page(end) + 1);
3118 			}
3119 		}
3120 		ASSERT(bio_offset + len > bio_offset);
3121 		bio_offset += len;
3122 
3123 		/* Update page status and unlock */
3124 		end_page_read(page, uptodate, start, len);
3125 		endio_readpage_release_extent(&processed, BTRFS_I(inode),
3126 					      start, end, PageUptodate(page));
3127 	}
3128 	/* Release the last extent */
3129 	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
3130 	btrfs_bio_free_csum(bbio);
3131 	bio_put(bio);
3132 }
3133 
3134 /*
3135  * Initialize the members up to but not including 'bio'. Use after allocating a
3136  * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
3137  * 'bio' because use of __GFP_ZERO is not supported.
3138  */
3139 static inline void btrfs_bio_init(struct btrfs_bio *bbio)
3140 {
3141 	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
3142 }
3143 
3144 /*
3145  * Allocate a btrfs_io_bio, with @nr_iovecs as maximum number of iovecs.
3146  *
3147  * The bio allocation is backed by bioset and does not fail.
3148  */
3149 struct bio *btrfs_bio_alloc(unsigned int nr_iovecs)
3150 {
3151 	struct bio *bio;
3152 
3153 	ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
3154 	bio = bio_alloc_bioset(NULL, nr_iovecs, 0, GFP_NOFS, &btrfs_bioset);
3155 	btrfs_bio_init(btrfs_bio(bio));
3156 	return bio;
3157 }
3158 
3159 struct bio *btrfs_bio_clone(struct bio *bio)
3160 {
3161 	struct btrfs_bio *bbio;
3162 	struct bio *new;
3163 
3164 	/* Bio allocation backed by a bioset does not fail */
3165 	new = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOFS, &btrfs_bioset);
3166 	bbio = btrfs_bio(new);
3167 	btrfs_bio_init(bbio);
3168 	bbio->iter = bio->bi_iter;
3169 	return new;
3170 }
3171 
3172 struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
3173 {
3174 	struct bio *bio;
3175 	struct btrfs_bio *bbio;
3176 
3177 	ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
3178 
3179 	/* this will never fail when it's backed by a bioset */
3180 	bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
3181 	ASSERT(bio);
3182 
3183 	bbio = btrfs_bio(bio);
3184 	btrfs_bio_init(bbio);
3185 
3186 	bio_trim(bio, offset >> 9, size >> 9);
3187 	bbio->iter = bio->bi_iter;
3188 	return bio;
3189 }
3190 
3191 /**
3192  * Attempt to add a page to bio
3193  *
3194  * @bio_ctrl:	record both the bio, and its bio_flags
3195  * @page:	page to add to the bio
3196  * @disk_bytenr:  offset of the new bio or to check whether we are adding
3197  *                a contiguous page to the previous one
3198  * @size:	portion of page that we want to write
3199  * @pg_offset:	starting offset in the page
3200  * @bio_flags:	flags of the current bio to see if we can merge them
3201  *
3202  * Attempt to add a page to bio considering stripe alignment etc.
3203  *
3204  * Return >= 0 for the number of bytes added to the bio.
3205  * Can return 0 if the current bio is already at stripe/zone boundary.
3206  * Return <0 for error.
3207  */
3208 static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
3209 			      struct page *page,
3210 			      u64 disk_bytenr, unsigned int size,
3211 			      unsigned int pg_offset,
3212 			      unsigned long bio_flags)
3213 {
3214 	struct bio *bio = bio_ctrl->bio;
3215 	u32 bio_size = bio->bi_iter.bi_size;
3216 	u32 real_size;
3217 	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
3218 	bool contig;
3219 	int ret;
3220 
3221 	ASSERT(bio);
3222 	/* The limit should be calculated when bio_ctrl->bio is allocated */
3223 	ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
3224 	if (bio_ctrl->bio_flags != bio_flags)
3225 		return 0;
3226 
3227 	if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED)
3228 		contig = bio->bi_iter.bi_sector == sector;
3229 	else
3230 		contig = bio_end_sector(bio) == sector;
3231 	if (!contig)
3232 		return 0;
3233 
3234 	real_size = min(bio_ctrl->len_to_oe_boundary,
3235 			bio_ctrl->len_to_stripe_boundary) - bio_size;
3236 	real_size = min(real_size, size);
3237 
3238 	/*
3239 	 * If real_size is 0, never call bio_add_*_page(), as even size is 0,
3240 	 * bio will still execute its endio function on the page!
3241 	 */
3242 	if (real_size == 0)
3243 		return 0;
3244 
3245 	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
3246 		ret = bio_add_zone_append_page(bio, page, real_size, pg_offset);
3247 	else
3248 		ret = bio_add_page(bio, page, real_size, pg_offset);
3249 
3250 	return ret;
3251 }
3252 
3253 static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
3254 			       struct btrfs_inode *inode, u64 file_offset)
3255 {
3256 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3257 	struct btrfs_io_geometry geom;
3258 	struct btrfs_ordered_extent *ordered;
3259 	struct extent_map *em;
3260 	u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
3261 	int ret;
3262 
3263 	/*
3264 	 * Pages for compressed extent are never submitted to disk directly,
3265 	 * thus it has no real boundary, just set them to U32_MAX.
3266 	 *
3267 	 * The split happens for real compressed bio, which happens in
3268 	 * btrfs_submit_compressed_read/write().
3269 	 */
3270 	if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED) {
3271 		bio_ctrl->len_to_oe_boundary = U32_MAX;
3272 		bio_ctrl->len_to_stripe_boundary = U32_MAX;
3273 		return 0;
3274 	}
3275 	em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
3276 	if (IS_ERR(em))
3277 		return PTR_ERR(em);
3278 	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
3279 				    logical, &geom);
3280 	free_extent_map(em);
3281 	if (ret < 0) {
3282 		return ret;
3283 	}
3284 	if (geom.len > U32_MAX)
3285 		bio_ctrl->len_to_stripe_boundary = U32_MAX;
3286 	else
3287 		bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
3288 
3289 	if (bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
3290 		bio_ctrl->len_to_oe_boundary = U32_MAX;
3291 		return 0;
3292 	}
3293 
3294 	/* Ordered extent not yet created, so we're good */
3295 	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
3296 	if (!ordered) {
3297 		bio_ctrl->len_to_oe_boundary = U32_MAX;
3298 		return 0;
3299 	}
3300 
3301 	bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
3302 		ordered->disk_bytenr + ordered->disk_num_bytes - logical);
3303 	btrfs_put_ordered_extent(ordered);
3304 	return 0;
3305 }
3306 
3307 static int alloc_new_bio(struct btrfs_inode *inode,
3308 			 struct btrfs_bio_ctrl *bio_ctrl,
3309 			 struct writeback_control *wbc,
3310 			 unsigned int opf,
3311 			 bio_end_io_t end_io_func,
3312 			 u64 disk_bytenr, u32 offset, u64 file_offset,
3313 			 unsigned long bio_flags)
3314 {
3315 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3316 	struct bio *bio;
3317 	int ret;
3318 
3319 	bio = btrfs_bio_alloc(BIO_MAX_VECS);
3320 	/*
3321 	 * For compressed page range, its disk_bytenr is always @disk_bytenr
3322 	 * passed in, no matter if we have added any range into previous bio.
3323 	 */
3324 	if (bio_flags & EXTENT_BIO_COMPRESSED)
3325 		bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
3326 	else
3327 		bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
3328 	bio_ctrl->bio = bio;
3329 	bio_ctrl->bio_flags = bio_flags;
3330 	bio->bi_end_io = end_io_func;
3331 	bio->bi_private = &inode->io_tree;
3332 	bio->bi_write_hint = inode->vfs_inode.i_write_hint;
3333 	bio->bi_opf = opf;
3334 	ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
3335 	if (ret < 0)
3336 		goto error;
3337 	if (wbc) {
3338 		struct block_device *bdev;
3339 
3340 		bdev = fs_info->fs_devices->latest_dev->bdev;
3341 		bio_set_dev(bio, bdev);
3342 		wbc_init_bio(wbc, bio);
3343 	}
3344 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
3345 		struct btrfs_device *device;
3346 
3347 		device = btrfs_zoned_get_device(fs_info, disk_bytenr,
3348 						fs_info->sectorsize);
3349 		if (IS_ERR(device)) {
3350 			ret = PTR_ERR(device);
3351 			goto error;
3352 		}
3353 
3354 		btrfs_bio(bio)->device = device;
3355 	}
3356 	return 0;
3357 error:
3358 	bio_ctrl->bio = NULL;
3359 	bio->bi_status = errno_to_blk_status(ret);
3360 	bio_endio(bio);
3361 	return ret;
3362 }
3363 
3364 /*
3365  * @opf:	bio REQ_OP_* and REQ_* flags as one value
3366  * @wbc:	optional writeback control for io accounting
3367  * @page:	page to add to the bio
3368  * @disk_bytenr: logical bytenr where the write will be
3369  * @size:	portion of page that we want to write to
3370  * @pg_offset:	offset of the new bio or to check whether we are adding
3371  *              a contiguous page to the previous one
3372  * @bio_ret:	must be valid pointer, newly allocated bio will be stored there
3373  * @end_io_func:     end_io callback for new bio
3374  * @mirror_num:	     desired mirror to read/write
3375  * @prev_bio_flags:  flags of previous bio to see if we can merge the current one
3376  * @bio_flags:	flags of the current bio to see if we can merge them
3377  */
3378 static int submit_extent_page(unsigned int opf,
3379 			      struct writeback_control *wbc,
3380 			      struct btrfs_bio_ctrl *bio_ctrl,
3381 			      struct page *page, u64 disk_bytenr,
3382 			      size_t size, unsigned long pg_offset,
3383 			      bio_end_io_t end_io_func,
3384 			      int mirror_num,
3385 			      unsigned long bio_flags,
3386 			      bool force_bio_submit)
3387 {
3388 	int ret = 0;
3389 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
3390 	unsigned int cur = pg_offset;
3391 
3392 	ASSERT(bio_ctrl);
3393 
3394 	ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
3395 	       pg_offset + size <= PAGE_SIZE);
3396 	if (force_bio_submit && bio_ctrl->bio) {
3397 		ret = submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->bio_flags);
3398 		bio_ctrl->bio = NULL;
3399 		if (ret < 0)
3400 			return ret;
3401 	}
3402 
3403 	while (cur < pg_offset + size) {
3404 		u32 offset = cur - pg_offset;
3405 		int added;
3406 
3407 		/* Allocate new bio if needed */
3408 		if (!bio_ctrl->bio) {
3409 			ret = alloc_new_bio(inode, bio_ctrl, wbc, opf,
3410 					    end_io_func, disk_bytenr, offset,
3411 					    page_offset(page) + cur,
3412 					    bio_flags);
3413 			if (ret < 0)
3414 				return ret;
3415 		}
3416 		/*
3417 		 * We must go through btrfs_bio_add_page() to ensure each
3418 		 * page range won't cross various boundaries.
3419 		 */
3420 		if (bio_flags & EXTENT_BIO_COMPRESSED)
3421 			added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr,
3422 					size - offset, pg_offset + offset,
3423 					bio_flags);
3424 		else
3425 			added = btrfs_bio_add_page(bio_ctrl, page,
3426 					disk_bytenr + offset, size - offset,
3427 					pg_offset + offset, bio_flags);
3428 
3429 		/* Metadata page range should never be split */
3430 		if (!is_data_inode(&inode->vfs_inode))
3431 			ASSERT(added == 0 || added == size - offset);
3432 
3433 		/* At least we added some page, update the account */
3434 		if (wbc && added)
3435 			wbc_account_cgroup_owner(wbc, page, added);
3436 
3437 		/* We have reached boundary, submit right now */
3438 		if (added < size - offset) {
3439 			/* The bio should contain some page(s) */
3440 			ASSERT(bio_ctrl->bio->bi_iter.bi_size);
3441 			ret = submit_one_bio(bio_ctrl->bio, mirror_num,
3442 					bio_ctrl->bio_flags);
3443 			bio_ctrl->bio = NULL;
3444 			if (ret < 0)
3445 				return ret;
3446 		}
3447 		cur += added;
3448 	}
3449 	return 0;
3450 }
3451 
3452 static int attach_extent_buffer_page(struct extent_buffer *eb,
3453 				     struct page *page,
3454 				     struct btrfs_subpage *prealloc)
3455 {
3456 	struct btrfs_fs_info *fs_info = eb->fs_info;
3457 	int ret = 0;
3458 
3459 	/*
3460 	 * If the page is mapped to btree inode, we should hold the private
3461 	 * lock to prevent race.
3462 	 * For cloned or dummy extent buffers, their pages are not mapped and
3463 	 * will not race with any other ebs.
3464 	 */
3465 	if (page->mapping)
3466 		lockdep_assert_held(&page->mapping->private_lock);
3467 
3468 	if (fs_info->sectorsize == PAGE_SIZE) {
3469 		if (!PagePrivate(page))
3470 			attach_page_private(page, eb);
3471 		else
3472 			WARN_ON(page->private != (unsigned long)eb);
3473 		return 0;
3474 	}
3475 
3476 	/* Already mapped, just free prealloc */
3477 	if (PagePrivate(page)) {
3478 		btrfs_free_subpage(prealloc);
3479 		return 0;
3480 	}
3481 
3482 	if (prealloc)
3483 		/* Has preallocated memory for subpage */
3484 		attach_page_private(page, prealloc);
3485 	else
3486 		/* Do new allocation to attach subpage */
3487 		ret = btrfs_attach_subpage(fs_info, page,
3488 					   BTRFS_SUBPAGE_METADATA);
3489 	return ret;
3490 }
3491 
3492 int set_page_extent_mapped(struct page *page)
3493 {
3494 	struct btrfs_fs_info *fs_info;
3495 
3496 	ASSERT(page->mapping);
3497 
3498 	if (PagePrivate(page))
3499 		return 0;
3500 
3501 	fs_info = btrfs_sb(page->mapping->host->i_sb);
3502 
3503 	if (fs_info->sectorsize < PAGE_SIZE)
3504 		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
3505 
3506 	attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3507 	return 0;
3508 }
3509 
3510 void clear_page_extent_mapped(struct page *page)
3511 {
3512 	struct btrfs_fs_info *fs_info;
3513 
3514 	ASSERT(page->mapping);
3515 
3516 	if (!PagePrivate(page))
3517 		return;
3518 
3519 	fs_info = btrfs_sb(page->mapping->host->i_sb);
3520 	if (fs_info->sectorsize < PAGE_SIZE)
3521 		return btrfs_detach_subpage(fs_info, page);
3522 
3523 	detach_page_private(page);
3524 }
3525 
3526 static struct extent_map *
3527 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3528 		 u64 start, u64 len, struct extent_map **em_cached)
3529 {
3530 	struct extent_map *em;
3531 
3532 	if (em_cached && *em_cached) {
3533 		em = *em_cached;
3534 		if (extent_map_in_tree(em) && start >= em->start &&
3535 		    start < extent_map_end(em)) {
3536 			refcount_inc(&em->refs);
3537 			return em;
3538 		}
3539 
3540 		free_extent_map(em);
3541 		*em_cached = NULL;
3542 	}
3543 
3544 	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
3545 	if (em_cached && !IS_ERR(em)) {
3546 		BUG_ON(*em_cached);
3547 		refcount_inc(&em->refs);
3548 		*em_cached = em;
3549 	}
3550 	return em;
3551 }
3552 /*
3553  * basic readpage implementation.  Locked extent state structs are inserted
3554  * into the tree that are removed when the IO is done (by the end_io
3555  * handlers)
3556  * XXX JDM: This needs looking at to ensure proper page locking
3557  * return 0 on success, otherwise return error
3558  */
3559 int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
3560 		      struct btrfs_bio_ctrl *bio_ctrl,
3561 		      unsigned int read_flags, u64 *prev_em_start)
3562 {
3563 	struct inode *inode = page->mapping->host;
3564 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3565 	u64 start = page_offset(page);
3566 	const u64 end = start + PAGE_SIZE - 1;
3567 	u64 cur = start;
3568 	u64 extent_offset;
3569 	u64 last_byte = i_size_read(inode);
3570 	u64 block_start;
3571 	u64 cur_end;
3572 	struct extent_map *em;
3573 	int ret = 0;
3574 	size_t pg_offset = 0;
3575 	size_t iosize;
3576 	size_t blocksize = inode->i_sb->s_blocksize;
3577 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3578 
3579 	ret = set_page_extent_mapped(page);
3580 	if (ret < 0) {
3581 		unlock_extent(tree, start, end);
3582 		btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
3583 		unlock_page(page);
3584 		goto out;
3585 	}
3586 
3587 	if (page->index == last_byte >> PAGE_SHIFT) {
3588 		size_t zero_offset = offset_in_page(last_byte);
3589 
3590 		if (zero_offset) {
3591 			iosize = PAGE_SIZE - zero_offset;
3592 			memzero_page(page, zero_offset, iosize);
3593 			flush_dcache_page(page);
3594 		}
3595 	}
3596 	begin_page_read(fs_info, page);
3597 	while (cur <= end) {
3598 		unsigned long this_bio_flag = 0;
3599 		bool force_bio_submit = false;
3600 		u64 disk_bytenr;
3601 
3602 		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
3603 		if (cur >= last_byte) {
3604 			struct extent_state *cached = NULL;
3605 
3606 			iosize = PAGE_SIZE - pg_offset;
3607 			memzero_page(page, pg_offset, iosize);
3608 			flush_dcache_page(page);
3609 			set_extent_uptodate(tree, cur, cur + iosize - 1,
3610 					    &cached, GFP_NOFS);
3611 			unlock_extent_cached(tree, cur,
3612 					     cur + iosize - 1, &cached);
3613 			end_page_read(page, true, cur, iosize);
3614 			break;
3615 		}
3616 		em = __get_extent_map(inode, page, pg_offset, cur,
3617 				      end - cur + 1, em_cached);
3618 		if (IS_ERR(em)) {
3619 			unlock_extent(tree, cur, end);
3620 			end_page_read(page, false, cur, end + 1 - cur);
3621 			ret = PTR_ERR(em);
3622 			break;
3623 		}
3624 		extent_offset = cur - em->start;
3625 		BUG_ON(extent_map_end(em) <= cur);
3626 		BUG_ON(end < cur);
3627 
3628 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3629 			this_bio_flag |= EXTENT_BIO_COMPRESSED;
3630 			extent_set_compress_type(&this_bio_flag,
3631 						 em->compress_type);
3632 		}
3633 
3634 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
3635 		cur_end = min(extent_map_end(em) - 1, end);
3636 		iosize = ALIGN(iosize, blocksize);
3637 		if (this_bio_flag & EXTENT_BIO_COMPRESSED)
3638 			disk_bytenr = em->block_start;
3639 		else
3640 			disk_bytenr = em->block_start + extent_offset;
3641 		block_start = em->block_start;
3642 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3643 			block_start = EXTENT_MAP_HOLE;
3644 
3645 		/*
3646 		 * If we have a file range that points to a compressed extent
3647 		 * and it's followed by a consecutive file range that points
3648 		 * to the same compressed extent (possibly with a different
3649 		 * offset and/or length, so it either points to the whole extent
3650 		 * or only part of it), we must make sure we do not submit a
3651 		 * single bio to populate the pages for the 2 ranges because
3652 		 * this makes the compressed extent read zero out the pages
3653 		 * belonging to the 2nd range. Imagine the following scenario:
3654 		 *
3655 		 *  File layout
3656 		 *  [0 - 8K]                     [8K - 24K]
3657 		 *    |                               |
3658 		 *    |                               |
3659 		 * points to extent X,         points to extent X,
3660 		 * offset 4K, length of 8K     offset 0, length 16K
3661 		 *
3662 		 * [extent X, compressed length = 4K uncompressed length = 16K]
3663 		 *
3664 		 * If the bio to read the compressed extent covers both ranges,
3665 		 * it will decompress extent X into the pages belonging to the
3666 		 * first range and then it will stop, zeroing out the remaining
3667 		 * pages that belong to the other range that points to extent X.
3668 		 * So here we make sure we submit 2 bios, one for the first
3669 		 * range and another one for the third range. Both will target
3670 		 * the same physical extent from disk, but we can't currently
3671 		 * make the compressed bio endio callback populate the pages
3672 		 * for both ranges because each compressed bio is tightly
3673 		 * coupled with a single extent map, and each range can have
3674 		 * an extent map with a different offset value relative to the
3675 		 * uncompressed data of our extent and different lengths. This
3676 		 * is a corner case so we prioritize correctness over
3677 		 * non-optimal behavior (submitting 2 bios for the same extent).
3678 		 */
3679 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3680 		    prev_em_start && *prev_em_start != (u64)-1 &&
3681 		    *prev_em_start != em->start)
3682 			force_bio_submit = true;
3683 
3684 		if (prev_em_start)
3685 			*prev_em_start = em->start;
3686 
3687 		free_extent_map(em);
3688 		em = NULL;
3689 
3690 		/* we've found a hole, just zero and go on */
3691 		if (block_start == EXTENT_MAP_HOLE) {
3692 			struct extent_state *cached = NULL;
3693 
3694 			memzero_page(page, pg_offset, iosize);
3695 			flush_dcache_page(page);
3696 
3697 			set_extent_uptodate(tree, cur, cur + iosize - 1,
3698 					    &cached, GFP_NOFS);
3699 			unlock_extent_cached(tree, cur,
3700 					     cur + iosize - 1, &cached);
3701 			end_page_read(page, true, cur, iosize);
3702 			cur = cur + iosize;
3703 			pg_offset += iosize;
3704 			continue;
3705 		}
3706 		/* the get_extent function already copied into the page */
3707 		if (test_range_bit(tree, cur, cur_end,
3708 				   EXTENT_UPTODATE, 1, NULL)) {
3709 			unlock_extent(tree, cur, cur + iosize - 1);
3710 			end_page_read(page, true, cur, iosize);
3711 			cur = cur + iosize;
3712 			pg_offset += iosize;
3713 			continue;
3714 		}
3715 		/* we have an inline extent but it didn't get marked up
3716 		 * to date.  Error out
3717 		 */
3718 		if (block_start == EXTENT_MAP_INLINE) {
3719 			unlock_extent(tree, cur, cur + iosize - 1);
3720 			end_page_read(page, false, cur, iosize);
3721 			cur = cur + iosize;
3722 			pg_offset += iosize;
3723 			continue;
3724 		}
3725 
3726 		ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
3727 					 bio_ctrl, page, disk_bytenr, iosize,
3728 					 pg_offset,
3729 					 end_bio_extent_readpage, 0,
3730 					 this_bio_flag,
3731 					 force_bio_submit);
3732 		if (ret) {
3733 			unlock_extent(tree, cur, cur + iosize - 1);
3734 			end_page_read(page, false, cur, iosize);
3735 			goto out;
3736 		}
3737 		cur = cur + iosize;
3738 		pg_offset += iosize;
3739 	}
3740 out:
3741 	return ret;
3742 }
3743 
3744 static inline void contiguous_readpages(struct page *pages[], int nr_pages,
3745 					u64 start, u64 end,
3746 					struct extent_map **em_cached,
3747 					struct btrfs_bio_ctrl *bio_ctrl,
3748 					u64 *prev_em_start)
3749 {
3750 	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
3751 	int index;
3752 
3753 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3754 
3755 	for (index = 0; index < nr_pages; index++) {
3756 		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
3757 				  REQ_RAHEAD, prev_em_start);
3758 		put_page(pages[index]);
3759 	}
3760 }
3761 
3762 static void update_nr_written(struct writeback_control *wbc,
3763 			      unsigned long nr_written)
3764 {
3765 	wbc->nr_to_write -= nr_written;
3766 }
3767 
3768 /*
3769  * helper for __extent_writepage, doing all of the delayed allocation setup.
3770  *
3771  * This returns 1 if btrfs_run_delalloc_range function did all the work required
3772  * to write the page (copy into inline extent).  In this case the IO has
3773  * been started and the page is already unlocked.
3774  *
3775  * This returns 0 if all went well (page still locked)
3776  * This returns < 0 if there were errors (page still locked)
3777  */
3778 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
3779 		struct page *page, struct writeback_control *wbc)
3780 {
3781 	const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
3782 	u64 delalloc_start = page_offset(page);
3783 	u64 delalloc_to_write = 0;
3784 	/* How many pages are started by btrfs_run_delalloc_range() */
3785 	unsigned long nr_written = 0;
3786 	int ret;
3787 	int page_started = 0;
3788 
3789 	while (delalloc_start < page_end) {
3790 		u64 delalloc_end = page_end;
3791 		bool found;
3792 
3793 		found = find_lock_delalloc_range(&inode->vfs_inode, page,
3794 					       &delalloc_start,
3795 					       &delalloc_end);
3796 		if (!found) {
3797 			delalloc_start = delalloc_end + 1;
3798 			continue;
3799 		}
3800 		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3801 				delalloc_end, &page_started, &nr_written, wbc);
3802 		if (ret) {
3803 			btrfs_page_set_error(inode->root->fs_info, page,
3804 					     page_offset(page), PAGE_SIZE);
3805 			return ret;
3806 		}
3807 		/*
3808 		 * delalloc_end is already one less than the total length, so
3809 		 * we don't subtract one from PAGE_SIZE
3810 		 */
3811 		delalloc_to_write += (delalloc_end - delalloc_start +
3812 				      PAGE_SIZE) >> PAGE_SHIFT;
3813 		delalloc_start = delalloc_end + 1;
3814 	}
3815 	if (wbc->nr_to_write < delalloc_to_write) {
3816 		int thresh = 8192;
3817 
3818 		if (delalloc_to_write < thresh * 2)
3819 			thresh = delalloc_to_write;
3820 		wbc->nr_to_write = min_t(u64, delalloc_to_write,
3821 					 thresh);
3822 	}
3823 
3824 	/* Did btrfs_run_dealloc_range() already unlock and start the IO? */
3825 	if (page_started) {
3826 		/*
3827 		 * We've unlocked the page, so we can't update the mapping's
3828 		 * writeback index, just update nr_to_write.
3829 		 */
3830 		wbc->nr_to_write -= nr_written;
3831 		return 1;
3832 	}
3833 
3834 	return 0;
3835 }
3836 
3837 /*
3838  * Find the first byte we need to write.
3839  *
3840  * For subpage, one page can contain several sectors, and
3841  * __extent_writepage_io() will just grab all extent maps in the page
3842  * range and try to submit all non-inline/non-compressed extents.
3843  *
3844  * This is a big problem for subpage, we shouldn't re-submit already written
3845  * data at all.
3846  * This function will lookup subpage dirty bit to find which range we really
3847  * need to submit.
3848  *
3849  * Return the next dirty range in [@start, @end).
3850  * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
3851  */
3852 static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
3853 				 struct page *page, u64 *start, u64 *end)
3854 {
3855 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
3856 	struct btrfs_subpage_info *spi = fs_info->subpage_info;
3857 	u64 orig_start = *start;
3858 	/* Declare as unsigned long so we can use bitmap ops */
3859 	unsigned long flags;
3860 	int range_start_bit;
3861 	int range_end_bit;
3862 
3863 	/*
3864 	 * For regular sector size == page size case, since one page only
3865 	 * contains one sector, we return the page offset directly.
3866 	 */
3867 	if (fs_info->sectorsize == PAGE_SIZE) {
3868 		*start = page_offset(page);
3869 		*end = page_offset(page) + PAGE_SIZE;
3870 		return;
3871 	}
3872 
3873 	range_start_bit = spi->dirty_offset +
3874 			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
3875 
3876 	/* We should have the page locked, but just in case */
3877 	spin_lock_irqsave(&subpage->lock, flags);
3878 	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
3879 			       spi->dirty_offset + spi->bitmap_nr_bits);
3880 	spin_unlock_irqrestore(&subpage->lock, flags);
3881 
3882 	range_start_bit -= spi->dirty_offset;
3883 	range_end_bit -= spi->dirty_offset;
3884 
3885 	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
3886 	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
3887 }
3888 
3889 /*
3890  * helper for __extent_writepage.  This calls the writepage start hooks,
3891  * and does the loop to map the page into extents and bios.
3892  *
3893  * We return 1 if the IO is started and the page is unlocked,
3894  * 0 if all went well (page still locked)
3895  * < 0 if there were errors (page still locked)
3896  */
3897 static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
3898 				 struct page *page,
3899 				 struct writeback_control *wbc,
3900 				 struct extent_page_data *epd,
3901 				 loff_t i_size,
3902 				 int *nr_ret)
3903 {
3904 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3905 	u64 cur = page_offset(page);
3906 	u64 end = cur + PAGE_SIZE - 1;
3907 	u64 extent_offset;
3908 	u64 block_start;
3909 	struct extent_map *em;
3910 	int ret = 0;
3911 	int nr = 0;
3912 	u32 opf = REQ_OP_WRITE;
3913 	const unsigned int write_flags = wbc_to_write_flags(wbc);
3914 	bool compressed;
3915 
3916 	ret = btrfs_writepage_cow_fixup(page);
3917 	if (ret) {
3918 		/* Fixup worker will requeue */
3919 		redirty_page_for_writepage(wbc, page);
3920 		unlock_page(page);
3921 		return 1;
3922 	}
3923 
3924 	/*
3925 	 * we don't want to touch the inode after unlocking the page,
3926 	 * so we update the mapping writeback index now
3927 	 */
3928 	update_nr_written(wbc, 1);
3929 
3930 	while (cur <= end) {
3931 		u64 disk_bytenr;
3932 		u64 em_end;
3933 		u64 dirty_range_start = cur;
3934 		u64 dirty_range_end;
3935 		u32 iosize;
3936 
3937 		if (cur >= i_size) {
3938 			btrfs_writepage_endio_finish_ordered(inode, page, cur,
3939 							     end, true);
3940 			/*
3941 			 * This range is beyond i_size, thus we don't need to
3942 			 * bother writing back.
3943 			 * But we still need to clear the dirty subpage bit, or
3944 			 * the next time the page gets dirtied, we will try to
3945 			 * writeback the sectors with subpage dirty bits,
3946 			 * causing writeback without ordered extent.
3947 			 */
3948 			btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
3949 			break;
3950 		}
3951 
3952 		find_next_dirty_byte(fs_info, page, &dirty_range_start,
3953 				     &dirty_range_end);
3954 		if (cur < dirty_range_start) {
3955 			cur = dirty_range_start;
3956 			continue;
3957 		}
3958 
3959 		em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
3960 		if (IS_ERR(em)) {
3961 			btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
3962 			ret = PTR_ERR_OR_ZERO(em);
3963 			break;
3964 		}
3965 
3966 		extent_offset = cur - em->start;
3967 		em_end = extent_map_end(em);
3968 		ASSERT(cur <= em_end);
3969 		ASSERT(cur < end);
3970 		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
3971 		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
3972 		block_start = em->block_start;
3973 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3974 		disk_bytenr = em->block_start + extent_offset;
3975 
3976 		/*
3977 		 * Note that em_end from extent_map_end() and dirty_range_end from
3978 		 * find_next_dirty_byte() are all exclusive
3979 		 */
3980 		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
3981 
3982 		if (btrfs_use_zone_append(inode, em->block_start))
3983 			opf = REQ_OP_ZONE_APPEND;
3984 
3985 		free_extent_map(em);
3986 		em = NULL;
3987 
3988 		/*
3989 		 * compressed and inline extents are written through other
3990 		 * paths in the FS
3991 		 */
3992 		if (compressed || block_start == EXTENT_MAP_HOLE ||
3993 		    block_start == EXTENT_MAP_INLINE) {
3994 			if (compressed)
3995 				nr++;
3996 			else
3997 				btrfs_writepage_endio_finish_ordered(inode,
3998 						page, cur, cur + iosize - 1, true);
3999 			btrfs_page_clear_dirty(fs_info, page, cur, iosize);
4000 			cur += iosize;
4001 			continue;
4002 		}
4003 
4004 		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
4005 		if (!PageWriteback(page)) {
4006 			btrfs_err(inode->root->fs_info,
4007 				   "page %lu not writeback, cur %llu end %llu",
4008 			       page->index, cur, end);
4009 		}
4010 
4011 		/*
4012 		 * Although the PageDirty bit is cleared before entering this
4013 		 * function, subpage dirty bit is not cleared.
4014 		 * So clear subpage dirty bit here so next time we won't submit
4015 		 * page for range already written to disk.
4016 		 */
4017 		btrfs_page_clear_dirty(fs_info, page, cur, iosize);
4018 
4019 		ret = submit_extent_page(opf | write_flags, wbc,
4020 					 &epd->bio_ctrl, page,
4021 					 disk_bytenr, iosize,
4022 					 cur - page_offset(page),
4023 					 end_bio_extent_writepage,
4024 					 0, 0, false);
4025 		if (ret) {
4026 			btrfs_page_set_error(fs_info, page, cur, iosize);
4027 			if (PageWriteback(page))
4028 				btrfs_page_clear_writeback(fs_info, page, cur,
4029 							   iosize);
4030 		}
4031 
4032 		cur += iosize;
4033 		nr++;
4034 	}
4035 	/*
4036 	 * If we finish without problem, we should not only clear page dirty,
4037 	 * but also empty subpage dirty bits
4038 	 */
4039 	if (!ret)
4040 		btrfs_page_assert_not_dirty(fs_info, page);
4041 	*nr_ret = nr;
4042 	return ret;
4043 }
4044 
4045 /*
4046  * the writepage semantics are similar to regular writepage.  extent
4047  * records are inserted to lock ranges in the tree, and as dirty areas
4048  * are found, they are marked writeback.  Then the lock bits are removed
4049  * and the end_io handler clears the writeback ranges
4050  *
4051  * Return 0 if everything goes well.
4052  * Return <0 for error.
4053  */
4054 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
4055 			      struct extent_page_data *epd)
4056 {
4057 	struct folio *folio = page_folio(page);
4058 	struct inode *inode = page->mapping->host;
4059 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4060 	const u64 page_start = page_offset(page);
4061 	const u64 page_end = page_start + PAGE_SIZE - 1;
4062 	int ret;
4063 	int nr = 0;
4064 	size_t pg_offset;
4065 	loff_t i_size = i_size_read(inode);
4066 	unsigned long end_index = i_size >> PAGE_SHIFT;
4067 
4068 	trace___extent_writepage(page, inode, wbc);
4069 
4070 	WARN_ON(!PageLocked(page));
4071 
4072 	btrfs_page_clear_error(btrfs_sb(inode->i_sb), page,
4073 			       page_offset(page), PAGE_SIZE);
4074 
4075 	pg_offset = offset_in_page(i_size);
4076 	if (page->index > end_index ||
4077 	   (page->index == end_index && !pg_offset)) {
4078 		folio_invalidate(folio, 0, folio_size(folio));
4079 		folio_unlock(folio);
4080 		return 0;
4081 	}
4082 
4083 	if (page->index == end_index) {
4084 		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
4085 		flush_dcache_page(page);
4086 	}
4087 
4088 	ret = set_page_extent_mapped(page);
4089 	if (ret < 0) {
4090 		SetPageError(page);
4091 		goto done;
4092 	}
4093 
4094 	if (!epd->extent_locked) {
4095 		ret = writepage_delalloc(BTRFS_I(inode), page, wbc);
4096 		if (ret == 1)
4097 			return 0;
4098 		if (ret)
4099 			goto done;
4100 	}
4101 
4102 	ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
4103 				    &nr);
4104 	if (ret == 1)
4105 		return 0;
4106 
4107 done:
4108 	if (nr == 0) {
4109 		/* make sure the mapping tag for page dirty gets cleared */
4110 		set_page_writeback(page);
4111 		end_page_writeback(page);
4112 	}
4113 	/*
4114 	 * Here we used to have a check for PageError() and then set @ret and
4115 	 * call end_extent_writepage().
4116 	 *
4117 	 * But in fact setting @ret here will cause different error paths
4118 	 * between subpage and regular sectorsize.
4119 	 *
4120 	 * For regular page size, we never submit current page, but only add
4121 	 * current page to current bio.
4122 	 * The bio submission can only happen in next page.
4123 	 * Thus if we hit the PageError() branch, @ret is already set to
4124 	 * non-zero value and will not get updated for regular sectorsize.
4125 	 *
4126 	 * But for subpage case, it's possible we submit part of current page,
4127 	 * thus can get PageError() set by submitted bio of the same page,
4128 	 * while our @ret is still 0.
4129 	 *
4130 	 * So here we unify the behavior and don't set @ret.
4131 	 * Error can still be properly passed to higher layer as page will
4132 	 * be set error, here we just don't handle the IO failure.
4133 	 *
4134 	 * NOTE: This is just a hotfix for subpage.
4135 	 * The root fix will be properly ending ordered extent when we hit
4136 	 * an error during writeback.
4137 	 *
4138 	 * But that needs a bigger refactoring, as we not only need to grab the
4139 	 * submitted OE, but also need to know exactly at which bytenr we hit
4140 	 * the error.
4141 	 * Currently the full page based __extent_writepage_io() is not
4142 	 * capable of that.
4143 	 */
4144 	if (PageError(page))
4145 		end_extent_writepage(page, ret, page_start, page_end);
4146 	if (epd->extent_locked) {
4147 		/*
4148 		 * If epd->extent_locked, it's from extent_write_locked_range(),
4149 		 * the page can either be locked by lock_page() or
4150 		 * process_one_page().
4151 		 * Let btrfs_page_unlock_writer() handle both cases.
4152 		 */
4153 		ASSERT(wbc);
4154 		btrfs_page_unlock_writer(fs_info, page, wbc->range_start,
4155 					 wbc->range_end + 1 - wbc->range_start);
4156 	} else {
4157 		unlock_page(page);
4158 	}
4159 	ASSERT(ret <= 0);
4160 	return ret;
4161 }
4162 
4163 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
4164 {
4165 	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
4166 		       TASK_UNINTERRUPTIBLE);
4167 }
4168 
4169 static void end_extent_buffer_writeback(struct extent_buffer *eb)
4170 {
4171 	if (test_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags))
4172 		btrfs_zone_finish_endio(eb->fs_info, eb->start, eb->len);
4173 
4174 	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4175 	smp_mb__after_atomic();
4176 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
4177 }
4178 
4179 /*
4180  * Lock extent buffer status and pages for writeback.
4181  *
4182  * May try to flush write bio if we can't get the lock.
4183  *
4184  * Return  0 if the extent buffer doesn't need to be submitted.
4185  *           (E.g. the extent buffer is not dirty)
4186  * Return >0 is the extent buffer is submitted to bio.
4187  * Return <0 if something went wrong, no page is locked.
4188  */
4189 static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
4190 			  struct extent_page_data *epd)
4191 {
4192 	struct btrfs_fs_info *fs_info = eb->fs_info;
4193 	int i, num_pages, failed_page_nr;
4194 	int flush = 0;
4195 	int ret = 0;
4196 
4197 	if (!btrfs_try_tree_write_lock(eb)) {
4198 		ret = flush_write_bio(epd);
4199 		if (ret < 0)
4200 			return ret;
4201 		flush = 1;
4202 		btrfs_tree_lock(eb);
4203 	}
4204 
4205 	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
4206 		btrfs_tree_unlock(eb);
4207 		if (!epd->sync_io)
4208 			return 0;
4209 		if (!flush) {
4210 			ret = flush_write_bio(epd);
4211 			if (ret < 0)
4212 				return ret;
4213 			flush = 1;
4214 		}
4215 		while (1) {
4216 			wait_on_extent_buffer_writeback(eb);
4217 			btrfs_tree_lock(eb);
4218 			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
4219 				break;
4220 			btrfs_tree_unlock(eb);
4221 		}
4222 	}
4223 
4224 	/*
4225 	 * We need to do this to prevent races in people who check if the eb is
4226 	 * under IO since we can end up having no IO bits set for a short period
4227 	 * of time.
4228 	 */
4229 	spin_lock(&eb->refs_lock);
4230 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4231 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4232 		spin_unlock(&eb->refs_lock);
4233 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4234 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4235 					 -eb->len,
4236 					 fs_info->dirty_metadata_batch);
4237 		ret = 1;
4238 	} else {
4239 		spin_unlock(&eb->refs_lock);
4240 	}
4241 
4242 	btrfs_tree_unlock(eb);
4243 
4244 	/*
4245 	 * Either we don't need to submit any tree block, or we're submitting
4246 	 * subpage eb.
4247 	 * Subpage metadata doesn't use page locking at all, so we can skip
4248 	 * the page locking.
4249 	 */
4250 	if (!ret || fs_info->sectorsize < PAGE_SIZE)
4251 		return ret;
4252 
4253 	num_pages = num_extent_pages(eb);
4254 	for (i = 0; i < num_pages; i++) {
4255 		struct page *p = eb->pages[i];
4256 
4257 		if (!trylock_page(p)) {
4258 			if (!flush) {
4259 				int err;
4260 
4261 				err = flush_write_bio(epd);
4262 				if (err < 0) {
4263 					ret = err;
4264 					failed_page_nr = i;
4265 					goto err_unlock;
4266 				}
4267 				flush = 1;
4268 			}
4269 			lock_page(p);
4270 		}
4271 	}
4272 
4273 	return ret;
4274 err_unlock:
4275 	/* Unlock already locked pages */
4276 	for (i = 0; i < failed_page_nr; i++)
4277 		unlock_page(eb->pages[i]);
4278 	/*
4279 	 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
4280 	 * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can
4281 	 * be made and undo everything done before.
4282 	 */
4283 	btrfs_tree_lock(eb);
4284 	spin_lock(&eb->refs_lock);
4285 	set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4286 	end_extent_buffer_writeback(eb);
4287 	spin_unlock(&eb->refs_lock);
4288 	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
4289 				 fs_info->dirty_metadata_batch);
4290 	btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4291 	btrfs_tree_unlock(eb);
4292 	return ret;
4293 }
4294 
4295 static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
4296 {
4297 	struct btrfs_fs_info *fs_info = eb->fs_info;
4298 
4299 	btrfs_page_set_error(fs_info, page, eb->start, eb->len);
4300 	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4301 		return;
4302 
4303 	/*
4304 	 * A read may stumble upon this buffer later, make sure that it gets an
4305 	 * error and knows there was an error.
4306 	 */
4307 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4308 
4309 	/*
4310 	 * We need to set the mapping with the io error as well because a write
4311 	 * error will flip the file system readonly, and then syncfs() will
4312 	 * return a 0 because we are readonly if we don't modify the err seq for
4313 	 * the superblock.
4314 	 */
4315 	mapping_set_error(page->mapping, -EIO);
4316 
4317 	/*
4318 	 * If we error out, we should add back the dirty_metadata_bytes
4319 	 * to make it consistent.
4320 	 */
4321 	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4322 				 eb->len, fs_info->dirty_metadata_batch);
4323 
4324 	/*
4325 	 * If writeback for a btree extent that doesn't belong to a log tree
4326 	 * failed, increment the counter transaction->eb_write_errors.
4327 	 * We do this because while the transaction is running and before it's
4328 	 * committing (when we call filemap_fdata[write|wait]_range against
4329 	 * the btree inode), we might have
4330 	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
4331 	 * returns an error or an error happens during writeback, when we're
4332 	 * committing the transaction we wouldn't know about it, since the pages
4333 	 * can be no longer dirty nor marked anymore for writeback (if a
4334 	 * subsequent modification to the extent buffer didn't happen before the
4335 	 * transaction commit), which makes filemap_fdata[write|wait]_range not
4336 	 * able to find the pages tagged with SetPageError at transaction
4337 	 * commit time. So if this happens we must abort the transaction,
4338 	 * otherwise we commit a super block with btree roots that point to
4339 	 * btree nodes/leafs whose content on disk is invalid - either garbage
4340 	 * or the content of some node/leaf from a past generation that got
4341 	 * cowed or deleted and is no longer valid.
4342 	 *
4343 	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
4344 	 * not be enough - we need to distinguish between log tree extents vs
4345 	 * non-log tree extents, and the next filemap_fdatawait_range() call
4346 	 * will catch and clear such errors in the mapping - and that call might
4347 	 * be from a log sync and not from a transaction commit. Also, checking
4348 	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
4349 	 * not done and would not be reliable - the eb might have been released
4350 	 * from memory and reading it back again means that flag would not be
4351 	 * set (since it's a runtime flag, not persisted on disk).
4352 	 *
4353 	 * Using the flags below in the btree inode also makes us achieve the
4354 	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
4355 	 * writeback for all dirty pages and before filemap_fdatawait_range()
4356 	 * is called, the writeback for all dirty pages had already finished
4357 	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
4358 	 * filemap_fdatawait_range() would return success, as it could not know
4359 	 * that writeback errors happened (the pages were no longer tagged for
4360 	 * writeback).
4361 	 */
4362 	switch (eb->log_index) {
4363 	case -1:
4364 		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
4365 		break;
4366 	case 0:
4367 		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
4368 		break;
4369 	case 1:
4370 		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
4371 		break;
4372 	default:
4373 		BUG(); /* unexpected, logic error */
4374 	}
4375 }
4376 
4377 /*
4378  * The endio specific version which won't touch any unsafe spinlock in endio
4379  * context.
4380  */
4381 static struct extent_buffer *find_extent_buffer_nolock(
4382 		struct btrfs_fs_info *fs_info, u64 start)
4383 {
4384 	struct extent_buffer *eb;
4385 
4386 	rcu_read_lock();
4387 	eb = radix_tree_lookup(&fs_info->buffer_radix,
4388 			       start >> fs_info->sectorsize_bits);
4389 	if (eb && atomic_inc_not_zero(&eb->refs)) {
4390 		rcu_read_unlock();
4391 		return eb;
4392 	}
4393 	rcu_read_unlock();
4394 	return NULL;
4395 }
4396 
4397 /*
4398  * The endio function for subpage extent buffer write.
4399  *
4400  * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
4401  * after all extent buffers in the page has finished their writeback.
4402  */
4403 static void end_bio_subpage_eb_writepage(struct bio *bio)
4404 {
4405 	struct btrfs_fs_info *fs_info;
4406 	struct bio_vec *bvec;
4407 	struct bvec_iter_all iter_all;
4408 
4409 	fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
4410 	ASSERT(fs_info->sectorsize < PAGE_SIZE);
4411 
4412 	ASSERT(!bio_flagged(bio, BIO_CLONED));
4413 	bio_for_each_segment_all(bvec, bio, iter_all) {
4414 		struct page *page = bvec->bv_page;
4415 		u64 bvec_start = page_offset(page) + bvec->bv_offset;
4416 		u64 bvec_end = bvec_start + bvec->bv_len - 1;
4417 		u64 cur_bytenr = bvec_start;
4418 
4419 		ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
4420 
4421 		/* Iterate through all extent buffers in the range */
4422 		while (cur_bytenr <= bvec_end) {
4423 			struct extent_buffer *eb;
4424 			int done;
4425 
4426 			/*
4427 			 * Here we can't use find_extent_buffer(), as it may
4428 			 * try to lock eb->refs_lock, which is not safe in endio
4429 			 * context.
4430 			 */
4431 			eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
4432 			ASSERT(eb);
4433 
4434 			cur_bytenr = eb->start + eb->len;
4435 
4436 			ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
4437 			done = atomic_dec_and_test(&eb->io_pages);
4438 			ASSERT(done);
4439 
4440 			if (bio->bi_status ||
4441 			    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4442 				ClearPageUptodate(page);
4443 				set_btree_ioerr(page, eb);
4444 			}
4445 
4446 			btrfs_subpage_clear_writeback(fs_info, page, eb->start,
4447 						      eb->len);
4448 			end_extent_buffer_writeback(eb);
4449 			/*
4450 			 * free_extent_buffer() will grab spinlock which is not
4451 			 * safe in endio context. Thus here we manually dec
4452 			 * the ref.
4453 			 */
4454 			atomic_dec(&eb->refs);
4455 		}
4456 	}
4457 	bio_put(bio);
4458 }
4459 
4460 static void end_bio_extent_buffer_writepage(struct bio *bio)
4461 {
4462 	struct bio_vec *bvec;
4463 	struct extent_buffer *eb;
4464 	int done;
4465 	struct bvec_iter_all iter_all;
4466 
4467 	ASSERT(!bio_flagged(bio, BIO_CLONED));
4468 	bio_for_each_segment_all(bvec, bio, iter_all) {
4469 		struct page *page = bvec->bv_page;
4470 
4471 		eb = (struct extent_buffer *)page->private;
4472 		BUG_ON(!eb);
4473 		done = atomic_dec_and_test(&eb->io_pages);
4474 
4475 		if (bio->bi_status ||
4476 		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4477 			ClearPageUptodate(page);
4478 			set_btree_ioerr(page, eb);
4479 		}
4480 
4481 		end_page_writeback(page);
4482 
4483 		if (!done)
4484 			continue;
4485 
4486 		end_extent_buffer_writeback(eb);
4487 	}
4488 
4489 	bio_put(bio);
4490 }
4491 
4492 static void prepare_eb_write(struct extent_buffer *eb)
4493 {
4494 	u32 nritems;
4495 	unsigned long start;
4496 	unsigned long end;
4497 
4498 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
4499 	atomic_set(&eb->io_pages, num_extent_pages(eb));
4500 
4501 	/* Set btree blocks beyond nritems with 0 to avoid stale content */
4502 	nritems = btrfs_header_nritems(eb);
4503 	if (btrfs_header_level(eb) > 0) {
4504 		end = btrfs_node_key_ptr_offset(nritems);
4505 		memzero_extent_buffer(eb, end, eb->len - end);
4506 	} else {
4507 		/*
4508 		 * Leaf:
4509 		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
4510 		 */
4511 		start = btrfs_item_nr_offset(nritems);
4512 		end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
4513 		memzero_extent_buffer(eb, start, end - start);
4514 	}
4515 }
4516 
4517 /*
4518  * Unlike the work in write_one_eb(), we rely completely on extent locking.
4519  * Page locking is only utilized at minimum to keep the VMM code happy.
4520  */
4521 static int write_one_subpage_eb(struct extent_buffer *eb,
4522 				struct writeback_control *wbc,
4523 				struct extent_page_data *epd)
4524 {
4525 	struct btrfs_fs_info *fs_info = eb->fs_info;
4526 	struct page *page = eb->pages[0];
4527 	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4528 	bool no_dirty_ebs = false;
4529 	int ret;
4530 
4531 	prepare_eb_write(eb);
4532 
4533 	/* clear_page_dirty_for_io() in subpage helper needs page locked */
4534 	lock_page(page);
4535 	btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
4536 
4537 	/* Check if this is the last dirty bit to update nr_written */
4538 	no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
4539 							  eb->start, eb->len);
4540 	if (no_dirty_ebs)
4541 		clear_page_dirty_for_io(page);
4542 
4543 	ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4544 			&epd->bio_ctrl, page, eb->start, eb->len,
4545 			eb->start - page_offset(page),
4546 			end_bio_subpage_eb_writepage, 0, 0, false);
4547 	if (ret) {
4548 		btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
4549 		set_btree_ioerr(page, eb);
4550 		unlock_page(page);
4551 
4552 		if (atomic_dec_and_test(&eb->io_pages))
4553 			end_extent_buffer_writeback(eb);
4554 		return -EIO;
4555 	}
4556 	unlock_page(page);
4557 	/*
4558 	 * Submission finished without problem, if no range of the page is
4559 	 * dirty anymore, we have submitted a page.  Update nr_written in wbc.
4560 	 */
4561 	if (no_dirty_ebs)
4562 		update_nr_written(wbc, 1);
4563 	return ret;
4564 }
4565 
4566 static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
4567 			struct writeback_control *wbc,
4568 			struct extent_page_data *epd)
4569 {
4570 	u64 disk_bytenr = eb->start;
4571 	int i, num_pages;
4572 	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4573 	int ret = 0;
4574 
4575 	prepare_eb_write(eb);
4576 
4577 	num_pages = num_extent_pages(eb);
4578 	for (i = 0; i < num_pages; i++) {
4579 		struct page *p = eb->pages[i];
4580 
4581 		clear_page_dirty_for_io(p);
4582 		set_page_writeback(p);
4583 		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4584 					 &epd->bio_ctrl, p, disk_bytenr,
4585 					 PAGE_SIZE, 0,
4586 					 end_bio_extent_buffer_writepage,
4587 					 0, 0, false);
4588 		if (ret) {
4589 			set_btree_ioerr(p, eb);
4590 			if (PageWriteback(p))
4591 				end_page_writeback(p);
4592 			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
4593 				end_extent_buffer_writeback(eb);
4594 			ret = -EIO;
4595 			break;
4596 		}
4597 		disk_bytenr += PAGE_SIZE;
4598 		update_nr_written(wbc, 1);
4599 		unlock_page(p);
4600 	}
4601 
4602 	if (unlikely(ret)) {
4603 		for (; i < num_pages; i++) {
4604 			struct page *p = eb->pages[i];
4605 			clear_page_dirty_for_io(p);
4606 			unlock_page(p);
4607 		}
4608 	}
4609 
4610 	return ret;
4611 }
4612 
4613 /*
4614  * Submit one subpage btree page.
4615  *
4616  * The main difference to submit_eb_page() is:
4617  * - Page locking
4618  *   For subpage, we don't rely on page locking at all.
4619  *
4620  * - Flush write bio
4621  *   We only flush bio if we may be unable to fit current extent buffers into
4622  *   current bio.
4623  *
4624  * Return >=0 for the number of submitted extent buffers.
4625  * Return <0 for fatal error.
4626  */
4627 static int submit_eb_subpage(struct page *page,
4628 			     struct writeback_control *wbc,
4629 			     struct extent_page_data *epd)
4630 {
4631 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4632 	int submitted = 0;
4633 	u64 page_start = page_offset(page);
4634 	int bit_start = 0;
4635 	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
4636 	int ret;
4637 
4638 	/* Lock and write each dirty extent buffers in the range */
4639 	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
4640 		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
4641 		struct extent_buffer *eb;
4642 		unsigned long flags;
4643 		u64 start;
4644 
4645 		/*
4646 		 * Take private lock to ensure the subpage won't be detached
4647 		 * in the meantime.
4648 		 */
4649 		spin_lock(&page->mapping->private_lock);
4650 		if (!PagePrivate(page)) {
4651 			spin_unlock(&page->mapping->private_lock);
4652 			break;
4653 		}
4654 		spin_lock_irqsave(&subpage->lock, flags);
4655 		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
4656 			      subpage->bitmaps)) {
4657 			spin_unlock_irqrestore(&subpage->lock, flags);
4658 			spin_unlock(&page->mapping->private_lock);
4659 			bit_start++;
4660 			continue;
4661 		}
4662 
4663 		start = page_start + bit_start * fs_info->sectorsize;
4664 		bit_start += sectors_per_node;
4665 
4666 		/*
4667 		 * Here we just want to grab the eb without touching extra
4668 		 * spin locks, so call find_extent_buffer_nolock().
4669 		 */
4670 		eb = find_extent_buffer_nolock(fs_info, start);
4671 		spin_unlock_irqrestore(&subpage->lock, flags);
4672 		spin_unlock(&page->mapping->private_lock);
4673 
4674 		/*
4675 		 * The eb has already reached 0 refs thus find_extent_buffer()
4676 		 * doesn't return it. We don't need to write back such eb
4677 		 * anyway.
4678 		 */
4679 		if (!eb)
4680 			continue;
4681 
4682 		ret = lock_extent_buffer_for_io(eb, epd);
4683 		if (ret == 0) {
4684 			free_extent_buffer(eb);
4685 			continue;
4686 		}
4687 		if (ret < 0) {
4688 			free_extent_buffer(eb);
4689 			goto cleanup;
4690 		}
4691 		ret = write_one_subpage_eb(eb, wbc, epd);
4692 		free_extent_buffer(eb);
4693 		if (ret < 0)
4694 			goto cleanup;
4695 		submitted++;
4696 	}
4697 	return submitted;
4698 
4699 cleanup:
4700 	/* We hit error, end bio for the submitted extent buffers */
4701 	end_write_bio(epd, ret);
4702 	return ret;
4703 }
4704 
4705 /*
4706  * Submit all page(s) of one extent buffer.
4707  *
4708  * @page:	the page of one extent buffer
4709  * @eb_context:	to determine if we need to submit this page, if current page
4710  *		belongs to this eb, we don't need to submit
4711  *
4712  * The caller should pass each page in their bytenr order, and here we use
4713  * @eb_context to determine if we have submitted pages of one extent buffer.
4714  *
4715  * If we have, we just skip until we hit a new page that doesn't belong to
4716  * current @eb_context.
4717  *
4718  * If not, we submit all the page(s) of the extent buffer.
4719  *
4720  * Return >0 if we have submitted the extent buffer successfully.
4721  * Return 0 if we don't need to submit the page, as it's already submitted by
4722  * previous call.
4723  * Return <0 for fatal error.
4724  */
4725 static int submit_eb_page(struct page *page, struct writeback_control *wbc,
4726 			  struct extent_page_data *epd,
4727 			  struct extent_buffer **eb_context)
4728 {
4729 	struct address_space *mapping = page->mapping;
4730 	struct btrfs_block_group *cache = NULL;
4731 	struct extent_buffer *eb;
4732 	int ret;
4733 
4734 	if (!PagePrivate(page))
4735 		return 0;
4736 
4737 	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
4738 		return submit_eb_subpage(page, wbc, epd);
4739 
4740 	spin_lock(&mapping->private_lock);
4741 	if (!PagePrivate(page)) {
4742 		spin_unlock(&mapping->private_lock);
4743 		return 0;
4744 	}
4745 
4746 	eb = (struct extent_buffer *)page->private;
4747 
4748 	/*
4749 	 * Shouldn't happen and normally this would be a BUG_ON but no point
4750 	 * crashing the machine for something we can survive anyway.
4751 	 */
4752 	if (WARN_ON(!eb)) {
4753 		spin_unlock(&mapping->private_lock);
4754 		return 0;
4755 	}
4756 
4757 	if (eb == *eb_context) {
4758 		spin_unlock(&mapping->private_lock);
4759 		return 0;
4760 	}
4761 	ret = atomic_inc_not_zero(&eb->refs);
4762 	spin_unlock(&mapping->private_lock);
4763 	if (!ret)
4764 		return 0;
4765 
4766 	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
4767 		/*
4768 		 * If for_sync, this hole will be filled with
4769 		 * trasnsaction commit.
4770 		 */
4771 		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
4772 			ret = -EAGAIN;
4773 		else
4774 			ret = 0;
4775 		free_extent_buffer(eb);
4776 		return ret;
4777 	}
4778 
4779 	*eb_context = eb;
4780 
4781 	ret = lock_extent_buffer_for_io(eb, epd);
4782 	if (ret <= 0) {
4783 		btrfs_revert_meta_write_pointer(cache, eb);
4784 		if (cache)
4785 			btrfs_put_block_group(cache);
4786 		free_extent_buffer(eb);
4787 		return ret;
4788 	}
4789 	if (cache) {
4790 		/*
4791 		 * Implies write in zoned mode. Mark the last eb in a block group.
4792 		 */
4793 		if (cache->seq_zone && eb->start + eb->len == cache->zone_capacity)
4794 			set_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags);
4795 		btrfs_put_block_group(cache);
4796 	}
4797 	ret = write_one_eb(eb, wbc, epd);
4798 	free_extent_buffer(eb);
4799 	if (ret < 0)
4800 		return ret;
4801 	return 1;
4802 }
4803 
4804 int btree_write_cache_pages(struct address_space *mapping,
4805 				   struct writeback_control *wbc)
4806 {
4807 	struct extent_buffer *eb_context = NULL;
4808 	struct extent_page_data epd = {
4809 		.bio_ctrl = { 0 },
4810 		.extent_locked = 0,
4811 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4812 	};
4813 	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
4814 	int ret = 0;
4815 	int done = 0;
4816 	int nr_to_write_done = 0;
4817 	struct pagevec pvec;
4818 	int nr_pages;
4819 	pgoff_t index;
4820 	pgoff_t end;		/* Inclusive */
4821 	int scanned = 0;
4822 	xa_mark_t tag;
4823 
4824 	pagevec_init(&pvec);
4825 	if (wbc->range_cyclic) {
4826 		index = mapping->writeback_index; /* Start from prev offset */
4827 		end = -1;
4828 		/*
4829 		 * Start from the beginning does not need to cycle over the
4830 		 * range, mark it as scanned.
4831 		 */
4832 		scanned = (index == 0);
4833 	} else {
4834 		index = wbc->range_start >> PAGE_SHIFT;
4835 		end = wbc->range_end >> PAGE_SHIFT;
4836 		scanned = 1;
4837 	}
4838 	if (wbc->sync_mode == WB_SYNC_ALL)
4839 		tag = PAGECACHE_TAG_TOWRITE;
4840 	else
4841 		tag = PAGECACHE_TAG_DIRTY;
4842 	btrfs_zoned_meta_io_lock(fs_info);
4843 retry:
4844 	if (wbc->sync_mode == WB_SYNC_ALL)
4845 		tag_pages_for_writeback(mapping, index, end);
4846 	while (!done && !nr_to_write_done && (index <= end) &&
4847 	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
4848 			tag))) {
4849 		unsigned i;
4850 
4851 		for (i = 0; i < nr_pages; i++) {
4852 			struct page *page = pvec.pages[i];
4853 
4854 			ret = submit_eb_page(page, wbc, &epd, &eb_context);
4855 			if (ret == 0)
4856 				continue;
4857 			if (ret < 0) {
4858 				done = 1;
4859 				break;
4860 			}
4861 
4862 			/*
4863 			 * the filesystem may choose to bump up nr_to_write.
4864 			 * We have to make sure to honor the new nr_to_write
4865 			 * at any time
4866 			 */
4867 			nr_to_write_done = wbc->nr_to_write <= 0;
4868 		}
4869 		pagevec_release(&pvec);
4870 		cond_resched();
4871 	}
4872 	if (!scanned && !done) {
4873 		/*
4874 		 * We hit the last page and there is more work to be done: wrap
4875 		 * back to the start of the file
4876 		 */
4877 		scanned = 1;
4878 		index = 0;
4879 		goto retry;
4880 	}
4881 	if (ret < 0) {
4882 		end_write_bio(&epd, ret);
4883 		goto out;
4884 	}
4885 	/*
4886 	 * If something went wrong, don't allow any metadata write bio to be
4887 	 * submitted.
4888 	 *
4889 	 * This would prevent use-after-free if we had dirty pages not
4890 	 * cleaned up, which can still happen by fuzzed images.
4891 	 *
4892 	 * - Bad extent tree
4893 	 *   Allowing existing tree block to be allocated for other trees.
4894 	 *
4895 	 * - Log tree operations
4896 	 *   Exiting tree blocks get allocated to log tree, bumps its
4897 	 *   generation, then get cleaned in tree re-balance.
4898 	 *   Such tree block will not be written back, since it's clean,
4899 	 *   thus no WRITTEN flag set.
4900 	 *   And after log writes back, this tree block is not traced by
4901 	 *   any dirty extent_io_tree.
4902 	 *
4903 	 * - Offending tree block gets re-dirtied from its original owner
4904 	 *   Since it has bumped generation, no WRITTEN flag, it can be
4905 	 *   reused without COWing. This tree block will not be traced
4906 	 *   by btrfs_transaction::dirty_pages.
4907 	 *
4908 	 *   Now such dirty tree block will not be cleaned by any dirty
4909 	 *   extent io tree. Thus we don't want to submit such wild eb
4910 	 *   if the fs already has error.
4911 	 */
4912 	if (!BTRFS_FS_ERROR(fs_info)) {
4913 		ret = flush_write_bio(&epd);
4914 	} else {
4915 		ret = -EROFS;
4916 		end_write_bio(&epd, ret);
4917 	}
4918 out:
4919 	btrfs_zoned_meta_io_unlock(fs_info);
4920 	return ret;
4921 }
4922 
4923 /**
4924  * Walk the list of dirty pages of the given address space and write all of them.
4925  *
4926  * @mapping: address space structure to write
4927  * @wbc:     subtract the number of written pages from *@wbc->nr_to_write
4928  * @epd:     holds context for the write, namely the bio
4929  *
4930  * If a page is already under I/O, write_cache_pages() skips it, even
4931  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
4932  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
4933  * and msync() need to guarantee that all the data which was dirty at the time
4934  * the call was made get new I/O started against them.  If wbc->sync_mode is
4935  * WB_SYNC_ALL then we were called for data integrity and we must wait for
4936  * existing IO to complete.
4937  */
4938 static int extent_write_cache_pages(struct address_space *mapping,
4939 			     struct writeback_control *wbc,
4940 			     struct extent_page_data *epd)
4941 {
4942 	struct inode *inode = mapping->host;
4943 	int ret = 0;
4944 	int done = 0;
4945 	int nr_to_write_done = 0;
4946 	struct pagevec pvec;
4947 	int nr_pages;
4948 	pgoff_t index;
4949 	pgoff_t end;		/* Inclusive */
4950 	pgoff_t done_index;
4951 	int range_whole = 0;
4952 	int scanned = 0;
4953 	xa_mark_t tag;
4954 
4955 	/*
4956 	 * We have to hold onto the inode so that ordered extents can do their
4957 	 * work when the IO finishes.  The alternative to this is failing to add
4958 	 * an ordered extent if the igrab() fails there and that is a huge pain
4959 	 * to deal with, so instead just hold onto the inode throughout the
4960 	 * writepages operation.  If it fails here we are freeing up the inode
4961 	 * anyway and we'd rather not waste our time writing out stuff that is
4962 	 * going to be truncated anyway.
4963 	 */
4964 	if (!igrab(inode))
4965 		return 0;
4966 
4967 	pagevec_init(&pvec);
4968 	if (wbc->range_cyclic) {
4969 		index = mapping->writeback_index; /* Start from prev offset */
4970 		end = -1;
4971 		/*
4972 		 * Start from the beginning does not need to cycle over the
4973 		 * range, mark it as scanned.
4974 		 */
4975 		scanned = (index == 0);
4976 	} else {
4977 		index = wbc->range_start >> PAGE_SHIFT;
4978 		end = wbc->range_end >> PAGE_SHIFT;
4979 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4980 			range_whole = 1;
4981 		scanned = 1;
4982 	}
4983 
4984 	/*
4985 	 * We do the tagged writepage as long as the snapshot flush bit is set
4986 	 * and we are the first one who do the filemap_flush() on this inode.
4987 	 *
4988 	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4989 	 * not race in and drop the bit.
4990 	 */
4991 	if (range_whole && wbc->nr_to_write == LONG_MAX &&
4992 	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4993 			       &BTRFS_I(inode)->runtime_flags))
4994 		wbc->tagged_writepages = 1;
4995 
4996 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4997 		tag = PAGECACHE_TAG_TOWRITE;
4998 	else
4999 		tag = PAGECACHE_TAG_DIRTY;
5000 retry:
5001 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
5002 		tag_pages_for_writeback(mapping, index, end);
5003 	done_index = index;
5004 	while (!done && !nr_to_write_done && (index <= end) &&
5005 			(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
5006 						&index, end, tag))) {
5007 		unsigned i;
5008 
5009 		for (i = 0; i < nr_pages; i++) {
5010 			struct page *page = pvec.pages[i];
5011 
5012 			done_index = page->index + 1;
5013 			/*
5014 			 * At this point we hold neither the i_pages lock nor
5015 			 * the page lock: the page may be truncated or
5016 			 * invalidated (changing page->mapping to NULL),
5017 			 * or even swizzled back from swapper_space to
5018 			 * tmpfs file mapping
5019 			 */
5020 			if (!trylock_page(page)) {
5021 				ret = flush_write_bio(epd);
5022 				BUG_ON(ret < 0);
5023 				lock_page(page);
5024 			}
5025 
5026 			if (unlikely(page->mapping != mapping)) {
5027 				unlock_page(page);
5028 				continue;
5029 			}
5030 
5031 			if (wbc->sync_mode != WB_SYNC_NONE) {
5032 				if (PageWriteback(page)) {
5033 					ret = flush_write_bio(epd);
5034 					BUG_ON(ret < 0);
5035 				}
5036 				wait_on_page_writeback(page);
5037 			}
5038 
5039 			if (PageWriteback(page) ||
5040 			    !clear_page_dirty_for_io(page)) {
5041 				unlock_page(page);
5042 				continue;
5043 			}
5044 
5045 			ret = __extent_writepage(page, wbc, epd);
5046 			if (ret < 0) {
5047 				done = 1;
5048 				break;
5049 			}
5050 
5051 			/*
5052 			 * the filesystem may choose to bump up nr_to_write.
5053 			 * We have to make sure to honor the new nr_to_write
5054 			 * at any time
5055 			 */
5056 			nr_to_write_done = wbc->nr_to_write <= 0;
5057 		}
5058 		pagevec_release(&pvec);
5059 		cond_resched();
5060 	}
5061 	if (!scanned && !done) {
5062 		/*
5063 		 * We hit the last page and there is more work to be done: wrap
5064 		 * back to the start of the file
5065 		 */
5066 		scanned = 1;
5067 		index = 0;
5068 
5069 		/*
5070 		 * If we're looping we could run into a page that is locked by a
5071 		 * writer and that writer could be waiting on writeback for a
5072 		 * page in our current bio, and thus deadlock, so flush the
5073 		 * write bio here.
5074 		 */
5075 		ret = flush_write_bio(epd);
5076 		if (!ret)
5077 			goto retry;
5078 	}
5079 
5080 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
5081 		mapping->writeback_index = done_index;
5082 
5083 	btrfs_add_delayed_iput(inode);
5084 	return ret;
5085 }
5086 
5087 int extent_write_full_page(struct page *page, struct writeback_control *wbc)
5088 {
5089 	int ret;
5090 	struct extent_page_data epd = {
5091 		.bio_ctrl = { 0 },
5092 		.extent_locked = 0,
5093 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
5094 	};
5095 
5096 	ret = __extent_writepage(page, wbc, &epd);
5097 	ASSERT(ret <= 0);
5098 	if (ret < 0) {
5099 		end_write_bio(&epd, ret);
5100 		return ret;
5101 	}
5102 
5103 	ret = flush_write_bio(&epd);
5104 	ASSERT(ret <= 0);
5105 	return ret;
5106 }
5107 
5108 /*
5109  * Submit the pages in the range to bio for call sites which delalloc range has
5110  * already been ran (aka, ordered extent inserted) and all pages are still
5111  * locked.
5112  */
5113 int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
5114 {
5115 	bool found_error = false;
5116 	int first_error = 0;
5117 	int ret = 0;
5118 	struct address_space *mapping = inode->i_mapping;
5119 	struct page *page;
5120 	u64 cur = start;
5121 	unsigned long nr_pages;
5122 	const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
5123 	struct extent_page_data epd = {
5124 		.bio_ctrl = { 0 },
5125 		.extent_locked = 1,
5126 		.sync_io = 1,
5127 	};
5128 	struct writeback_control wbc_writepages = {
5129 		.sync_mode	= WB_SYNC_ALL,
5130 		.range_start	= start,
5131 		.range_end	= end + 1,
5132 		/* We're called from an async helper function */
5133 		.punt_to_cgroup	= 1,
5134 		.no_cgroup_owner = 1,
5135 	};
5136 
5137 	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
5138 	nr_pages = (round_up(end, PAGE_SIZE) - round_down(start, PAGE_SIZE)) >>
5139 		   PAGE_SHIFT;
5140 	wbc_writepages.nr_to_write = nr_pages * 2;
5141 
5142 	wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
5143 	while (cur <= end) {
5144 		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
5145 
5146 		page = find_get_page(mapping, cur >> PAGE_SHIFT);
5147 		/*
5148 		 * All pages in the range are locked since
5149 		 * btrfs_run_delalloc_range(), thus there is no way to clear
5150 		 * the page dirty flag.
5151 		 */
5152 		ASSERT(PageLocked(page));
5153 		ASSERT(PageDirty(page));
5154 		clear_page_dirty_for_io(page);
5155 		ret = __extent_writepage(page, &wbc_writepages, &epd);
5156 		ASSERT(ret <= 0);
5157 		if (ret < 0) {
5158 			found_error = true;
5159 			first_error = ret;
5160 		}
5161 		put_page(page);
5162 		cur = cur_end + 1;
5163 	}
5164 
5165 	if (!found_error)
5166 		ret = flush_write_bio(&epd);
5167 	else
5168 		end_write_bio(&epd, ret);
5169 
5170 	wbc_detach_inode(&wbc_writepages);
5171 	if (found_error)
5172 		return first_error;
5173 	return ret;
5174 }
5175 
5176 int extent_writepages(struct address_space *mapping,
5177 		      struct writeback_control *wbc)
5178 {
5179 	struct inode *inode = mapping->host;
5180 	int ret = 0;
5181 	struct extent_page_data epd = {
5182 		.bio_ctrl = { 0 },
5183 		.extent_locked = 0,
5184 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
5185 	};
5186 
5187 	/*
5188 	 * Allow only a single thread to do the reloc work in zoned mode to
5189 	 * protect the write pointer updates.
5190 	 */
5191 	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
5192 	ret = extent_write_cache_pages(mapping, wbc, &epd);
5193 	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
5194 	ASSERT(ret <= 0);
5195 	if (ret < 0) {
5196 		end_write_bio(&epd, ret);
5197 		return ret;
5198 	}
5199 	ret = flush_write_bio(&epd);
5200 	return ret;
5201 }
5202 
5203 void extent_readahead(struct readahead_control *rac)
5204 {
5205 	struct btrfs_bio_ctrl bio_ctrl = { 0 };
5206 	struct page *pagepool[16];
5207 	struct extent_map *em_cached = NULL;
5208 	u64 prev_em_start = (u64)-1;
5209 	int nr;
5210 
5211 	while ((nr = readahead_page_batch(rac, pagepool))) {
5212 		u64 contig_start = readahead_pos(rac);
5213 		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
5214 
5215 		contiguous_readpages(pagepool, nr, contig_start, contig_end,
5216 				&em_cached, &bio_ctrl, &prev_em_start);
5217 	}
5218 
5219 	if (em_cached)
5220 		free_extent_map(em_cached);
5221 
5222 	if (bio_ctrl.bio) {
5223 		if (submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags))
5224 			return;
5225 	}
5226 }
5227 
5228 /*
5229  * basic invalidate_folio code, this waits on any locked or writeback
5230  * ranges corresponding to the folio, and then deletes any extent state
5231  * records from the tree
5232  */
5233 int extent_invalidate_folio(struct extent_io_tree *tree,
5234 			  struct folio *folio, size_t offset)
5235 {
5236 	struct extent_state *cached_state = NULL;
5237 	u64 start = folio_pos(folio);
5238 	u64 end = start + folio_size(folio) - 1;
5239 	size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
5240 
5241 	/* This function is only called for the btree inode */
5242 	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
5243 
5244 	start += ALIGN(offset, blocksize);
5245 	if (start > end)
5246 		return 0;
5247 
5248 	lock_extent_bits(tree, start, end, &cached_state);
5249 	folio_wait_writeback(folio);
5250 
5251 	/*
5252 	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
5253 	 * so here we only need to unlock the extent range to free any
5254 	 * existing extent state.
5255 	 */
5256 	unlock_extent_cached(tree, start, end, &cached_state);
5257 	return 0;
5258 }
5259 
5260 /*
5261  * a helper for releasepage, this tests for areas of the page that
5262  * are locked or under IO and drops the related state bits if it is safe
5263  * to drop the page.
5264  */
5265 static int try_release_extent_state(struct extent_io_tree *tree,
5266 				    struct page *page, gfp_t mask)
5267 {
5268 	u64 start = page_offset(page);
5269 	u64 end = start + PAGE_SIZE - 1;
5270 	int ret = 1;
5271 
5272 	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
5273 		ret = 0;
5274 	} else {
5275 		/*
5276 		 * At this point we can safely clear everything except the
5277 		 * locked bit, the nodatasum bit and the delalloc new bit.
5278 		 * The delalloc new bit will be cleared by ordered extent
5279 		 * completion.
5280 		 */
5281 		ret = __clear_extent_bit(tree, start, end,
5282 			 ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
5283 			 0, 0, NULL, mask, NULL);
5284 
5285 		/* if clear_extent_bit failed for enomem reasons,
5286 		 * we can't allow the release to continue.
5287 		 */
5288 		if (ret < 0)
5289 			ret = 0;
5290 		else
5291 			ret = 1;
5292 	}
5293 	return ret;
5294 }
5295 
5296 /*
5297  * a helper for releasepage.  As long as there are no locked extents
5298  * in the range corresponding to the page, both state records and extent
5299  * map records are removed
5300  */
5301 int try_release_extent_mapping(struct page *page, gfp_t mask)
5302 {
5303 	struct extent_map *em;
5304 	u64 start = page_offset(page);
5305 	u64 end = start + PAGE_SIZE - 1;
5306 	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
5307 	struct extent_io_tree *tree = &btrfs_inode->io_tree;
5308 	struct extent_map_tree *map = &btrfs_inode->extent_tree;
5309 
5310 	if (gfpflags_allow_blocking(mask) &&
5311 	    page->mapping->host->i_size > SZ_16M) {
5312 		u64 len;
5313 		while (start <= end) {
5314 			struct btrfs_fs_info *fs_info;
5315 			u64 cur_gen;
5316 
5317 			len = end - start + 1;
5318 			write_lock(&map->lock);
5319 			em = lookup_extent_mapping(map, start, len);
5320 			if (!em) {
5321 				write_unlock(&map->lock);
5322 				break;
5323 			}
5324 			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
5325 			    em->start != start) {
5326 				write_unlock(&map->lock);
5327 				free_extent_map(em);
5328 				break;
5329 			}
5330 			if (test_range_bit(tree, em->start,
5331 					   extent_map_end(em) - 1,
5332 					   EXTENT_LOCKED, 0, NULL))
5333 				goto next;
5334 			/*
5335 			 * If it's not in the list of modified extents, used
5336 			 * by a fast fsync, we can remove it. If it's being
5337 			 * logged we can safely remove it since fsync took an
5338 			 * extra reference on the em.
5339 			 */
5340 			if (list_empty(&em->list) ||
5341 			    test_bit(EXTENT_FLAG_LOGGING, &em->flags))
5342 				goto remove_em;
5343 			/*
5344 			 * If it's in the list of modified extents, remove it
5345 			 * only if its generation is older then the current one,
5346 			 * in which case we don't need it for a fast fsync.
5347 			 * Otherwise don't remove it, we could be racing with an
5348 			 * ongoing fast fsync that could miss the new extent.
5349 			 */
5350 			fs_info = btrfs_inode->root->fs_info;
5351 			spin_lock(&fs_info->trans_lock);
5352 			cur_gen = fs_info->generation;
5353 			spin_unlock(&fs_info->trans_lock);
5354 			if (em->generation >= cur_gen)
5355 				goto next;
5356 remove_em:
5357 			/*
5358 			 * We only remove extent maps that are not in the list of
5359 			 * modified extents or that are in the list but with a
5360 			 * generation lower then the current generation, so there
5361 			 * is no need to set the full fsync flag on the inode (it
5362 			 * hurts the fsync performance for workloads with a data
5363 			 * size that exceeds or is close to the system's memory).
5364 			 */
5365 			remove_extent_mapping(map, em);
5366 			/* once for the rb tree */
5367 			free_extent_map(em);
5368 next:
5369 			start = extent_map_end(em);
5370 			write_unlock(&map->lock);
5371 
5372 			/* once for us */
5373 			free_extent_map(em);
5374 
5375 			cond_resched(); /* Allow large-extent preemption. */
5376 		}
5377 	}
5378 	return try_release_extent_state(tree, page, mask);
5379 }
5380 
5381 /*
5382  * helper function for fiemap, which doesn't want to see any holes.
5383  * This maps until we find something past 'last'
5384  */
5385 static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
5386 						u64 offset, u64 last)
5387 {
5388 	u64 sectorsize = btrfs_inode_sectorsize(inode);
5389 	struct extent_map *em;
5390 	u64 len;
5391 
5392 	if (offset >= last)
5393 		return NULL;
5394 
5395 	while (1) {
5396 		len = last - offset;
5397 		if (len == 0)
5398 			break;
5399 		len = ALIGN(len, sectorsize);
5400 		em = btrfs_get_extent_fiemap(inode, offset, len);
5401 		if (IS_ERR(em))
5402 			return em;
5403 
5404 		/* if this isn't a hole return it */
5405 		if (em->block_start != EXTENT_MAP_HOLE)
5406 			return em;
5407 
5408 		/* this is a hole, advance to the next extent */
5409 		offset = extent_map_end(em);
5410 		free_extent_map(em);
5411 		if (offset >= last)
5412 			break;
5413 	}
5414 	return NULL;
5415 }
5416 
5417 /*
5418  * To cache previous fiemap extent
5419  *
5420  * Will be used for merging fiemap extent
5421  */
5422 struct fiemap_cache {
5423 	u64 offset;
5424 	u64 phys;
5425 	u64 len;
5426 	u32 flags;
5427 	bool cached;
5428 };
5429 
5430 /*
5431  * Helper to submit fiemap extent.
5432  *
5433  * Will try to merge current fiemap extent specified by @offset, @phys,
5434  * @len and @flags with cached one.
5435  * And only when we fails to merge, cached one will be submitted as
5436  * fiemap extent.
5437  *
5438  * Return value is the same as fiemap_fill_next_extent().
5439  */
5440 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
5441 				struct fiemap_cache *cache,
5442 				u64 offset, u64 phys, u64 len, u32 flags)
5443 {
5444 	int ret = 0;
5445 
5446 	if (!cache->cached)
5447 		goto assign;
5448 
5449 	/*
5450 	 * Sanity check, extent_fiemap() should have ensured that new
5451 	 * fiemap extent won't overlap with cached one.
5452 	 * Not recoverable.
5453 	 *
5454 	 * NOTE: Physical address can overlap, due to compression
5455 	 */
5456 	if (cache->offset + cache->len > offset) {
5457 		WARN_ON(1);
5458 		return -EINVAL;
5459 	}
5460 
5461 	/*
5462 	 * Only merges fiemap extents if
5463 	 * 1) Their logical addresses are continuous
5464 	 *
5465 	 * 2) Their physical addresses are continuous
5466 	 *    So truly compressed (physical size smaller than logical size)
5467 	 *    extents won't get merged with each other
5468 	 *
5469 	 * 3) Share same flags except FIEMAP_EXTENT_LAST
5470 	 *    So regular extent won't get merged with prealloc extent
5471 	 */
5472 	if (cache->offset + cache->len  == offset &&
5473 	    cache->phys + cache->len == phys  &&
5474 	    (cache->flags & ~FIEMAP_EXTENT_LAST) ==
5475 			(flags & ~FIEMAP_EXTENT_LAST)) {
5476 		cache->len += len;
5477 		cache->flags |= flags;
5478 		goto try_submit_last;
5479 	}
5480 
5481 	/* Not mergeable, need to submit cached one */
5482 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5483 				      cache->len, cache->flags);
5484 	cache->cached = false;
5485 	if (ret)
5486 		return ret;
5487 assign:
5488 	cache->cached = true;
5489 	cache->offset = offset;
5490 	cache->phys = phys;
5491 	cache->len = len;
5492 	cache->flags = flags;
5493 try_submit_last:
5494 	if (cache->flags & FIEMAP_EXTENT_LAST) {
5495 		ret = fiemap_fill_next_extent(fieinfo, cache->offset,
5496 				cache->phys, cache->len, cache->flags);
5497 		cache->cached = false;
5498 	}
5499 	return ret;
5500 }
5501 
5502 /*
5503  * Emit last fiemap cache
5504  *
5505  * The last fiemap cache may still be cached in the following case:
5506  * 0		      4k		    8k
5507  * |<- Fiemap range ->|
5508  * |<------------  First extent ----------->|
5509  *
5510  * In this case, the first extent range will be cached but not emitted.
5511  * So we must emit it before ending extent_fiemap().
5512  */
5513 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
5514 				  struct fiemap_cache *cache)
5515 {
5516 	int ret;
5517 
5518 	if (!cache->cached)
5519 		return 0;
5520 
5521 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5522 				      cache->len, cache->flags);
5523 	cache->cached = false;
5524 	if (ret > 0)
5525 		ret = 0;
5526 	return ret;
5527 }
5528 
5529 int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
5530 		  u64 start, u64 len)
5531 {
5532 	int ret = 0;
5533 	u64 off;
5534 	u64 max = start + len;
5535 	u32 flags = 0;
5536 	u32 found_type;
5537 	u64 last;
5538 	u64 last_for_get_extent = 0;
5539 	u64 disko = 0;
5540 	u64 isize = i_size_read(&inode->vfs_inode);
5541 	struct btrfs_key found_key;
5542 	struct extent_map *em = NULL;
5543 	struct extent_state *cached_state = NULL;
5544 	struct btrfs_path *path;
5545 	struct btrfs_root *root = inode->root;
5546 	struct fiemap_cache cache = { 0 };
5547 	struct ulist *roots;
5548 	struct ulist *tmp_ulist;
5549 	int end = 0;
5550 	u64 em_start = 0;
5551 	u64 em_len = 0;
5552 	u64 em_end = 0;
5553 
5554 	if (len == 0)
5555 		return -EINVAL;
5556 
5557 	path = btrfs_alloc_path();
5558 	if (!path)
5559 		return -ENOMEM;
5560 
5561 	roots = ulist_alloc(GFP_KERNEL);
5562 	tmp_ulist = ulist_alloc(GFP_KERNEL);
5563 	if (!roots || !tmp_ulist) {
5564 		ret = -ENOMEM;
5565 		goto out_free_ulist;
5566 	}
5567 
5568 	/*
5569 	 * We can't initialize that to 'start' as this could miss extents due
5570 	 * to extent item merging
5571 	 */
5572 	off = 0;
5573 	start = round_down(start, btrfs_inode_sectorsize(inode));
5574 	len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
5575 
5576 	/*
5577 	 * lookup the last file extent.  We're not using i_size here
5578 	 * because there might be preallocation past i_size
5579 	 */
5580 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
5581 				       0);
5582 	if (ret < 0) {
5583 		goto out_free_ulist;
5584 	} else {
5585 		WARN_ON(!ret);
5586 		if (ret == 1)
5587 			ret = 0;
5588 	}
5589 
5590 	path->slots[0]--;
5591 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
5592 	found_type = found_key.type;
5593 
5594 	/* No extents, but there might be delalloc bits */
5595 	if (found_key.objectid != btrfs_ino(inode) ||
5596 	    found_type != BTRFS_EXTENT_DATA_KEY) {
5597 		/* have to trust i_size as the end */
5598 		last = (u64)-1;
5599 		last_for_get_extent = isize;
5600 	} else {
5601 		/*
5602 		 * remember the start of the last extent.  There are a
5603 		 * bunch of different factors that go into the length of the
5604 		 * extent, so its much less complex to remember where it started
5605 		 */
5606 		last = found_key.offset;
5607 		last_for_get_extent = last + 1;
5608 	}
5609 	btrfs_release_path(path);
5610 
5611 	/*
5612 	 * we might have some extents allocated but more delalloc past those
5613 	 * extents.  so, we trust isize unless the start of the last extent is
5614 	 * beyond isize
5615 	 */
5616 	if (last < isize) {
5617 		last = (u64)-1;
5618 		last_for_get_extent = isize;
5619 	}
5620 
5621 	lock_extent_bits(&inode->io_tree, start, start + len - 1,
5622 			 &cached_state);
5623 
5624 	em = get_extent_skip_holes(inode, start, last_for_get_extent);
5625 	if (!em)
5626 		goto out;
5627 	if (IS_ERR(em)) {
5628 		ret = PTR_ERR(em);
5629 		goto out;
5630 	}
5631 
5632 	while (!end) {
5633 		u64 offset_in_extent = 0;
5634 
5635 		/* break if the extent we found is outside the range */
5636 		if (em->start >= max || extent_map_end(em) < off)
5637 			break;
5638 
5639 		/*
5640 		 * get_extent may return an extent that starts before our
5641 		 * requested range.  We have to make sure the ranges
5642 		 * we return to fiemap always move forward and don't
5643 		 * overlap, so adjust the offsets here
5644 		 */
5645 		em_start = max(em->start, off);
5646 
5647 		/*
5648 		 * record the offset from the start of the extent
5649 		 * for adjusting the disk offset below.  Only do this if the
5650 		 * extent isn't compressed since our in ram offset may be past
5651 		 * what we have actually allocated on disk.
5652 		 */
5653 		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5654 			offset_in_extent = em_start - em->start;
5655 		em_end = extent_map_end(em);
5656 		em_len = em_end - em_start;
5657 		flags = 0;
5658 		if (em->block_start < EXTENT_MAP_LAST_BYTE)
5659 			disko = em->block_start + offset_in_extent;
5660 		else
5661 			disko = 0;
5662 
5663 		/*
5664 		 * bump off for our next call to get_extent
5665 		 */
5666 		off = extent_map_end(em);
5667 		if (off >= max)
5668 			end = 1;
5669 
5670 		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
5671 			end = 1;
5672 			flags |= FIEMAP_EXTENT_LAST;
5673 		} else if (em->block_start == EXTENT_MAP_INLINE) {
5674 			flags |= (FIEMAP_EXTENT_DATA_INLINE |
5675 				  FIEMAP_EXTENT_NOT_ALIGNED);
5676 		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
5677 			flags |= (FIEMAP_EXTENT_DELALLOC |
5678 				  FIEMAP_EXTENT_UNKNOWN);
5679 		} else if (fieinfo->fi_extents_max) {
5680 			u64 bytenr = em->block_start -
5681 				(em->start - em->orig_start);
5682 
5683 			/*
5684 			 * As btrfs supports shared space, this information
5685 			 * can be exported to userspace tools via
5686 			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
5687 			 * then we're just getting a count and we can skip the
5688 			 * lookup stuff.
5689 			 */
5690 			ret = btrfs_check_shared(root, btrfs_ino(inode),
5691 						 bytenr, roots, tmp_ulist);
5692 			if (ret < 0)
5693 				goto out_free;
5694 			if (ret)
5695 				flags |= FIEMAP_EXTENT_SHARED;
5696 			ret = 0;
5697 		}
5698 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5699 			flags |= FIEMAP_EXTENT_ENCODED;
5700 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5701 			flags |= FIEMAP_EXTENT_UNWRITTEN;
5702 
5703 		free_extent_map(em);
5704 		em = NULL;
5705 		if ((em_start >= last) || em_len == (u64)-1 ||
5706 		   (last == (u64)-1 && isize <= em_end)) {
5707 			flags |= FIEMAP_EXTENT_LAST;
5708 			end = 1;
5709 		}
5710 
5711 		/* now scan forward to see if this is really the last extent. */
5712 		em = get_extent_skip_holes(inode, off, last_for_get_extent);
5713 		if (IS_ERR(em)) {
5714 			ret = PTR_ERR(em);
5715 			goto out;
5716 		}
5717 		if (!em) {
5718 			flags |= FIEMAP_EXTENT_LAST;
5719 			end = 1;
5720 		}
5721 		ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
5722 					   em_len, flags);
5723 		if (ret) {
5724 			if (ret == 1)
5725 				ret = 0;
5726 			goto out_free;
5727 		}
5728 	}
5729 out_free:
5730 	if (!ret)
5731 		ret = emit_last_fiemap_cache(fieinfo, &cache);
5732 	free_extent_map(em);
5733 out:
5734 	unlock_extent_cached(&inode->io_tree, start, start + len - 1,
5735 			     &cached_state);
5736 
5737 out_free_ulist:
5738 	btrfs_free_path(path);
5739 	ulist_free(roots);
5740 	ulist_free(tmp_ulist);
5741 	return ret;
5742 }
5743 
5744 static void __free_extent_buffer(struct extent_buffer *eb)
5745 {
5746 	kmem_cache_free(extent_buffer_cache, eb);
5747 }
5748 
5749 int extent_buffer_under_io(const struct extent_buffer *eb)
5750 {
5751 	return (atomic_read(&eb->io_pages) ||
5752 		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
5753 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
5754 }
5755 
5756 static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
5757 {
5758 	struct btrfs_subpage *subpage;
5759 
5760 	lockdep_assert_held(&page->mapping->private_lock);
5761 
5762 	if (PagePrivate(page)) {
5763 		subpage = (struct btrfs_subpage *)page->private;
5764 		if (atomic_read(&subpage->eb_refs))
5765 			return true;
5766 		/*
5767 		 * Even there is no eb refs here, we may still have
5768 		 * end_page_read() call relying on page::private.
5769 		 */
5770 		if (atomic_read(&subpage->readers))
5771 			return true;
5772 	}
5773 	return false;
5774 }
5775 
5776 static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
5777 {
5778 	struct btrfs_fs_info *fs_info = eb->fs_info;
5779 	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5780 
5781 	/*
5782 	 * For mapped eb, we're going to change the page private, which should
5783 	 * be done under the private_lock.
5784 	 */
5785 	if (mapped)
5786 		spin_lock(&page->mapping->private_lock);
5787 
5788 	if (!PagePrivate(page)) {
5789 		if (mapped)
5790 			spin_unlock(&page->mapping->private_lock);
5791 		return;
5792 	}
5793 
5794 	if (fs_info->sectorsize == PAGE_SIZE) {
5795 		/*
5796 		 * We do this since we'll remove the pages after we've
5797 		 * removed the eb from the radix tree, so we could race
5798 		 * and have this page now attached to the new eb.  So
5799 		 * only clear page_private if it's still connected to
5800 		 * this eb.
5801 		 */
5802 		if (PagePrivate(page) &&
5803 		    page->private == (unsigned long)eb) {
5804 			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
5805 			BUG_ON(PageDirty(page));
5806 			BUG_ON(PageWriteback(page));
5807 			/*
5808 			 * We need to make sure we haven't be attached
5809 			 * to a new eb.
5810 			 */
5811 			detach_page_private(page);
5812 		}
5813 		if (mapped)
5814 			spin_unlock(&page->mapping->private_lock);
5815 		return;
5816 	}
5817 
5818 	/*
5819 	 * For subpage, we can have dummy eb with page private.  In this case,
5820 	 * we can directly detach the private as such page is only attached to
5821 	 * one dummy eb, no sharing.
5822 	 */
5823 	if (!mapped) {
5824 		btrfs_detach_subpage(fs_info, page);
5825 		return;
5826 	}
5827 
5828 	btrfs_page_dec_eb_refs(fs_info, page);
5829 
5830 	/*
5831 	 * We can only detach the page private if there are no other ebs in the
5832 	 * page range and no unfinished IO.
5833 	 */
5834 	if (!page_range_has_eb(fs_info, page))
5835 		btrfs_detach_subpage(fs_info, page);
5836 
5837 	spin_unlock(&page->mapping->private_lock);
5838 }
5839 
5840 /* Release all pages attached to the extent buffer */
5841 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
5842 {
5843 	int i;
5844 	int num_pages;
5845 
5846 	ASSERT(!extent_buffer_under_io(eb));
5847 
5848 	num_pages = num_extent_pages(eb);
5849 	for (i = 0; i < num_pages; i++) {
5850 		struct page *page = eb->pages[i];
5851 
5852 		if (!page)
5853 			continue;
5854 
5855 		detach_extent_buffer_page(eb, page);
5856 
5857 		/* One for when we allocated the page */
5858 		put_page(page);
5859 	}
5860 }
5861 
5862 /*
5863  * Helper for releasing the extent buffer.
5864  */
5865 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
5866 {
5867 	btrfs_release_extent_buffer_pages(eb);
5868 	btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
5869 	__free_extent_buffer(eb);
5870 }
5871 
5872 static struct extent_buffer *
5873 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
5874 		      unsigned long len)
5875 {
5876 	struct extent_buffer *eb = NULL;
5877 
5878 	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
5879 	eb->start = start;
5880 	eb->len = len;
5881 	eb->fs_info = fs_info;
5882 	eb->bflags = 0;
5883 	init_rwsem(&eb->lock);
5884 
5885 	btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
5886 			     &fs_info->allocated_ebs);
5887 	INIT_LIST_HEAD(&eb->release_list);
5888 
5889 	spin_lock_init(&eb->refs_lock);
5890 	atomic_set(&eb->refs, 1);
5891 	atomic_set(&eb->io_pages, 0);
5892 
5893 	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
5894 
5895 	return eb;
5896 }
5897 
5898 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
5899 {
5900 	int i;
5901 	struct page *p;
5902 	struct extent_buffer *new;
5903 	int num_pages = num_extent_pages(src);
5904 
5905 	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
5906 	if (new == NULL)
5907 		return NULL;
5908 
5909 	/*
5910 	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
5911 	 * btrfs_release_extent_buffer() have different behavior for
5912 	 * UNMAPPED subpage extent buffer.
5913 	 */
5914 	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
5915 
5916 	for (i = 0; i < num_pages; i++) {
5917 		int ret;
5918 
5919 		p = alloc_page(GFP_NOFS);
5920 		if (!p) {
5921 			btrfs_release_extent_buffer(new);
5922 			return NULL;
5923 		}
5924 		ret = attach_extent_buffer_page(new, p, NULL);
5925 		if (ret < 0) {
5926 			put_page(p);
5927 			btrfs_release_extent_buffer(new);
5928 			return NULL;
5929 		}
5930 		WARN_ON(PageDirty(p));
5931 		new->pages[i] = p;
5932 		copy_page(page_address(p), page_address(src->pages[i]));
5933 	}
5934 	set_extent_buffer_uptodate(new);
5935 
5936 	return new;
5937 }
5938 
5939 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5940 						  u64 start, unsigned long len)
5941 {
5942 	struct extent_buffer *eb;
5943 	int num_pages;
5944 	int i;
5945 
5946 	eb = __alloc_extent_buffer(fs_info, start, len);
5947 	if (!eb)
5948 		return NULL;
5949 
5950 	num_pages = num_extent_pages(eb);
5951 	for (i = 0; i < num_pages; i++) {
5952 		int ret;
5953 
5954 		eb->pages[i] = alloc_page(GFP_NOFS);
5955 		if (!eb->pages[i])
5956 			goto err;
5957 		ret = attach_extent_buffer_page(eb, eb->pages[i], NULL);
5958 		if (ret < 0)
5959 			goto err;
5960 	}
5961 	set_extent_buffer_uptodate(eb);
5962 	btrfs_set_header_nritems(eb, 0);
5963 	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5964 
5965 	return eb;
5966 err:
5967 	for (; i > 0; i--) {
5968 		detach_extent_buffer_page(eb, eb->pages[i - 1]);
5969 		__free_page(eb->pages[i - 1]);
5970 	}
5971 	__free_extent_buffer(eb);
5972 	return NULL;
5973 }
5974 
5975 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5976 						u64 start)
5977 {
5978 	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
5979 }
5980 
5981 static void check_buffer_tree_ref(struct extent_buffer *eb)
5982 {
5983 	int refs;
5984 	/*
5985 	 * The TREE_REF bit is first set when the extent_buffer is added
5986 	 * to the radix tree. It is also reset, if unset, when a new reference
5987 	 * is created by find_extent_buffer.
5988 	 *
5989 	 * It is only cleared in two cases: freeing the last non-tree
5990 	 * reference to the extent_buffer when its STALE bit is set or
5991 	 * calling releasepage when the tree reference is the only reference.
5992 	 *
5993 	 * In both cases, care is taken to ensure that the extent_buffer's
5994 	 * pages are not under io. However, releasepage can be concurrently
5995 	 * called with creating new references, which is prone to race
5996 	 * conditions between the calls to check_buffer_tree_ref in those
5997 	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
5998 	 *
5999 	 * The actual lifetime of the extent_buffer in the radix tree is
6000 	 * adequately protected by the refcount, but the TREE_REF bit and
6001 	 * its corresponding reference are not. To protect against this
6002 	 * class of races, we call check_buffer_tree_ref from the codepaths
6003 	 * which trigger io after they set eb->io_pages. Note that once io is
6004 	 * initiated, TREE_REF can no longer be cleared, so that is the
6005 	 * moment at which any such race is best fixed.
6006 	 */
6007 	refs = atomic_read(&eb->refs);
6008 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6009 		return;
6010 
6011 	spin_lock(&eb->refs_lock);
6012 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6013 		atomic_inc(&eb->refs);
6014 	spin_unlock(&eb->refs_lock);
6015 }
6016 
6017 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
6018 		struct page *accessed)
6019 {
6020 	int num_pages, i;
6021 
6022 	check_buffer_tree_ref(eb);
6023 
6024 	num_pages = num_extent_pages(eb);
6025 	for (i = 0; i < num_pages; i++) {
6026 		struct page *p = eb->pages[i];
6027 
6028 		if (p != accessed)
6029 			mark_page_accessed(p);
6030 	}
6031 }
6032 
6033 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
6034 					 u64 start)
6035 {
6036 	struct extent_buffer *eb;
6037 
6038 	eb = find_extent_buffer_nolock(fs_info, start);
6039 	if (!eb)
6040 		return NULL;
6041 	/*
6042 	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
6043 	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
6044 	 * another task running free_extent_buffer() might have seen that flag
6045 	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
6046 	 * writeback flags not set) and it's still in the tree (flag
6047 	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
6048 	 * decrementing the extent buffer's reference count twice.  So here we
6049 	 * could race and increment the eb's reference count, clear its stale
6050 	 * flag, mark it as dirty and drop our reference before the other task
6051 	 * finishes executing free_extent_buffer, which would later result in
6052 	 * an attempt to free an extent buffer that is dirty.
6053 	 */
6054 	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
6055 		spin_lock(&eb->refs_lock);
6056 		spin_unlock(&eb->refs_lock);
6057 	}
6058 	mark_extent_buffer_accessed(eb, NULL);
6059 	return eb;
6060 }
6061 
6062 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
6063 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
6064 					u64 start)
6065 {
6066 	struct extent_buffer *eb, *exists = NULL;
6067 	int ret;
6068 
6069 	eb = find_extent_buffer(fs_info, start);
6070 	if (eb)
6071 		return eb;
6072 	eb = alloc_dummy_extent_buffer(fs_info, start);
6073 	if (!eb)
6074 		return ERR_PTR(-ENOMEM);
6075 	eb->fs_info = fs_info;
6076 again:
6077 	ret = radix_tree_preload(GFP_NOFS);
6078 	if (ret) {
6079 		exists = ERR_PTR(ret);
6080 		goto free_eb;
6081 	}
6082 	spin_lock(&fs_info->buffer_lock);
6083 	ret = radix_tree_insert(&fs_info->buffer_radix,
6084 				start >> fs_info->sectorsize_bits, eb);
6085 	spin_unlock(&fs_info->buffer_lock);
6086 	radix_tree_preload_end();
6087 	if (ret == -EEXIST) {
6088 		exists = find_extent_buffer(fs_info, start);
6089 		if (exists)
6090 			goto free_eb;
6091 		else
6092 			goto again;
6093 	}
6094 	check_buffer_tree_ref(eb);
6095 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
6096 
6097 	return eb;
6098 free_eb:
6099 	btrfs_release_extent_buffer(eb);
6100 	return exists;
6101 }
6102 #endif
6103 
6104 static struct extent_buffer *grab_extent_buffer(
6105 		struct btrfs_fs_info *fs_info, struct page *page)
6106 {
6107 	struct extent_buffer *exists;
6108 
6109 	/*
6110 	 * For subpage case, we completely rely on radix tree to ensure we
6111 	 * don't try to insert two ebs for the same bytenr.  So here we always
6112 	 * return NULL and just continue.
6113 	 */
6114 	if (fs_info->sectorsize < PAGE_SIZE)
6115 		return NULL;
6116 
6117 	/* Page not yet attached to an extent buffer */
6118 	if (!PagePrivate(page))
6119 		return NULL;
6120 
6121 	/*
6122 	 * We could have already allocated an eb for this page and attached one
6123 	 * so lets see if we can get a ref on the existing eb, and if we can we
6124 	 * know it's good and we can just return that one, else we know we can
6125 	 * just overwrite page->private.
6126 	 */
6127 	exists = (struct extent_buffer *)page->private;
6128 	if (atomic_inc_not_zero(&exists->refs))
6129 		return exists;
6130 
6131 	WARN_ON(PageDirty(page));
6132 	detach_page_private(page);
6133 	return NULL;
6134 }
6135 
6136 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
6137 					  u64 start, u64 owner_root, int level)
6138 {
6139 	unsigned long len = fs_info->nodesize;
6140 	int num_pages;
6141 	int i;
6142 	unsigned long index = start >> PAGE_SHIFT;
6143 	struct extent_buffer *eb;
6144 	struct extent_buffer *exists = NULL;
6145 	struct page *p;
6146 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
6147 	int uptodate = 1;
6148 	int ret;
6149 
6150 	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
6151 		btrfs_err(fs_info, "bad tree block start %llu", start);
6152 		return ERR_PTR(-EINVAL);
6153 	}
6154 
6155 #if BITS_PER_LONG == 32
6156 	if (start >= MAX_LFS_FILESIZE) {
6157 		btrfs_err_rl(fs_info,
6158 		"extent buffer %llu is beyond 32bit page cache limit", start);
6159 		btrfs_err_32bit_limit(fs_info);
6160 		return ERR_PTR(-EOVERFLOW);
6161 	}
6162 	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
6163 		btrfs_warn_32bit_limit(fs_info);
6164 #endif
6165 
6166 	if (fs_info->sectorsize < PAGE_SIZE &&
6167 	    offset_in_page(start) + len > PAGE_SIZE) {
6168 		btrfs_err(fs_info,
6169 		"tree block crosses page boundary, start %llu nodesize %lu",
6170 			  start, len);
6171 		return ERR_PTR(-EINVAL);
6172 	}
6173 
6174 	eb = find_extent_buffer(fs_info, start);
6175 	if (eb)
6176 		return eb;
6177 
6178 	eb = __alloc_extent_buffer(fs_info, start, len);
6179 	if (!eb)
6180 		return ERR_PTR(-ENOMEM);
6181 	btrfs_set_buffer_lockdep_class(owner_root, eb, level);
6182 
6183 	num_pages = num_extent_pages(eb);
6184 	for (i = 0; i < num_pages; i++, index++) {
6185 		struct btrfs_subpage *prealloc = NULL;
6186 
6187 		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
6188 		if (!p) {
6189 			exists = ERR_PTR(-ENOMEM);
6190 			goto free_eb;
6191 		}
6192 
6193 		/*
6194 		 * Preallocate page->private for subpage case, so that we won't
6195 		 * allocate memory with private_lock hold.  The memory will be
6196 		 * freed by attach_extent_buffer_page() or freed manually if
6197 		 * we exit earlier.
6198 		 *
6199 		 * Although we have ensured one subpage eb can only have one
6200 		 * page, but it may change in the future for 16K page size
6201 		 * support, so we still preallocate the memory in the loop.
6202 		 */
6203 		if (fs_info->sectorsize < PAGE_SIZE) {
6204 			prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
6205 			if (IS_ERR(prealloc)) {
6206 				ret = PTR_ERR(prealloc);
6207 				unlock_page(p);
6208 				put_page(p);
6209 				exists = ERR_PTR(ret);
6210 				goto free_eb;
6211 			}
6212 		}
6213 
6214 		spin_lock(&mapping->private_lock);
6215 		exists = grab_extent_buffer(fs_info, p);
6216 		if (exists) {
6217 			spin_unlock(&mapping->private_lock);
6218 			unlock_page(p);
6219 			put_page(p);
6220 			mark_extent_buffer_accessed(exists, p);
6221 			btrfs_free_subpage(prealloc);
6222 			goto free_eb;
6223 		}
6224 		/* Should not fail, as we have preallocated the memory */
6225 		ret = attach_extent_buffer_page(eb, p, prealloc);
6226 		ASSERT(!ret);
6227 		/*
6228 		 * To inform we have extra eb under allocation, so that
6229 		 * detach_extent_buffer_page() won't release the page private
6230 		 * when the eb hasn't yet been inserted into radix tree.
6231 		 *
6232 		 * The ref will be decreased when the eb released the page, in
6233 		 * detach_extent_buffer_page().
6234 		 * Thus needs no special handling in error path.
6235 		 */
6236 		btrfs_page_inc_eb_refs(fs_info, p);
6237 		spin_unlock(&mapping->private_lock);
6238 
6239 		WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
6240 		eb->pages[i] = p;
6241 		if (!PageUptodate(p))
6242 			uptodate = 0;
6243 
6244 		/*
6245 		 * We can't unlock the pages just yet since the extent buffer
6246 		 * hasn't been properly inserted in the radix tree, this
6247 		 * opens a race with btree_releasepage which can free a page
6248 		 * while we are still filling in all pages for the buffer and
6249 		 * we could crash.
6250 		 */
6251 	}
6252 	if (uptodate)
6253 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6254 again:
6255 	ret = radix_tree_preload(GFP_NOFS);
6256 	if (ret) {
6257 		exists = ERR_PTR(ret);
6258 		goto free_eb;
6259 	}
6260 
6261 	spin_lock(&fs_info->buffer_lock);
6262 	ret = radix_tree_insert(&fs_info->buffer_radix,
6263 				start >> fs_info->sectorsize_bits, eb);
6264 	spin_unlock(&fs_info->buffer_lock);
6265 	radix_tree_preload_end();
6266 	if (ret == -EEXIST) {
6267 		exists = find_extent_buffer(fs_info, start);
6268 		if (exists)
6269 			goto free_eb;
6270 		else
6271 			goto again;
6272 	}
6273 	/* add one reference for the tree */
6274 	check_buffer_tree_ref(eb);
6275 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
6276 
6277 	/*
6278 	 * Now it's safe to unlock the pages because any calls to
6279 	 * btree_releasepage will correctly detect that a page belongs to a
6280 	 * live buffer and won't free them prematurely.
6281 	 */
6282 	for (i = 0; i < num_pages; i++)
6283 		unlock_page(eb->pages[i]);
6284 	return eb;
6285 
6286 free_eb:
6287 	WARN_ON(!atomic_dec_and_test(&eb->refs));
6288 	for (i = 0; i < num_pages; i++) {
6289 		if (eb->pages[i])
6290 			unlock_page(eb->pages[i]);
6291 	}
6292 
6293 	btrfs_release_extent_buffer(eb);
6294 	return exists;
6295 }
6296 
6297 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
6298 {
6299 	struct extent_buffer *eb =
6300 			container_of(head, struct extent_buffer, rcu_head);
6301 
6302 	__free_extent_buffer(eb);
6303 }
6304 
6305 static int release_extent_buffer(struct extent_buffer *eb)
6306 	__releases(&eb->refs_lock)
6307 {
6308 	lockdep_assert_held(&eb->refs_lock);
6309 
6310 	WARN_ON(atomic_read(&eb->refs) == 0);
6311 	if (atomic_dec_and_test(&eb->refs)) {
6312 		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
6313 			struct btrfs_fs_info *fs_info = eb->fs_info;
6314 
6315 			spin_unlock(&eb->refs_lock);
6316 
6317 			spin_lock(&fs_info->buffer_lock);
6318 			radix_tree_delete(&fs_info->buffer_radix,
6319 					  eb->start >> fs_info->sectorsize_bits);
6320 			spin_unlock(&fs_info->buffer_lock);
6321 		} else {
6322 			spin_unlock(&eb->refs_lock);
6323 		}
6324 
6325 		btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
6326 		/* Should be safe to release our pages at this point */
6327 		btrfs_release_extent_buffer_pages(eb);
6328 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
6329 		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
6330 			__free_extent_buffer(eb);
6331 			return 1;
6332 		}
6333 #endif
6334 		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
6335 		return 1;
6336 	}
6337 	spin_unlock(&eb->refs_lock);
6338 
6339 	return 0;
6340 }
6341 
6342 void free_extent_buffer(struct extent_buffer *eb)
6343 {
6344 	int refs;
6345 	int old;
6346 	if (!eb)
6347 		return;
6348 
6349 	while (1) {
6350 		refs = atomic_read(&eb->refs);
6351 		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
6352 		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
6353 			refs == 1))
6354 			break;
6355 		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
6356 		if (old == refs)
6357 			return;
6358 	}
6359 
6360 	spin_lock(&eb->refs_lock);
6361 	if (atomic_read(&eb->refs) == 2 &&
6362 	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
6363 	    !extent_buffer_under_io(eb) &&
6364 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6365 		atomic_dec(&eb->refs);
6366 
6367 	/*
6368 	 * I know this is terrible, but it's temporary until we stop tracking
6369 	 * the uptodate bits and such for the extent buffers.
6370 	 */
6371 	release_extent_buffer(eb);
6372 }
6373 
6374 void free_extent_buffer_stale(struct extent_buffer *eb)
6375 {
6376 	if (!eb)
6377 		return;
6378 
6379 	spin_lock(&eb->refs_lock);
6380 	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
6381 
6382 	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
6383 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6384 		atomic_dec(&eb->refs);
6385 	release_extent_buffer(eb);
6386 }
6387 
6388 static void btree_clear_page_dirty(struct page *page)
6389 {
6390 	ASSERT(PageDirty(page));
6391 	ASSERT(PageLocked(page));
6392 	clear_page_dirty_for_io(page);
6393 	xa_lock_irq(&page->mapping->i_pages);
6394 	if (!PageDirty(page))
6395 		__xa_clear_mark(&page->mapping->i_pages,
6396 				page_index(page), PAGECACHE_TAG_DIRTY);
6397 	xa_unlock_irq(&page->mapping->i_pages);
6398 }
6399 
6400 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
6401 {
6402 	struct btrfs_fs_info *fs_info = eb->fs_info;
6403 	struct page *page = eb->pages[0];
6404 	bool last;
6405 
6406 	/* btree_clear_page_dirty() needs page locked */
6407 	lock_page(page);
6408 	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
6409 						  eb->len);
6410 	if (last)
6411 		btree_clear_page_dirty(page);
6412 	unlock_page(page);
6413 	WARN_ON(atomic_read(&eb->refs) == 0);
6414 }
6415 
6416 void clear_extent_buffer_dirty(const struct extent_buffer *eb)
6417 {
6418 	int i;
6419 	int num_pages;
6420 	struct page *page;
6421 
6422 	if (eb->fs_info->sectorsize < PAGE_SIZE)
6423 		return clear_subpage_extent_buffer_dirty(eb);
6424 
6425 	num_pages = num_extent_pages(eb);
6426 
6427 	for (i = 0; i < num_pages; i++) {
6428 		page = eb->pages[i];
6429 		if (!PageDirty(page))
6430 			continue;
6431 		lock_page(page);
6432 		btree_clear_page_dirty(page);
6433 		ClearPageError(page);
6434 		unlock_page(page);
6435 	}
6436 	WARN_ON(atomic_read(&eb->refs) == 0);
6437 }
6438 
6439 bool set_extent_buffer_dirty(struct extent_buffer *eb)
6440 {
6441 	int i;
6442 	int num_pages;
6443 	bool was_dirty;
6444 
6445 	check_buffer_tree_ref(eb);
6446 
6447 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
6448 
6449 	num_pages = num_extent_pages(eb);
6450 	WARN_ON(atomic_read(&eb->refs) == 0);
6451 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
6452 
6453 	if (!was_dirty) {
6454 		bool subpage = eb->fs_info->sectorsize < PAGE_SIZE;
6455 
6456 		/*
6457 		 * For subpage case, we can have other extent buffers in the
6458 		 * same page, and in clear_subpage_extent_buffer_dirty() we
6459 		 * have to clear page dirty without subpage lock held.
6460 		 * This can cause race where our page gets dirty cleared after
6461 		 * we just set it.
6462 		 *
6463 		 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
6464 		 * its page for other reasons, we can use page lock to prevent
6465 		 * the above race.
6466 		 */
6467 		if (subpage)
6468 			lock_page(eb->pages[0]);
6469 		for (i = 0; i < num_pages; i++)
6470 			btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
6471 					     eb->start, eb->len);
6472 		if (subpage)
6473 			unlock_page(eb->pages[0]);
6474 	}
6475 #ifdef CONFIG_BTRFS_DEBUG
6476 	for (i = 0; i < num_pages; i++)
6477 		ASSERT(PageDirty(eb->pages[i]));
6478 #endif
6479 
6480 	return was_dirty;
6481 }
6482 
6483 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
6484 {
6485 	struct btrfs_fs_info *fs_info = eb->fs_info;
6486 	struct page *page;
6487 	int num_pages;
6488 	int i;
6489 
6490 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6491 	num_pages = num_extent_pages(eb);
6492 	for (i = 0; i < num_pages; i++) {
6493 		page = eb->pages[i];
6494 		if (page)
6495 			btrfs_page_clear_uptodate(fs_info, page,
6496 						  eb->start, eb->len);
6497 	}
6498 }
6499 
6500 void set_extent_buffer_uptodate(struct extent_buffer *eb)
6501 {
6502 	struct btrfs_fs_info *fs_info = eb->fs_info;
6503 	struct page *page;
6504 	int num_pages;
6505 	int i;
6506 
6507 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6508 	num_pages = num_extent_pages(eb);
6509 	for (i = 0; i < num_pages; i++) {
6510 		page = eb->pages[i];
6511 		btrfs_page_set_uptodate(fs_info, page, eb->start, eb->len);
6512 	}
6513 }
6514 
6515 static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
6516 				      int mirror_num)
6517 {
6518 	struct btrfs_fs_info *fs_info = eb->fs_info;
6519 	struct extent_io_tree *io_tree;
6520 	struct page *page = eb->pages[0];
6521 	struct btrfs_bio_ctrl bio_ctrl = { 0 };
6522 	int ret = 0;
6523 
6524 	ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
6525 	ASSERT(PagePrivate(page));
6526 	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
6527 
6528 	if (wait == WAIT_NONE) {
6529 		if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
6530 			return -EAGAIN;
6531 	} else {
6532 		ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
6533 		if (ret < 0)
6534 			return ret;
6535 	}
6536 
6537 	ret = 0;
6538 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
6539 	    PageUptodate(page) ||
6540 	    btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
6541 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6542 		unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
6543 		return ret;
6544 	}
6545 
6546 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
6547 	eb->read_mirror = 0;
6548 	atomic_set(&eb->io_pages, 1);
6549 	check_buffer_tree_ref(eb);
6550 	btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
6551 
6552 	btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
6553 	ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, &bio_ctrl,
6554 				 page, eb->start, eb->len,
6555 				 eb->start - page_offset(page),
6556 				 end_bio_extent_readpage, mirror_num, 0,
6557 				 true);
6558 	if (ret) {
6559 		/*
6560 		 * In the endio function, if we hit something wrong we will
6561 		 * increase the io_pages, so here we need to decrease it for
6562 		 * error path.
6563 		 */
6564 		atomic_dec(&eb->io_pages);
6565 	}
6566 	if (bio_ctrl.bio) {
6567 		int tmp;
6568 
6569 		tmp = submit_one_bio(bio_ctrl.bio, mirror_num, 0);
6570 		bio_ctrl.bio = NULL;
6571 		if (tmp < 0)
6572 			return tmp;
6573 	}
6574 	if (ret || wait != WAIT_COMPLETE)
6575 		return ret;
6576 
6577 	wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, EXTENT_LOCKED);
6578 	if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
6579 		ret = -EIO;
6580 	return ret;
6581 }
6582 
6583 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
6584 {
6585 	int i;
6586 	struct page *page;
6587 	int err;
6588 	int ret = 0;
6589 	int locked_pages = 0;
6590 	int all_uptodate = 1;
6591 	int num_pages;
6592 	unsigned long num_reads = 0;
6593 	struct btrfs_bio_ctrl bio_ctrl = { 0 };
6594 
6595 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
6596 		return 0;
6597 
6598 	/*
6599 	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
6600 	 * operation, which could potentially still be in flight.  In this case
6601 	 * we simply want to return an error.
6602 	 */
6603 	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
6604 		return -EIO;
6605 
6606 	if (eb->fs_info->sectorsize < PAGE_SIZE)
6607 		return read_extent_buffer_subpage(eb, wait, mirror_num);
6608 
6609 	num_pages = num_extent_pages(eb);
6610 	for (i = 0; i < num_pages; i++) {
6611 		page = eb->pages[i];
6612 		if (wait == WAIT_NONE) {
6613 			/*
6614 			 * WAIT_NONE is only utilized by readahead. If we can't
6615 			 * acquire the lock atomically it means either the eb
6616 			 * is being read out or under modification.
6617 			 * Either way the eb will be or has been cached,
6618 			 * readahead can exit safely.
6619 			 */
6620 			if (!trylock_page(page))
6621 				goto unlock_exit;
6622 		} else {
6623 			lock_page(page);
6624 		}
6625 		locked_pages++;
6626 	}
6627 	/*
6628 	 * We need to firstly lock all pages to make sure that
6629 	 * the uptodate bit of our pages won't be affected by
6630 	 * clear_extent_buffer_uptodate().
6631 	 */
6632 	for (i = 0; i < num_pages; i++) {
6633 		page = eb->pages[i];
6634 		if (!PageUptodate(page)) {
6635 			num_reads++;
6636 			all_uptodate = 0;
6637 		}
6638 	}
6639 
6640 	if (all_uptodate) {
6641 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6642 		goto unlock_exit;
6643 	}
6644 
6645 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
6646 	eb->read_mirror = 0;
6647 	atomic_set(&eb->io_pages, num_reads);
6648 	/*
6649 	 * It is possible for releasepage to clear the TREE_REF bit before we
6650 	 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
6651 	 */
6652 	check_buffer_tree_ref(eb);
6653 	for (i = 0; i < num_pages; i++) {
6654 		page = eb->pages[i];
6655 
6656 		if (!PageUptodate(page)) {
6657 			if (ret) {
6658 				atomic_dec(&eb->io_pages);
6659 				unlock_page(page);
6660 				continue;
6661 			}
6662 
6663 			ClearPageError(page);
6664 			err = submit_extent_page(REQ_OP_READ | REQ_META, NULL,
6665 					 &bio_ctrl, page, page_offset(page),
6666 					 PAGE_SIZE, 0, end_bio_extent_readpage,
6667 					 mirror_num, 0, false);
6668 			if (err) {
6669 				/*
6670 				 * We failed to submit the bio so it's the
6671 				 * caller's responsibility to perform cleanup
6672 				 * i.e unlock page/set error bit.
6673 				 */
6674 				ret = err;
6675 				SetPageError(page);
6676 				unlock_page(page);
6677 				atomic_dec(&eb->io_pages);
6678 			}
6679 		} else {
6680 			unlock_page(page);
6681 		}
6682 	}
6683 
6684 	if (bio_ctrl.bio) {
6685 		err = submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.bio_flags);
6686 		bio_ctrl.bio = NULL;
6687 		if (err)
6688 			return err;
6689 	}
6690 
6691 	if (ret || wait != WAIT_COMPLETE)
6692 		return ret;
6693 
6694 	for (i = 0; i < num_pages; i++) {
6695 		page = eb->pages[i];
6696 		wait_on_page_locked(page);
6697 		if (!PageUptodate(page))
6698 			ret = -EIO;
6699 	}
6700 
6701 	return ret;
6702 
6703 unlock_exit:
6704 	while (locked_pages > 0) {
6705 		locked_pages--;
6706 		page = eb->pages[locked_pages];
6707 		unlock_page(page);
6708 	}
6709 	return ret;
6710 }
6711 
6712 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
6713 			    unsigned long len)
6714 {
6715 	btrfs_warn(eb->fs_info,
6716 		"access to eb bytenr %llu len %lu out of range start %lu len %lu",
6717 		eb->start, eb->len, start, len);
6718 	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
6719 
6720 	return true;
6721 }
6722 
6723 /*
6724  * Check if the [start, start + len) range is valid before reading/writing
6725  * the eb.
6726  * NOTE: @start and @len are offset inside the eb, not logical address.
6727  *
6728  * Caller should not touch the dst/src memory if this function returns error.
6729  */
6730 static inline int check_eb_range(const struct extent_buffer *eb,
6731 				 unsigned long start, unsigned long len)
6732 {
6733 	unsigned long offset;
6734 
6735 	/* start, start + len should not go beyond eb->len nor overflow */
6736 	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
6737 		return report_eb_range(eb, start, len);
6738 
6739 	return false;
6740 }
6741 
6742 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
6743 			unsigned long start, unsigned long len)
6744 {
6745 	size_t cur;
6746 	size_t offset;
6747 	struct page *page;
6748 	char *kaddr;
6749 	char *dst = (char *)dstv;
6750 	unsigned long i = get_eb_page_index(start);
6751 
6752 	if (check_eb_range(eb, start, len))
6753 		return;
6754 
6755 	offset = get_eb_offset_in_page(eb, start);
6756 
6757 	while (len > 0) {
6758 		page = eb->pages[i];
6759 
6760 		cur = min(len, (PAGE_SIZE - offset));
6761 		kaddr = page_address(page);
6762 		memcpy(dst, kaddr + offset, cur);
6763 
6764 		dst += cur;
6765 		len -= cur;
6766 		offset = 0;
6767 		i++;
6768 	}
6769 }
6770 
6771 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
6772 				       void __user *dstv,
6773 				       unsigned long start, unsigned long len)
6774 {
6775 	size_t cur;
6776 	size_t offset;
6777 	struct page *page;
6778 	char *kaddr;
6779 	char __user *dst = (char __user *)dstv;
6780 	unsigned long i = get_eb_page_index(start);
6781 	int ret = 0;
6782 
6783 	WARN_ON(start > eb->len);
6784 	WARN_ON(start + len > eb->start + eb->len);
6785 
6786 	offset = get_eb_offset_in_page(eb, start);
6787 
6788 	while (len > 0) {
6789 		page = eb->pages[i];
6790 
6791 		cur = min(len, (PAGE_SIZE - offset));
6792 		kaddr = page_address(page);
6793 		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
6794 			ret = -EFAULT;
6795 			break;
6796 		}
6797 
6798 		dst += cur;
6799 		len -= cur;
6800 		offset = 0;
6801 		i++;
6802 	}
6803 
6804 	return ret;
6805 }
6806 
6807 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
6808 			 unsigned long start, unsigned long len)
6809 {
6810 	size_t cur;
6811 	size_t offset;
6812 	struct page *page;
6813 	char *kaddr;
6814 	char *ptr = (char *)ptrv;
6815 	unsigned long i = get_eb_page_index(start);
6816 	int ret = 0;
6817 
6818 	if (check_eb_range(eb, start, len))
6819 		return -EINVAL;
6820 
6821 	offset = get_eb_offset_in_page(eb, start);
6822 
6823 	while (len > 0) {
6824 		page = eb->pages[i];
6825 
6826 		cur = min(len, (PAGE_SIZE - offset));
6827 
6828 		kaddr = page_address(page);
6829 		ret = memcmp(ptr, kaddr + offset, cur);
6830 		if (ret)
6831 			break;
6832 
6833 		ptr += cur;
6834 		len -= cur;
6835 		offset = 0;
6836 		i++;
6837 	}
6838 	return ret;
6839 }
6840 
6841 /*
6842  * Check that the extent buffer is uptodate.
6843  *
6844  * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
6845  * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
6846  */
6847 static void assert_eb_page_uptodate(const struct extent_buffer *eb,
6848 				    struct page *page)
6849 {
6850 	struct btrfs_fs_info *fs_info = eb->fs_info;
6851 
6852 	/*
6853 	 * If we are using the commit root we could potentially clear a page
6854 	 * Uptodate while we're using the extent buffer that we've previously
6855 	 * looked up.  We don't want to complain in this case, as the page was
6856 	 * valid before, we just didn't write it out.  Instead we want to catch
6857 	 * the case where we didn't actually read the block properly, which
6858 	 * would have !PageUptodate && !PageError, as we clear PageError before
6859 	 * reading.
6860 	 */
6861 	if (fs_info->sectorsize < PAGE_SIZE) {
6862 		bool uptodate, error;
6863 
6864 		uptodate = btrfs_subpage_test_uptodate(fs_info, page,
6865 						       eb->start, eb->len);
6866 		error = btrfs_subpage_test_error(fs_info, page, eb->start, eb->len);
6867 		WARN_ON(!uptodate && !error);
6868 	} else {
6869 		WARN_ON(!PageUptodate(page) && !PageError(page));
6870 	}
6871 }
6872 
6873 void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
6874 		const void *srcv)
6875 {
6876 	char *kaddr;
6877 
6878 	assert_eb_page_uptodate(eb, eb->pages[0]);
6879 	kaddr = page_address(eb->pages[0]) +
6880 		get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
6881 						   chunk_tree_uuid));
6882 	memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
6883 }
6884 
6885 void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
6886 {
6887 	char *kaddr;
6888 
6889 	assert_eb_page_uptodate(eb, eb->pages[0]);
6890 	kaddr = page_address(eb->pages[0]) +
6891 		get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
6892 	memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
6893 }
6894 
6895 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
6896 			 unsigned long start, unsigned long len)
6897 {
6898 	size_t cur;
6899 	size_t offset;
6900 	struct page *page;
6901 	char *kaddr;
6902 	char *src = (char *)srcv;
6903 	unsigned long i = get_eb_page_index(start);
6904 
6905 	WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
6906 
6907 	if (check_eb_range(eb, start, len))
6908 		return;
6909 
6910 	offset = get_eb_offset_in_page(eb, start);
6911 
6912 	while (len > 0) {
6913 		page = eb->pages[i];
6914 		assert_eb_page_uptodate(eb, page);
6915 
6916 		cur = min(len, PAGE_SIZE - offset);
6917 		kaddr = page_address(page);
6918 		memcpy(kaddr + offset, src, cur);
6919 
6920 		src += cur;
6921 		len -= cur;
6922 		offset = 0;
6923 		i++;
6924 	}
6925 }
6926 
6927 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
6928 		unsigned long len)
6929 {
6930 	size_t cur;
6931 	size_t offset;
6932 	struct page *page;
6933 	char *kaddr;
6934 	unsigned long i = get_eb_page_index(start);
6935 
6936 	if (check_eb_range(eb, start, len))
6937 		return;
6938 
6939 	offset = get_eb_offset_in_page(eb, start);
6940 
6941 	while (len > 0) {
6942 		page = eb->pages[i];
6943 		assert_eb_page_uptodate(eb, page);
6944 
6945 		cur = min(len, PAGE_SIZE - offset);
6946 		kaddr = page_address(page);
6947 		memset(kaddr + offset, 0, cur);
6948 
6949 		len -= cur;
6950 		offset = 0;
6951 		i++;
6952 	}
6953 }
6954 
6955 void copy_extent_buffer_full(const struct extent_buffer *dst,
6956 			     const struct extent_buffer *src)
6957 {
6958 	int i;
6959 	int num_pages;
6960 
6961 	ASSERT(dst->len == src->len);
6962 
6963 	if (dst->fs_info->sectorsize == PAGE_SIZE) {
6964 		num_pages = num_extent_pages(dst);
6965 		for (i = 0; i < num_pages; i++)
6966 			copy_page(page_address(dst->pages[i]),
6967 				  page_address(src->pages[i]));
6968 	} else {
6969 		size_t src_offset = get_eb_offset_in_page(src, 0);
6970 		size_t dst_offset = get_eb_offset_in_page(dst, 0);
6971 
6972 		ASSERT(src->fs_info->sectorsize < PAGE_SIZE);
6973 		memcpy(page_address(dst->pages[0]) + dst_offset,
6974 		       page_address(src->pages[0]) + src_offset,
6975 		       src->len);
6976 	}
6977 }
6978 
6979 void copy_extent_buffer(const struct extent_buffer *dst,
6980 			const struct extent_buffer *src,
6981 			unsigned long dst_offset, unsigned long src_offset,
6982 			unsigned long len)
6983 {
6984 	u64 dst_len = dst->len;
6985 	size_t cur;
6986 	size_t offset;
6987 	struct page *page;
6988 	char *kaddr;
6989 	unsigned long i = get_eb_page_index(dst_offset);
6990 
6991 	if (check_eb_range(dst, dst_offset, len) ||
6992 	    check_eb_range(src, src_offset, len))
6993 		return;
6994 
6995 	WARN_ON(src->len != dst_len);
6996 
6997 	offset = get_eb_offset_in_page(dst, dst_offset);
6998 
6999 	while (len > 0) {
7000 		page = dst->pages[i];
7001 		assert_eb_page_uptodate(dst, page);
7002 
7003 		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
7004 
7005 		kaddr = page_address(page);
7006 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
7007 
7008 		src_offset += cur;
7009 		len -= cur;
7010 		offset = 0;
7011 		i++;
7012 	}
7013 }
7014 
7015 /*
7016  * eb_bitmap_offset() - calculate the page and offset of the byte containing the
7017  * given bit number
7018  * @eb: the extent buffer
7019  * @start: offset of the bitmap item in the extent buffer
7020  * @nr: bit number
7021  * @page_index: return index of the page in the extent buffer that contains the
7022  * given bit number
7023  * @page_offset: return offset into the page given by page_index
7024  *
7025  * This helper hides the ugliness of finding the byte in an extent buffer which
7026  * contains a given bit.
7027  */
7028 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
7029 				    unsigned long start, unsigned long nr,
7030 				    unsigned long *page_index,
7031 				    size_t *page_offset)
7032 {
7033 	size_t byte_offset = BIT_BYTE(nr);
7034 	size_t offset;
7035 
7036 	/*
7037 	 * The byte we want is the offset of the extent buffer + the offset of
7038 	 * the bitmap item in the extent buffer + the offset of the byte in the
7039 	 * bitmap item.
7040 	 */
7041 	offset = start + offset_in_page(eb->start) + byte_offset;
7042 
7043 	*page_index = offset >> PAGE_SHIFT;
7044 	*page_offset = offset_in_page(offset);
7045 }
7046 
7047 /**
7048  * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
7049  * @eb: the extent buffer
7050  * @start: offset of the bitmap item in the extent buffer
7051  * @nr: bit number to test
7052  */
7053 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
7054 			   unsigned long nr)
7055 {
7056 	u8 *kaddr;
7057 	struct page *page;
7058 	unsigned long i;
7059 	size_t offset;
7060 
7061 	eb_bitmap_offset(eb, start, nr, &i, &offset);
7062 	page = eb->pages[i];
7063 	assert_eb_page_uptodate(eb, page);
7064 	kaddr = page_address(page);
7065 	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
7066 }
7067 
7068 /**
7069  * extent_buffer_bitmap_set - set an area of a bitmap
7070  * @eb: the extent buffer
7071  * @start: offset of the bitmap item in the extent buffer
7072  * @pos: bit number of the first bit
7073  * @len: number of bits to set
7074  */
7075 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
7076 			      unsigned long pos, unsigned long len)
7077 {
7078 	u8 *kaddr;
7079 	struct page *page;
7080 	unsigned long i;
7081 	size_t offset;
7082 	const unsigned int size = pos + len;
7083 	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
7084 	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
7085 
7086 	eb_bitmap_offset(eb, start, pos, &i, &offset);
7087 	page = eb->pages[i];
7088 	assert_eb_page_uptodate(eb, page);
7089 	kaddr = page_address(page);
7090 
7091 	while (len >= bits_to_set) {
7092 		kaddr[offset] |= mask_to_set;
7093 		len -= bits_to_set;
7094 		bits_to_set = BITS_PER_BYTE;
7095 		mask_to_set = ~0;
7096 		if (++offset >= PAGE_SIZE && len > 0) {
7097 			offset = 0;
7098 			page = eb->pages[++i];
7099 			assert_eb_page_uptodate(eb, page);
7100 			kaddr = page_address(page);
7101 		}
7102 	}
7103 	if (len) {
7104 		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
7105 		kaddr[offset] |= mask_to_set;
7106 	}
7107 }
7108 
7109 
7110 /**
7111  * extent_buffer_bitmap_clear - clear an area of a bitmap
7112  * @eb: the extent buffer
7113  * @start: offset of the bitmap item in the extent buffer
7114  * @pos: bit number of the first bit
7115  * @len: number of bits to clear
7116  */
7117 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
7118 				unsigned long start, unsigned long pos,
7119 				unsigned long len)
7120 {
7121 	u8 *kaddr;
7122 	struct page *page;
7123 	unsigned long i;
7124 	size_t offset;
7125 	const unsigned int size = pos + len;
7126 	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
7127 	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
7128 
7129 	eb_bitmap_offset(eb, start, pos, &i, &offset);
7130 	page = eb->pages[i];
7131 	assert_eb_page_uptodate(eb, page);
7132 	kaddr = page_address(page);
7133 
7134 	while (len >= bits_to_clear) {
7135 		kaddr[offset] &= ~mask_to_clear;
7136 		len -= bits_to_clear;
7137 		bits_to_clear = BITS_PER_BYTE;
7138 		mask_to_clear = ~0;
7139 		if (++offset >= PAGE_SIZE && len > 0) {
7140 			offset = 0;
7141 			page = eb->pages[++i];
7142 			assert_eb_page_uptodate(eb, page);
7143 			kaddr = page_address(page);
7144 		}
7145 	}
7146 	if (len) {
7147 		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
7148 		kaddr[offset] &= ~mask_to_clear;
7149 	}
7150 }
7151 
7152 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
7153 {
7154 	unsigned long distance = (src > dst) ? src - dst : dst - src;
7155 	return distance < len;
7156 }
7157 
7158 static void copy_pages(struct page *dst_page, struct page *src_page,
7159 		       unsigned long dst_off, unsigned long src_off,
7160 		       unsigned long len)
7161 {
7162 	char *dst_kaddr = page_address(dst_page);
7163 	char *src_kaddr;
7164 	int must_memmove = 0;
7165 
7166 	if (dst_page != src_page) {
7167 		src_kaddr = page_address(src_page);
7168 	} else {
7169 		src_kaddr = dst_kaddr;
7170 		if (areas_overlap(src_off, dst_off, len))
7171 			must_memmove = 1;
7172 	}
7173 
7174 	if (must_memmove)
7175 		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
7176 	else
7177 		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
7178 }
7179 
7180 void memcpy_extent_buffer(const struct extent_buffer *dst,
7181 			  unsigned long dst_offset, unsigned long src_offset,
7182 			  unsigned long len)
7183 {
7184 	size_t cur;
7185 	size_t dst_off_in_page;
7186 	size_t src_off_in_page;
7187 	unsigned long dst_i;
7188 	unsigned long src_i;
7189 
7190 	if (check_eb_range(dst, dst_offset, len) ||
7191 	    check_eb_range(dst, src_offset, len))
7192 		return;
7193 
7194 	while (len > 0) {
7195 		dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
7196 		src_off_in_page = get_eb_offset_in_page(dst, src_offset);
7197 
7198 		dst_i = get_eb_page_index(dst_offset);
7199 		src_i = get_eb_page_index(src_offset);
7200 
7201 		cur = min(len, (unsigned long)(PAGE_SIZE -
7202 					       src_off_in_page));
7203 		cur = min_t(unsigned long, cur,
7204 			(unsigned long)(PAGE_SIZE - dst_off_in_page));
7205 
7206 		copy_pages(dst->pages[dst_i], dst->pages[src_i],
7207 			   dst_off_in_page, src_off_in_page, cur);
7208 
7209 		src_offset += cur;
7210 		dst_offset += cur;
7211 		len -= cur;
7212 	}
7213 }
7214 
7215 void memmove_extent_buffer(const struct extent_buffer *dst,
7216 			   unsigned long dst_offset, unsigned long src_offset,
7217 			   unsigned long len)
7218 {
7219 	size_t cur;
7220 	size_t dst_off_in_page;
7221 	size_t src_off_in_page;
7222 	unsigned long dst_end = dst_offset + len - 1;
7223 	unsigned long src_end = src_offset + len - 1;
7224 	unsigned long dst_i;
7225 	unsigned long src_i;
7226 
7227 	if (check_eb_range(dst, dst_offset, len) ||
7228 	    check_eb_range(dst, src_offset, len))
7229 		return;
7230 	if (dst_offset < src_offset) {
7231 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
7232 		return;
7233 	}
7234 	while (len > 0) {
7235 		dst_i = get_eb_page_index(dst_end);
7236 		src_i = get_eb_page_index(src_end);
7237 
7238 		dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
7239 		src_off_in_page = get_eb_offset_in_page(dst, src_end);
7240 
7241 		cur = min_t(unsigned long, len, src_off_in_page + 1);
7242 		cur = min(cur, dst_off_in_page + 1);
7243 		copy_pages(dst->pages[dst_i], dst->pages[src_i],
7244 			   dst_off_in_page - cur + 1,
7245 			   src_off_in_page - cur + 1, cur);
7246 
7247 		dst_end -= cur;
7248 		src_end -= cur;
7249 		len -= cur;
7250 	}
7251 }
7252 
7253 #define GANG_LOOKUP_SIZE	16
7254 static struct extent_buffer *get_next_extent_buffer(
7255 		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
7256 {
7257 	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
7258 	struct extent_buffer *found = NULL;
7259 	u64 page_start = page_offset(page);
7260 	u64 cur = page_start;
7261 
7262 	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
7263 	lockdep_assert_held(&fs_info->buffer_lock);
7264 
7265 	while (cur < page_start + PAGE_SIZE) {
7266 		int ret;
7267 		int i;
7268 
7269 		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
7270 				(void **)gang, cur >> fs_info->sectorsize_bits,
7271 				min_t(unsigned int, GANG_LOOKUP_SIZE,
7272 				      PAGE_SIZE / fs_info->nodesize));
7273 		if (ret == 0)
7274 			goto out;
7275 		for (i = 0; i < ret; i++) {
7276 			/* Already beyond page end */
7277 			if (gang[i]->start >= page_start + PAGE_SIZE)
7278 				goto out;
7279 			/* Found one */
7280 			if (gang[i]->start >= bytenr) {
7281 				found = gang[i];
7282 				goto out;
7283 			}
7284 		}
7285 		cur = gang[ret - 1]->start + gang[ret - 1]->len;
7286 	}
7287 out:
7288 	return found;
7289 }
7290 
7291 static int try_release_subpage_extent_buffer(struct page *page)
7292 {
7293 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
7294 	u64 cur = page_offset(page);
7295 	const u64 end = page_offset(page) + PAGE_SIZE;
7296 	int ret;
7297 
7298 	while (cur < end) {
7299 		struct extent_buffer *eb = NULL;
7300 
7301 		/*
7302 		 * Unlike try_release_extent_buffer() which uses page->private
7303 		 * to grab buffer, for subpage case we rely on radix tree, thus
7304 		 * we need to ensure radix tree consistency.
7305 		 *
7306 		 * We also want an atomic snapshot of the radix tree, thus go
7307 		 * with spinlock rather than RCU.
7308 		 */
7309 		spin_lock(&fs_info->buffer_lock);
7310 		eb = get_next_extent_buffer(fs_info, page, cur);
7311 		if (!eb) {
7312 			/* No more eb in the page range after or at cur */
7313 			spin_unlock(&fs_info->buffer_lock);
7314 			break;
7315 		}
7316 		cur = eb->start + eb->len;
7317 
7318 		/*
7319 		 * The same as try_release_extent_buffer(), to ensure the eb
7320 		 * won't disappear out from under us.
7321 		 */
7322 		spin_lock(&eb->refs_lock);
7323 		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
7324 			spin_unlock(&eb->refs_lock);
7325 			spin_unlock(&fs_info->buffer_lock);
7326 			break;
7327 		}
7328 		spin_unlock(&fs_info->buffer_lock);
7329 
7330 		/*
7331 		 * If tree ref isn't set then we know the ref on this eb is a
7332 		 * real ref, so just return, this eb will likely be freed soon
7333 		 * anyway.
7334 		 */
7335 		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
7336 			spin_unlock(&eb->refs_lock);
7337 			break;
7338 		}
7339 
7340 		/*
7341 		 * Here we don't care about the return value, we will always
7342 		 * check the page private at the end.  And
7343 		 * release_extent_buffer() will release the refs_lock.
7344 		 */
7345 		release_extent_buffer(eb);
7346 	}
7347 	/*
7348 	 * Finally to check if we have cleared page private, as if we have
7349 	 * released all ebs in the page, the page private should be cleared now.
7350 	 */
7351 	spin_lock(&page->mapping->private_lock);
7352 	if (!PagePrivate(page))
7353 		ret = 1;
7354 	else
7355 		ret = 0;
7356 	spin_unlock(&page->mapping->private_lock);
7357 	return ret;
7358 
7359 }
7360 
7361 int try_release_extent_buffer(struct page *page)
7362 {
7363 	struct extent_buffer *eb;
7364 
7365 	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
7366 		return try_release_subpage_extent_buffer(page);
7367 
7368 	/*
7369 	 * We need to make sure nobody is changing page->private, as we rely on
7370 	 * page->private as the pointer to extent buffer.
7371 	 */
7372 	spin_lock(&page->mapping->private_lock);
7373 	if (!PagePrivate(page)) {
7374 		spin_unlock(&page->mapping->private_lock);
7375 		return 1;
7376 	}
7377 
7378 	eb = (struct extent_buffer *)page->private;
7379 	BUG_ON(!eb);
7380 
7381 	/*
7382 	 * This is a little awful but should be ok, we need to make sure that
7383 	 * the eb doesn't disappear out from under us while we're looking at
7384 	 * this page.
7385 	 */
7386 	spin_lock(&eb->refs_lock);
7387 	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
7388 		spin_unlock(&eb->refs_lock);
7389 		spin_unlock(&page->mapping->private_lock);
7390 		return 0;
7391 	}
7392 	spin_unlock(&page->mapping->private_lock);
7393 
7394 	/*
7395 	 * If tree ref isn't set then we know the ref on this eb is a real ref,
7396 	 * so just return, this page will likely be freed soon anyway.
7397 	 */
7398 	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
7399 		spin_unlock(&eb->refs_lock);
7400 		return 0;
7401 	}
7402 
7403 	return release_extent_buffer(eb);
7404 }
7405 
7406 /*
7407  * btrfs_readahead_tree_block - attempt to readahead a child block
7408  * @fs_info:	the fs_info
7409  * @bytenr:	bytenr to read
7410  * @owner_root: objectid of the root that owns this eb
7411  * @gen:	generation for the uptodate check, can be 0
7412  * @level:	level for the eb
7413  *
7414  * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
7415  * normal uptodate check of the eb, without checking the generation.  If we have
7416  * to read the block we will not block on anything.
7417  */
7418 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
7419 				u64 bytenr, u64 owner_root, u64 gen, int level)
7420 {
7421 	struct extent_buffer *eb;
7422 	int ret;
7423 
7424 	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
7425 	if (IS_ERR(eb))
7426 		return;
7427 
7428 	if (btrfs_buffer_uptodate(eb, gen, 1)) {
7429 		free_extent_buffer(eb);
7430 		return;
7431 	}
7432 
7433 	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0);
7434 	if (ret < 0)
7435 		free_extent_buffer_stale(eb);
7436 	else
7437 		free_extent_buffer(eb);
7438 }
7439 
7440 /*
7441  * btrfs_readahead_node_child - readahead a node's child block
7442  * @node:	parent node we're reading from
7443  * @slot:	slot in the parent node for the child we want to read
7444  *
7445  * A helper for btrfs_readahead_tree_block, we simply read the bytenr pointed at
7446  * the slot in the node provided.
7447  */
7448 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
7449 {
7450 	btrfs_readahead_tree_block(node->fs_info,
7451 				   btrfs_node_blockptr(node, slot),
7452 				   btrfs_header_owner(node),
7453 				   btrfs_node_ptr_generation(node, slot),
7454 				   btrfs_header_level(node) - 1);
7455 }
7456