xref: /openbmc/linux/fs/btrfs/extent_map.c (revision 43dd529a)
// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "messages.h"
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/*
 * Initialize the extent tree @tree.  Should be called for each new inode or
 * other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}
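
/*
 * Illustrative usage sketch (not part of this file): users embed an
 * extent_map_tree in their own structure and initialize it once, when that
 * structure is created.  The btrfs inode is the typical example:
 *
 *	extent_map_tree_init(&inode->extent_tree);
 */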

/*
 * Allocate a new extent_map structure.  The new structure is returned with a
 * reference count of one and needs to be freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->compress_type = BTRFS_COMPRESS_NONE;
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/*
 * Drop the reference count on @em by one and free the structure if the
 * reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}
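
/*
 * Illustrative lifecycle sketch (not part of this file; the offsets, sizes
 * and disk address below are hypothetical):
 *
 *	struct extent_map *em;
 *
 *	em = alloc_extent_map();
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->len = SZ_16K;
 *	em->block_start = SZ_1M;
 *	em->block_len = SZ_16K;
 *
 *	(... use or insert the mapping ...)
 *
 *	free_extent_map(em);
 */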

/* Do the math around the end of an extent, handling wrapping. */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
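
/*
 * For example (hypothetical values), range_end(0, SZ_4K) is SZ_4K, while a
 * range that would wrap, such as range_end((u64)-1 - SZ_4K, SZ_8K), is
 * clamped to (u64)-1.
 */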

static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}

/*
 * Search through the tree for an extent_map with a given offset.  If it can't
 * be found, try to find some neighboring extents.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_or_next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	ASSERT(prev_or_next_ret);

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	orig_prev = prev;
	while (prev && offset >= extent_map_end(prev_entry)) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}

	/*
	 * Previous extent map found, return as in this case the caller does not
	 * care about the next one.
	 */
	if (prev) {
		*prev_or_next_ret = prev;
		return NULL;
	}

	prev = orig_prev;
	prev_entry = rb_entry(prev, struct extent_map, rb_node);
	while (prev && offset < prev_entry->start) {
		prev = rb_prev(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}
	*prev_or_next_ret = prev;

	return NULL;
}

/* Check to see if two extent_map structs are adjacent and safe to merge. */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * Don't merge compressed extents, we need to know their actual size.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge extent maps that haven't been written to the
	 * log yet since a merged map may not reflect exactly what is on disk,
	 * and that would be bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
	       prev->block_start != EXTENT_MAP_DELALLOC);

	if (prev->map_lookup || next->map_lookup)
		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->map_lookup == next->map_lookup &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
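
/*
 * Merge example (hypothetical values): a map for file range [0, SZ_4K) whose
 * data sits at disk address X merges with a map for [SZ_4K, SZ_8K) at disk
 * address X + SZ_4K, because the file ranges are adjacent and the disk blocks
 * are contiguous.  Two adjacent holes (block_start == EXTENT_MAP_HOLE) merge
 * as well, provided neither map is pinned, compressed, under logging or on
 * the modified list, and their flags match.
 */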

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in
	 * inconsistent state during the merging. We always have 1 reference for
	 * the tree and 1 for this task (which is unpinning the extent map or
	 * clearing the logging flag), so anything > 2 means it's being used by
	 * other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			set_bit(EXTENT_FLAG_MERGED, &em->flags);

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		set_bit(EXTENT_FLAG_MERGED, &em->flags);
		free_extent_map(merge);
	}
}

/*
 * Unpin an extent from the cache.
 *
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
				       stripe->physical + stripe_size - 1, bits);
	}
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		__clear_extent_bit(&device->alloc_state, stripe->physical,
				   stripe->physical + stripe_size - 1, bits,
				   NULL, GFP_NOWAIT, NULL);
	}
}

/*
 * Add new extent map to the extent tree
 *
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the given @em should be added to the
 *		modified list, which means the extent needs to be logged
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
	}
out:
	return ret;
}
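
/*
 * Usage sketch (illustrative; @tree and @em assumed set up as in the examples
 * above).  The tree lock must be held for writing:
 *
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em, 0);
 *	write_unlock(&tree->lock);
 *	free_extent_map(em);
 *
 * A return value of -EEXIST means an overlapping mapping already exists.  On
 * success the tree keeps the extra reference taken by the insertion, so the
 * final free_extent_map() only drops the caller's reference.
 */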

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev_or_next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next);
	if (!rb_node) {
		if (prev_or_next)
			rb_node = prev_or_next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

/*
 * Lookup extent_map that intersects @start + @len range.
 *
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
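
/*
 * Usage sketch (illustrative, hypothetical offsets): look up a mapping for
 * the first 4K of a file and drop the reference the lookup took:
 *
 *	struct extent_map *em;
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, 0, SZ_4K);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		(... use em->block_start, em->len, etc ...)
 *		free_extent_map(em);
 *	}
 */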

/*
 * Find a nearby extent map intersecting @start + @len (not an exact search).
 *
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/*
 * Remove an extent_map from the extent tree.
 *
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Remove @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
	RB_CLEAR_NODE(&em->rb_node);
}
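
/*
 * Usage sketch (illustrative): remove a mapping obtained from a previous
 * lookup, then drop both the tree's reference and the lookup reference:
 *
 *	write_lock(&tree->lock);
 *	remove_extent_mapping(tree, em);
 *	write_unlock(&tree->lock);
 *	free_extent_map(em);	(once for the tree's reference)
 *	free_extent_map(em);	(once for the lookup reference)
 */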

void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(const struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

/*
 * Get the extent map that immediately follows another one.
 *
 * @tree:       The extent map tree that the extent map belongs to.
 *              Holding read or write access on the tree's lock is required.
 * @em:         An extent map from the given tree. The caller must ensure that
 *              between getting @em and calling this function, the extent map
 *              @em is not removed from the tree - for example, by holding the
 *              tree's lock for the duration of those 2 operations.
 *
 * Returns the extent map that immediately follows @em, or NULL if @em is the
 * last extent map in the tree.
 */
struct extent_map *btrfs_next_extent_map(const struct extent_map_tree *tree,
					 const struct extent_map *em)
{
	struct extent_map *next;

	/* The lock must be acquired either in read mode or write mode. */
	lockdep_assert_held(&tree->lock);
	ASSERT(extent_map_in_tree(em));

	next = next_extent_map(em);
	if (next)
		refcount_inc(&next->refs);

	return next;
}
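
/*
 * Iteration sketch (illustrative): starting from a mapping @em obtained
 * under the tree's read lock, walk every extent map that follows it:
 *
 *	struct extent_map *next;
 *
 *	read_lock(&tree->lock);
 *	next = btrfs_next_extent_map(tree, em);
 *	while (next) {
 *		struct extent_map *curr = next;
 *
 *		(... inspect curr ...)
 *		next = btrfs_next_extent_map(tree, curr);
 *		free_extent_map(curr);
 *	}
 *	read_unlock(&tree->lock);
 */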

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

/*
 * Helper for btrfs_get_extent().  Given an existing extent in the tree and an
 * extent we want to insert, where the existing extent is the one nearest to
 * map_start, deal with the overlap and insert the best-fitting trimmed
 * version of the new extent into the tree.
 */
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}

/*
 * Add extent mapping into em_tree.
 *
 * @fs_info:  the filesystem
 * @em_tree:  extent tree into which we want to insert the extent mapping
 * @em_in:    extent we are inserting
 * @start:    start of the logical range btrfs_get_extent() is requesting
 * @len:      length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start + len),
 * but they must overlap.
 *
 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
 * the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merging the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	ret = add_extent_mapping(em_tree, em, 0);
	/*
	 * It is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be an
		 * extent map causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * The existing extent map is the one nearest to the
			 * [start, start + len) range and overlaps it.
			 */
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}
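
/*
 * Usage sketch (illustrative; mirrors btrfs_get_extent() style callers, with
 * hypothetical @start and @len):
 *
 *	write_lock(&em_tree->lock);
 *	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
 *	write_unlock(&em_tree->lock);
 *
 * On success (ret == 0), @em points either to the inserted mapping or to the
 * preexisting overlapping one, with a reference held either way.
 */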

/*
 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 * if needed. This avoids searching the tree, from the root down to the first
 * extent map, before each deletion.
 */
static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
{
	write_lock(&tree->lock);
	while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
		struct extent_map *em;
		struct rb_node *node;

		node = rb_first_cached(&tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
		remove_extent_mapping(tree, em);
		free_extent_map(em);
		cond_resched_rwlock_write(&tree->lock);
	}
	write_unlock(&tree->lock);
}

/*
 * Drop all extent maps in a given range.
 *
 * @inode:       The target inode.
 * @start:       Start offset of the range.
 * @end:         End offset of the range (inclusive value).
 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 *
 * This drops all the extent maps that intersect the given range [@start, @end].
 * Extent maps that partially overlap the range and extend before or beyond it
 * are split.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
				 bool skip_pinned)
{
	struct extent_map *split;
	struct extent_map *split2;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		if (start == 0 && !skip_pinned) {
			drop_all_extent_maps_fast(em_tree);
			return;
		}
		len = (u64)-1;
	} else {
		/* Make end offset exclusive for use in the loop below. */
		end++;
	}

	/*
	 * It's ok if we fail to allocate the extent maps, see the comment near
	 * the bottom of the loop below. We only need two spare extent maps in
	 * the worst case, where the first extent map that intersects our range
	 * starts before the range and the last extent map that intersects our
	 * range ends after our range (and they might be the same extent map),
	 * because we need to split those two extent maps at the boundaries.
	 */
	split = alloc_extent_map();
	split2 = alloc_extent_map();

	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);

	while (em) {
		/* extent_map_end() returns exclusive value (last byte + 1). */
		const u64 em_end = extent_map_end(em);
		struct extent_map *next_em = NULL;
		u64 gen;
		unsigned long flags;
		bool modified;
		bool compressed;

		if (em_end < end) {
			next_em = next_extent_map(em);
			if (next_em) {
				if (next_em->start < end)
					refcount_inc(&next_em->refs);
				else
					next_em = NULL;
			}
		}

		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			start = em_end;
			goto next;
		}

		flags = em->flags;
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		/*
		 * In case we split the extent map, we want to preserve the
		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't
		 * want it on the new extent maps created for the split parts,
		 * so clear it on the local copy of the flags.
		 */
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);

		/*
		 * The extent map does not cross our target range, so no need to
		 * split it, we can remove it directly.
		 */
		if (em->start >= start && em_end <= end)
			goto remove_em;

		gen = em->generation;
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		if (em->start < start) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em_end > end) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = end;
			split->len = em_end - end;
			split->block_start = em->block_start;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->orig_start = em->orig_start;
				} else {
					const u64 diff = end - em->start;

					split->block_len = split->len;
					split->block_start += diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				int ret;

				ret = add_extent_mapping(em_tree, split,
							 modified);
				/* Logic error, shouldn't happen. */
				ASSERT(ret == 0);
				if (WARN_ON(ret != 0) && modified)
					btrfs_set_inode_full_sync(inode);
			}
			free_extent_map(split);
			split = NULL;
		}
remove_em:
		if (extent_map_in_tree(em)) {
			/*
			 * If the extent map is still in the tree it means that
			 * either of the following is true:
			 *
			 * 1) It fits entirely in our range (doesn't end beyond
			 *    it or starts before it);
			 *
			 * 2) It starts before our range and/or ends after our
			 *    range, and we were not able to allocate the extent
			 *    maps for split operations, @split and @split2.
			 *
			 * If we are at case 2) then we just remove the entire
			 * extent map - this is fine since anyone who needs to
			 * access the subranges outside our range will just
			 * load it again from the subvolume tree's file extent
			 * item. However if the extent map was in the list of
			 * modified extents, then we must mark the inode for a
			 * full fsync, otherwise a fast fsync will miss this
			 * extent if it's new and needs to be logged.
			 */
			if ((em->start < start || em_end > end) && modified) {
				ASSERT(!split);
				btrfs_set_inode_full_sync(inode);
			}
			remove_extent_mapping(em_tree, em);
		}

		/*
		 * Once for the tree reference (we replaced or removed the
		 * extent map from the tree).
		 */
		free_extent_map(em);
next:
		/* Once for us (for our lookup reference). */
		free_extent_map(em);

		em = next_em;
	}

	write_unlock(&em_tree->lock);

	free_extent_map(split);
	free_extent_map(split2);
}
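
/*
 * Usage sketch (illustrative, hypothetical range): drop every extent map
 * covering the first 1M of an inode, splitting any map that extends beyond
 * that range.  The caller is expected to hold a lock on the file range in
 * the inode's io tree:
 *
 *	btrfs_drop_extent_map_range(inode, 0, SZ_1M - 1, false);
 */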

/*
 * Replace a range in the inode's extent map tree with a new extent map.
 *
 * @inode:      The target inode.
 * @new_em:     The new extent map to add to the inode's extent map tree.
 * @modified:   Indicate if the new extent map should be added to the list of
 *              modified extents (for fast fsync tracking).
 *
 * Drops all the extent maps in the inode's extent map tree that intersect the
 * range of the new extent map and adds the new extent map to the tree.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
				   struct extent_map *new_em,
				   bool modified)
{
	const u64 end = new_em->start + new_em->len - 1;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret;

	ASSERT(!extent_map_in_tree(new_em));

	/*
	 * The caller has locked an appropriate file range in the inode's io
	 * tree, but getting -EEXIST when adding the new extent map can still
	 * happen in case there are extents that partially cover the range, and
	 * this is due to two tasks operating on different parts of the extent.
	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
	 * btrfs_get_extent") for an example and details.
	 */
	do {
		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
		write_lock(&tree->lock);
		ret = add_extent_mapping(tree, new_em, modified);
		write_unlock(&tree->lock);
	} while (ret == -EEXIST);

	return ret;
}
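
/*
 * Usage sketch (illustrative): after allocating and filling @new_em as in the
 * alloc_extent_map() example above, substitute it for whatever currently
 * covers its range, then drop the caller's reference (the tree keeps its own):
 *
 *	ret = btrfs_replace_extent_map_range(inode, new_em, true);
 *	free_extent_map(new_em);
 */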
979