1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2c1d7c514SDavid Sterba
3d1310b2eSChris Mason #include <linux/err.h>
4d1310b2eSChris Mason #include <linux/slab.h>
5a52d9a80SChris Mason #include <linux/spinlock.h>
69b569ea0SJosef Bacik #include "messages.h"
7261507a0SLi Zefan #include "ctree.h"
81c11b63eSJeff Mahoney #include "volumes.h"
9a52d9a80SChris Mason #include "extent_map.h"
10ebb8765bSAnand Jain #include "compression.h"
114c0c8cfcSFilipe Manana #include "btrfs_inode.h"
12a52d9a80SChris Mason
1386479a04SChris Mason
14a52d9a80SChris Mason static struct kmem_cache *extent_map_cache;
15ca664626SChris Mason
/*
 * Create the slab cache used to allocate struct extent_map objects.
 *
 * Called once at init time.  Returns 0 on success or -ENOMEM if the cache
 * could not be created.
 */
int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}
25a52d9a80SChris Mason
/* Destroy the extent_map slab cache; called on module/subsystem teardown. */
void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}
30a52d9a80SChris Mason
3143dd529aSDavid Sterba /*
3243dd529aSDavid Sterba * Initialize the extent tree @tree. Should be called for each new inode or
3343dd529aSDavid Sterba * other user of the extent_map interface.
349d2423c5SChristoph Hellwig */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	/* Cached rbtree: keeps a pointer to the leftmost (lowest start) node. */
	tree->map = RB_ROOT_CACHED;
	/* Extents that still need to be logged on fsync. */
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}
41a52d9a80SChris Mason
4243dd529aSDavid Sterba /*
4343dd529aSDavid Sterba * Allocate a new extent_map structure. The new structure is returned with a
4443dd529aSDavid Sterba * reference count of one and needs to be freed using free_extent_map()
459d2423c5SChristoph Hellwig */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	/* Not linked into any tree yet. */
	RB_CLEAR_NODE(&em->rb_node);
	em->compress_type = BTRFS_COMPRESS_NONE;
	/* The caller owns the single initial reference. */
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}
58a52d9a80SChris Mason
5943dd529aSDavid Sterba /*
6043dd529aSDavid Sterba * Drop the reference out on @em by one and free the structure if the reference
6143dd529aSDavid Sterba * count hits zero.
629d2423c5SChristoph Hellwig */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (refcount_dec_and_test(&em->refs)) {
		/* Last reference dropped: must no longer be reachable. */
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		/* Chunk mappings own a separately allocated map_lookup. */
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}
75a52d9a80SChris Mason
7643dd529aSDavid Sterba /* Do the math around the end of an extent, handling wrapping. */
/*
 * Return the exclusive end offset of the range [start, start + len),
 * clamping to the maximum u64 value when the addition would wrap.
 */
static u64 range_end(u64 start, u64 len)
{
	const u64 end = start + len;

	/* Wrapped: the range reaches the end of the address space. */
	if (end < start)
		return (u64)-1;
	return end;
}
8332193c14SFilipe David Borba Manana
/*
 * Insert @em into the cached rbtree @root, keyed by em->start.
 *
 * Returns -EEXIST without inserting if any existing entry overlaps the
 * logical range [em->start, em->start + em->len).
 */
static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	/* Standard rbtree descent to find the link point for em->start. */
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* em->start falls inside an existing extent. */
			return -EEXIST;
		}
	}

	/* Walk forward past extents ending at or before em->start ... */
	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	/* ... and reject if the following neighbour overlaps our range. */
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	/* Same check against the preceding neighbour. */
	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}
130a52d9a80SChris Mason
131d352ac68SChris Mason /*
13243dd529aSDavid Sterba * Search through the tree for an extent_map with a given offset. If it can't
13343dd529aSDavid Sterba * be found, try to find some neighboring extents
134d352ac68SChris Mason */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_or_next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	ASSERT(prev_or_next_ret);

	/* Binary search; an extent containing @offset is returned directly. */
	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	/* No exact hit: advance to the first extent ending after @offset. */
	orig_prev = prev;
	while (prev && offset >= extent_map_end(prev_entry)) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}

	/*
	 * Previous extent map found, return as in this case the caller does not
	 * care about the next one.
	 */
	if (prev) {
		*prev_or_next_ret = prev;
		return NULL;
	}

	/* Otherwise walk backwards and hand back the nearest earlier extent. */
	prev = orig_prev;
	prev_entry = rb_entry(prev, struct extent_map, rb_node);
	while (prev && offset < prev_entry->start) {
		prev = rb_prev(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}
	*prev_or_next_ret = prev;

	return NULL;
}
184a52d9a80SChris Mason
18543dd529aSDavid Sterba /* Check to see if two extent_map structs are adjacent and safe to merge. */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	/* Pinned extents are still in flight; don't touch them. */
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	/* Extents being logged must stay untouched until logging finishes. */
	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
	       prev->block_start != EXTENT_MAP_DELALLOC);

	/* map_lookup is only valid on chunk (fs mapping) extent maps. */
	if (prev->map_lookup || next->map_lookup)
		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));

	/*
	 * Mergeable when logically contiguous, flags match, and the block
	 * addresses are both holes, both inline, or physically contiguous.
	 */
	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->map_lookup == next->map_lookup &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
230a52d9a80SChris Mason
/*
 * Try to merge @em with its tree neighbours (previous first, then next).
 * On a successful merge the absorbed neighbour is removed from the tree
 * and its tree reference is dropped.
 */
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in
	 * inconsistent state during the merging. We always have 1 reference for
	 * the tree and 1 for this task (which is unpinning the extent map or
	 * clearing the logging flag), so anything > 2 means it's being used by
	 * other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;

	/* Absorb the previous (lower offset) extent into @em if possible. */
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			set_bit(EXTENT_FLAG_MERGED, &em->flags);

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	/* Then try to absorb the next (higher offset) extent. */
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		set_bit(EXTENT_FLAG_MERGED, &em->flags);
		free_extent_map(merge);
	}
}
2824d2c8f62SLi Zefan
28343dd529aSDavid Sterba /*
28443dd529aSDavid Sterba * Unpin an extent from the cache.
28543dd529aSDavid Sterba *
2865dc562c5SJosef Bacik * @tree: tree to unpin the extent in
2875dc562c5SJosef Bacik * @start: logical offset in the file
2885dc562c5SJosef Bacik * @len: length of the extent
2895dc562c5SJosef Bacik * @gen: generation that this extent has been modified in
2905dc562c5SJosef Bacik *
2915dc562c5SJosef Bacik * Called after an extent has been written to disk properly. Set the generation
2925dc562c5SJosef Bacik * to the generation that actually added the file item to the inode so we know
2935dc562c5SJosef Bacik * we need to sync this extent when we call fsync().
2945dc562c5SJosef Bacik */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	/* The caller should always unpin an extent it previously pinned. */
	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	/*
	 * For a filled prealloc extent, re-expand the modified range so it
	 * covers the whole (possibly merged) extent.
	 */
	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	/* Drop the reference taken by lookup_extent_mapping(). */
	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;

}
333a1ed835eSChris Mason
/*
 * Clear the logging flag on @em and attempt the merge that was deferred
 * while the extent was being logged.  Caller must hold tree->lock for write.
 */
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}
342201a9038SJosef Bacik
/*
 * Common post-insert setup: take the tree's reference on @em and either
 * queue it for logging (@modified) or try merging it with its neighbours.
 */
static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	/* Extra reference held by the tree itself. */
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}
356176840b3SFilipe Manana
/*
 * Set @bits on the alloc_state io tree of every device covered by the
 * chunk mapping @em, for each stripe's physical range.
 */
static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	/* For chunk maps, orig_block_len holds the per-device stripe size. */
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		set_extent_bit(&device->alloc_state, stripe->physical,
			       stripe->physical + stripe_size - 1,
			       bits | EXTENT_NOWAIT, NULL);
	}
}
3721c11b63eSJeff Mahoney
/*
 * Clear @bits from the alloc_state io tree of every device covered by the
 * chunk mapping @em; counterpart of extent_map_device_set_bits().
 */
static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	/* For chunk maps, orig_block_len holds the per-device stripe size. */
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		__clear_extent_bit(&device->alloc_state, stripe->physical,
				   stripe->physical + stripe_size - 1,
				   bits | EXTENT_NOWAIT,
				   NULL, NULL);
	}
}
3891c11b63eSJeff Mahoney
39043dd529aSDavid Sterba /*
391401bd2ddSNikolay Borisov * Add new extent map to the extent tree
392401bd2ddSNikolay Borisov *
3939d2423c5SChristoph Hellwig * @tree: tree to insert new map in
3949d2423c5SChristoph Hellwig * @em: map to insert
395401bd2ddSNikolay Borisov * @modified: indicate whether the given @em should be added to the
396401bd2ddSNikolay Borisov * modified list, which indicates the extent needs to be logged
3979d2423c5SChristoph Hellwig *
3989d2423c5SChristoph Hellwig * Insert @em into @tree or perform a simple forward/backward merge with
3999d2423c5SChristoph Hellwig * existing mappings. The extent_map struct passed in will be inserted
4009d2423c5SChristoph Hellwig * into the tree directly, with an additional reference taken, or a
40125985edcSLucas De Marchi * reference dropped if the merge attempt was successful.
402a52d9a80SChris Mason */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
	/* For chunk mappings, record the allocation on each member device. */
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
	}
out:
	return ret;
}
422a52d9a80SChris Mason
/*
 * Core lookup for [start, start + len).  With @strict set, only a map that
 * actually intersects the range is returned; without it, the nearest
 * neighbouring map may be returned.  The returned map carries an extra
 * reference that the caller must drop with free_extent_map().
 */
static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev_or_next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next);
	if (!rb_node) {
		/* No exact hit; maybe fall back to a neighbour. */
		if (prev_or_next)
			rb_node = prev_or_next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	/* In strict mode a non-intersecting neighbour is not a match. */
	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}
448ed64f066SLi Zefan
44943dd529aSDavid Sterba /*
45043dd529aSDavid Sterba * Lookup extent_map that intersects @start + @len range.
45143dd529aSDavid Sterba *
4529d2423c5SChristoph Hellwig * @tree: tree to lookup in
4539d2423c5SChristoph Hellwig * @start: byte offset to start the search
4549d2423c5SChristoph Hellwig * @len: length of the lookup range
4559d2423c5SChristoph Hellwig *
4569d2423c5SChristoph Hellwig * Find and return the first extent_map struct in @tree that intersects the
4579d2423c5SChristoph Hellwig * [start, len] range. There may be additional objects in the tree that
4589d2423c5SChristoph Hellwig * intersect, so check the object returned carefully to make sure that no
4599d2423c5SChristoph Hellwig * additional lookups are needed.
460a52d9a80SChris Mason */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	/* strict=1: only return a map that actually intersects the range. */
	return __lookup_extent_mapping(tree, start, len, 1);
}
466a52d9a80SChris Mason
46743dd529aSDavid Sterba /*
46843dd529aSDavid Sterba * Find a nearby extent map intersecting @start + @len (not an exact search).
46943dd529aSDavid Sterba *
470b917b7c3SChris Mason * @tree: tree to lookup in
471b917b7c3SChris Mason * @start: byte offset to start the search
472b917b7c3SChris Mason * @len: length of the lookup range
473b917b7c3SChris Mason *
474b917b7c3SChris Mason * Find and return the first extent_map struct in @tree that intersects the
475b917b7c3SChris Mason * [start, len] range.
476b917b7c3SChris Mason *
477b917b7c3SChris Mason * If one can't be found, any nearby extent may be returned
478b917b7c3SChris Mason */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	/* strict=0: a nearby, non-intersecting map may be returned. */
	return __lookup_extent_mapping(tree, start, len, 0);
}
484b917b7c3SChris Mason
48543dd529aSDavid Sterba /*
48643dd529aSDavid Sterba * Remove an extent_map from the extent tree.
48743dd529aSDavid Sterba *
4889d2423c5SChristoph Hellwig * @tree: extent tree to remove from
489bb7ab3b9SAdam Buchbinder * @em: extent map being removed
4909d2423c5SChristoph Hellwig *
49143dd529aSDavid Sterba * Remove @em from @tree. No reference counts are dropped, and no checks
49243dd529aSDavid Sterba * are done to see if the range is in use.
493a52d9a80SChris Mason */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	/* Pinned extents must not be removed from the tree. */
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase_cached(&em->rb_node, &tree->map);
	/* Extents still being logged stay on the modified list. */
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	/* For chunk mappings, drop the per-device allocation record. */
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
	RB_CLEAR_NODE(&em->rb_node);
}
506176840b3SFilipe Manana
/*
 * Replace @cur with @new at the same position in @tree.  No reference is
 * dropped on @cur; the caller still owns it.  Caller must hold tree->lock
 * for write.
 */
static void replace_extent_mapping(struct extent_map_tree *tree,
				   struct extent_map *cur,
				   struct extent_map *new,
				   int modified)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}
523c04e61b5SLiu Bo
next_extent_map(const struct extent_map * em)524d47704bdSFilipe Manana static struct extent_map *next_extent_map(const struct extent_map *em)
525c04e61b5SLiu Bo {
526c04e61b5SLiu Bo struct rb_node *next;
527c04e61b5SLiu Bo
528c04e61b5SLiu Bo next = rb_next(&em->rb_node);
529c04e61b5SLiu Bo if (!next)
530c04e61b5SLiu Bo return NULL;
531c04e61b5SLiu Bo return container_of(next, struct extent_map, rb_node);
532c04e61b5SLiu Bo }
533c04e61b5SLiu Bo
prev_extent_map(struct extent_map * em)534c04e61b5SLiu Bo static struct extent_map *prev_extent_map(struct extent_map *em)
535c04e61b5SLiu Bo {
536c04e61b5SLiu Bo struct rb_node *prev;
537c04e61b5SLiu Bo
538c04e61b5SLiu Bo prev = rb_prev(&em->rb_node);
539c04e61b5SLiu Bo if (!prev)
540c04e61b5SLiu Bo return NULL;
541c04e61b5SLiu Bo return container_of(prev, struct extent_map, rb_node);
542c04e61b5SLiu Bo }
543c04e61b5SLiu Bo
54452042d8eSAndrea Gelmini /*
54552042d8eSAndrea Gelmini * Helper for btrfs_get_extent. Given an existing extent in the tree,
546c04e61b5SLiu Bo * the existing extent is the nearest extent to map_start,
547c04e61b5SLiu Bo * and an extent that you want to insert, deal with overlap and insert
548c04e61b5SLiu Bo * the best fitted new extent into the tree.
549c04e61b5SLiu Bo */
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	/* Work out which tree neighbours bracket the insertion point. */
	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	/* Trim @em to the free gap [start, end) between prev and next. */
	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	/* Shift the physical start too, unless hole/inline or compressed. */
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}
585c04e61b5SLiu Bo
58643dd529aSDavid Sterba /*
58743dd529aSDavid Sterba * Add extent mapping into em_tree.
5889ad37bb3SNikolay Borisov *
5899ad37bb3SNikolay Borisov * @fs_info: the filesystem
5909ad37bb3SNikolay Borisov * @em_tree: extent tree into which we want to insert the extent mapping
5919ad37bb3SNikolay Borisov * @em_in: extent we are inserting
5929ad37bb3SNikolay Borisov * @start: start of the logical range btrfs_get_extent() is requesting
5939ad37bb3SNikolay Borisov * @len: length of the logical range btrfs_get_extent() is requesting
594c04e61b5SLiu Bo *
595c04e61b5SLiu Bo * Note that @em_in's range may be different from [start, start+len),
596c04e61b5SLiu Bo * but they must be overlapped.
597c04e61b5SLiu Bo *
598c04e61b5SLiu Bo * Insert @em_in into @em_tree. In case there is an overlapping range, handle
599c04e61b5SLiu Bo * the -EEXIST by either:
600c04e61b5SLiu Bo * a) Returning the existing extent in @em_in if @start is within the
601c04e61b5SLiu Bo * existing em.
602c04e61b5SLiu Bo * b) Merge the existing extent with @em_in passed in.
603c04e61b5SLiu Bo *
604c04e61b5SLiu Bo * Return 0 on success, otherwise -EEXIST.
605c04e61b5SLiu Bo *
606c04e61b5SLiu Bo */
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	/*
	 * Tree-checker should have rejected any inline extent with non-zero
	 * file offset. Here just do a sanity check.
	 */
	if (em->block_start == EXTENT_MAP_INLINE)
		ASSERT(em->start == 0);

	ret = add_extent_mapping(em_tree, em, 0);
	/*
	 * It is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be an
		 * extent causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			/*
			 * Case a): @start lies inside the existing extent map,
			 * so hand the existing one back to the caller.  Drop
			 * our reference on @em; ownership of the lookup
			 * reference on @existing transfers to *em_in.
			 */
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			/* Saved only for the WARN_ONCE below; em may be freed. */
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * Case b): the existing extent map is the one nearest
			 * to the [start, start + len) range which overlaps it,
			 * so trim @em to the gap and insert it.
			 */
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				/* Merge failed: @em is gone, tell the caller. */
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			/* Drop the reference taken by search_extent_mapping(). */
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}
6694c0c8cfcSFilipe Manana
6704c0c8cfcSFilipe Manana /*
6719c9d1b4fSFilipe Manana * Drop all extent maps from a tree in the fastest possible way, rescheduling
6729c9d1b4fSFilipe Manana * if needed. This avoids searching the tree, from the root down to the first
6739c9d1b4fSFilipe Manana * extent map, before each deletion.
6749c9d1b4fSFilipe Manana */
drop_all_extent_maps_fast(struct extent_map_tree * tree)6759c9d1b4fSFilipe Manana static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
6769c9d1b4fSFilipe Manana {
6779c9d1b4fSFilipe Manana write_lock(&tree->lock);
6789c9d1b4fSFilipe Manana while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
6799c9d1b4fSFilipe Manana struct extent_map *em;
6809c9d1b4fSFilipe Manana struct rb_node *node;
6819c9d1b4fSFilipe Manana
6829c9d1b4fSFilipe Manana node = rb_first_cached(&tree->map);
6839c9d1b4fSFilipe Manana em = rb_entry(node, struct extent_map, rb_node);
6849c9d1b4fSFilipe Manana clear_bit(EXTENT_FLAG_PINNED, &em->flags);
6859c9d1b4fSFilipe Manana clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
6869c9d1b4fSFilipe Manana remove_extent_mapping(tree, em);
6879c9d1b4fSFilipe Manana free_extent_map(em);
6889c9d1b4fSFilipe Manana cond_resched_rwlock_write(&tree->lock);
6899c9d1b4fSFilipe Manana }
6909c9d1b4fSFilipe Manana write_unlock(&tree->lock);
6919c9d1b4fSFilipe Manana }
6929c9d1b4fSFilipe Manana
6939c9d1b4fSFilipe Manana /*
6944c0c8cfcSFilipe Manana * Drop all extent maps in a given range.
6954c0c8cfcSFilipe Manana *
6964c0c8cfcSFilipe Manana * @inode: The target inode.
6974c0c8cfcSFilipe Manana * @start: Start offset of the range.
6984c0c8cfcSFilipe Manana * @end: End offset of the range (inclusive value).
6994c0c8cfcSFilipe Manana * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
7004c0c8cfcSFilipe Manana *
7014c0c8cfcSFilipe Manana * This drops all the extent maps that intersect the given range [@start, @end].
7024c0c8cfcSFilipe Manana * Extent maps that partially overlap the range and extend behind or beyond it,
7034c0c8cfcSFilipe Manana * are split.
7044c0c8cfcSFilipe Manana * The caller should have locked an appropriate file range in the inode's io
7054c0c8cfcSFilipe Manana * tree before calling this function.
7064c0c8cfcSFilipe Manana */
btrfs_drop_extent_map_range(struct btrfs_inode * inode,u64 start,u64 end,bool skip_pinned)7074c0c8cfcSFilipe Manana void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
7084c0c8cfcSFilipe Manana bool skip_pinned)
7094c0c8cfcSFilipe Manana {
710db21370bSFilipe Manana struct extent_map *split;
711db21370bSFilipe Manana struct extent_map *split2;
712db21370bSFilipe Manana struct extent_map *em;
7134c0c8cfcSFilipe Manana struct extent_map_tree *em_tree = &inode->extent_tree;
7144c0c8cfcSFilipe Manana u64 len = end - start + 1;
7154c0c8cfcSFilipe Manana
7164c0c8cfcSFilipe Manana WARN_ON(end < start);
7174c0c8cfcSFilipe Manana if (end == (u64)-1) {
7189c9d1b4fSFilipe Manana if (start == 0 && !skip_pinned) {
7199c9d1b4fSFilipe Manana drop_all_extent_maps_fast(em_tree);
7209c9d1b4fSFilipe Manana return;
7219c9d1b4fSFilipe Manana }
7224c0c8cfcSFilipe Manana len = (u64)-1;
723db21370bSFilipe Manana } else {
724db21370bSFilipe Manana /* Make end offset exclusive for use in the loop below. */
725db21370bSFilipe Manana end++;
7264c0c8cfcSFilipe Manana }
7274c0c8cfcSFilipe Manana
728db21370bSFilipe Manana /*
729db21370bSFilipe Manana * It's ok if we fail to allocate the extent maps, see the comment near
730db21370bSFilipe Manana * the bottom of the loop below. We only need two spare extent maps in
731db21370bSFilipe Manana * the worst case, where the first extent map that intersects our range
732db21370bSFilipe Manana * starts before the range and the last extent map that intersects our
733db21370bSFilipe Manana * range ends after our range (and they might be the same extent map),
734db21370bSFilipe Manana * because we need to split those two extent maps at the boundaries.
735db21370bSFilipe Manana */
7364c0c8cfcSFilipe Manana split = alloc_extent_map();
7374c0c8cfcSFilipe Manana split2 = alloc_extent_map();
7384c0c8cfcSFilipe Manana
7394c0c8cfcSFilipe Manana write_lock(&em_tree->lock);
7404c0c8cfcSFilipe Manana em = lookup_extent_mapping(em_tree, start, len);
741db21370bSFilipe Manana
742db21370bSFilipe Manana while (em) {
743db21370bSFilipe Manana /* extent_map_end() returns exclusive value (last byte + 1). */
744db21370bSFilipe Manana const u64 em_end = extent_map_end(em);
745db21370bSFilipe Manana struct extent_map *next_em = NULL;
746db21370bSFilipe Manana u64 gen;
747db21370bSFilipe Manana unsigned long flags;
748db21370bSFilipe Manana bool modified;
749db21370bSFilipe Manana bool compressed;
750db21370bSFilipe Manana
751db21370bSFilipe Manana if (em_end < end) {
752db21370bSFilipe Manana next_em = next_extent_map(em);
753db21370bSFilipe Manana if (next_em) {
754db21370bSFilipe Manana if (next_em->start < end)
755db21370bSFilipe Manana refcount_inc(&next_em->refs);
756db21370bSFilipe Manana else
757db21370bSFilipe Manana next_em = NULL;
7584c0c8cfcSFilipe Manana }
759db21370bSFilipe Manana }
760db21370bSFilipe Manana
7614c0c8cfcSFilipe Manana if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
762f3109e33SFilipe Manana start = em_end;
763db21370bSFilipe Manana goto next;
7644c0c8cfcSFilipe Manana }
765db21370bSFilipe Manana
766e4cc1483SFilipe Manana flags = em->flags;
7674c0c8cfcSFilipe Manana clear_bit(EXTENT_FLAG_PINNED, &em->flags);
768e4cc1483SFilipe Manana /*
769e4cc1483SFilipe Manana * In case we split the extent map, we want to preserve the
770e4cc1483SFilipe Manana * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
771e4cc1483SFilipe Manana * it on the new extent maps.
772e4cc1483SFilipe Manana */
7734c0c8cfcSFilipe Manana clear_bit(EXTENT_FLAG_LOGGING, &flags);
7744c0c8cfcSFilipe Manana modified = !list_empty(&em->list);
775db21370bSFilipe Manana
776db21370bSFilipe Manana /*
777db21370bSFilipe Manana * The extent map does not cross our target range, so no need to
778db21370bSFilipe Manana * split it, we can remove it directly.
779db21370bSFilipe Manana */
780db21370bSFilipe Manana if (em->start >= start && em_end <= end)
781db21370bSFilipe Manana goto remove_em;
782db21370bSFilipe Manana
783db21370bSFilipe Manana gen = em->generation;
784db21370bSFilipe Manana compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7854c0c8cfcSFilipe Manana
7864c0c8cfcSFilipe Manana if (em->start < start) {
787db21370bSFilipe Manana if (!split) {
788db21370bSFilipe Manana split = split2;
789db21370bSFilipe Manana split2 = NULL;
790db21370bSFilipe Manana if (!split)
791db21370bSFilipe Manana goto remove_em;
792db21370bSFilipe Manana }
7934c0c8cfcSFilipe Manana split->start = em->start;
7944c0c8cfcSFilipe Manana split->len = start - em->start;
7954c0c8cfcSFilipe Manana
7964c0c8cfcSFilipe Manana if (em->block_start < EXTENT_MAP_LAST_BYTE) {
7974c0c8cfcSFilipe Manana split->orig_start = em->orig_start;
7984c0c8cfcSFilipe Manana split->block_start = em->block_start;
7994c0c8cfcSFilipe Manana
8004c0c8cfcSFilipe Manana if (compressed)
8014c0c8cfcSFilipe Manana split->block_len = em->block_len;
8024c0c8cfcSFilipe Manana else
8034c0c8cfcSFilipe Manana split->block_len = split->len;
8044c0c8cfcSFilipe Manana split->orig_block_len = max(split->block_len,
8054c0c8cfcSFilipe Manana em->orig_block_len);
8064c0c8cfcSFilipe Manana split->ram_bytes = em->ram_bytes;
8074c0c8cfcSFilipe Manana } else {
8084c0c8cfcSFilipe Manana split->orig_start = split->start;
8094c0c8cfcSFilipe Manana split->block_len = 0;
8104c0c8cfcSFilipe Manana split->block_start = em->block_start;
8114c0c8cfcSFilipe Manana split->orig_block_len = 0;
8124c0c8cfcSFilipe Manana split->ram_bytes = split->len;
8134c0c8cfcSFilipe Manana }
8144c0c8cfcSFilipe Manana
8154c0c8cfcSFilipe Manana split->generation = gen;
8164c0c8cfcSFilipe Manana split->flags = flags;
8174c0c8cfcSFilipe Manana split->compress_type = em->compress_type;
8184c0c8cfcSFilipe Manana replace_extent_mapping(em_tree, em, split, modified);
8194c0c8cfcSFilipe Manana free_extent_map(split);
8204c0c8cfcSFilipe Manana split = split2;
8214c0c8cfcSFilipe Manana split2 = NULL;
8224c0c8cfcSFilipe Manana }
823db21370bSFilipe Manana if (em_end > end) {
824db21370bSFilipe Manana if (!split) {
825db21370bSFilipe Manana split = split2;
826db21370bSFilipe Manana split2 = NULL;
827db21370bSFilipe Manana if (!split)
828db21370bSFilipe Manana goto remove_em;
829db21370bSFilipe Manana }
830c962098cSJosef Bacik split->start = end;
831c962098cSJosef Bacik split->len = em_end - end;
8324c0c8cfcSFilipe Manana split->block_start = em->block_start;
8334c0c8cfcSFilipe Manana split->flags = flags;
8344c0c8cfcSFilipe Manana split->compress_type = em->compress_type;
8354c0c8cfcSFilipe Manana split->generation = gen;
8364c0c8cfcSFilipe Manana
8374c0c8cfcSFilipe Manana if (em->block_start < EXTENT_MAP_LAST_BYTE) {
8384c0c8cfcSFilipe Manana split->orig_block_len = max(em->block_len,
8394c0c8cfcSFilipe Manana em->orig_block_len);
8404c0c8cfcSFilipe Manana
8414c0c8cfcSFilipe Manana split->ram_bytes = em->ram_bytes;
8424c0c8cfcSFilipe Manana if (compressed) {
8434c0c8cfcSFilipe Manana split->block_len = em->block_len;
8444c0c8cfcSFilipe Manana split->orig_start = em->orig_start;
8454c0c8cfcSFilipe Manana } else {
846*73aa8ea0SQu Wenruo const u64 diff = end - em->start;
8474c0c8cfcSFilipe Manana
8484c0c8cfcSFilipe Manana split->block_len = split->len;
8494c0c8cfcSFilipe Manana split->block_start += diff;
8504c0c8cfcSFilipe Manana split->orig_start = em->orig_start;
8514c0c8cfcSFilipe Manana }
8524c0c8cfcSFilipe Manana } else {
8534c0c8cfcSFilipe Manana split->ram_bytes = split->len;
8544c0c8cfcSFilipe Manana split->orig_start = split->start;
8554c0c8cfcSFilipe Manana split->block_len = 0;
8564c0c8cfcSFilipe Manana split->orig_block_len = 0;
8574c0c8cfcSFilipe Manana }
8584c0c8cfcSFilipe Manana
8594c0c8cfcSFilipe Manana if (extent_map_in_tree(em)) {
8604c0c8cfcSFilipe Manana replace_extent_mapping(em_tree, em, split,
8614c0c8cfcSFilipe Manana modified);
8624c0c8cfcSFilipe Manana } else {
8634c0c8cfcSFilipe Manana int ret;
8644c0c8cfcSFilipe Manana
8654c0c8cfcSFilipe Manana ret = add_extent_mapping(em_tree, split,
8664c0c8cfcSFilipe Manana modified);
8674c0c8cfcSFilipe Manana /* Logic error, shouldn't happen. */
8684c0c8cfcSFilipe Manana ASSERT(ret == 0);
8694c0c8cfcSFilipe Manana if (WARN_ON(ret != 0) && modified)
8704c0c8cfcSFilipe Manana btrfs_set_inode_full_sync(inode);
8714c0c8cfcSFilipe Manana }
8724c0c8cfcSFilipe Manana free_extent_map(split);
8734c0c8cfcSFilipe Manana split = NULL;
8744c0c8cfcSFilipe Manana }
875db21370bSFilipe Manana remove_em:
8764c0c8cfcSFilipe Manana if (extent_map_in_tree(em)) {
8774c0c8cfcSFilipe Manana /*
8784c0c8cfcSFilipe Manana * If the extent map is still in the tree it means that
8794c0c8cfcSFilipe Manana * either of the following is true:
8804c0c8cfcSFilipe Manana *
8814c0c8cfcSFilipe Manana * 1) It fits entirely in our range (doesn't end beyond
8824c0c8cfcSFilipe Manana * it or starts before it);
8834c0c8cfcSFilipe Manana *
8844c0c8cfcSFilipe Manana * 2) It starts before our range and/or ends after our
8854c0c8cfcSFilipe Manana * range, and we were not able to allocate the extent
8864c0c8cfcSFilipe Manana * maps for split operations, @split and @split2.
8874c0c8cfcSFilipe Manana *
8884c0c8cfcSFilipe Manana * If we are at case 2) then we just remove the entire
8894c0c8cfcSFilipe Manana * extent map - this is fine since if anyone needs it to
8904c0c8cfcSFilipe Manana * access the subranges outside our range, will just
8914c0c8cfcSFilipe Manana * load it again from the subvolume tree's file extent
8924c0c8cfcSFilipe Manana * item. However if the extent map was in the list of
8934c0c8cfcSFilipe Manana * modified extents, then we must mark the inode for a
8944c0c8cfcSFilipe Manana * full fsync, otherwise a fast fsync will miss this
8954c0c8cfcSFilipe Manana * extent if it's new and needs to be logged.
8964c0c8cfcSFilipe Manana */
897db21370bSFilipe Manana if ((em->start < start || em_end > end) && modified) {
898db21370bSFilipe Manana ASSERT(!split);
8994c0c8cfcSFilipe Manana btrfs_set_inode_full_sync(inode);
9004c0c8cfcSFilipe Manana }
9014c0c8cfcSFilipe Manana remove_extent_mapping(em_tree, em);
9024c0c8cfcSFilipe Manana }
9034c0c8cfcSFilipe Manana
904db21370bSFilipe Manana /*
905db21370bSFilipe Manana * Once for the tree reference (we replaced or removed the
906db21370bSFilipe Manana * extent map from the tree).
907db21370bSFilipe Manana */
9084c0c8cfcSFilipe Manana free_extent_map(em);
909db21370bSFilipe Manana next:
910db21370bSFilipe Manana /* Once for us (for our lookup reference). */
9114c0c8cfcSFilipe Manana free_extent_map(em);
912db21370bSFilipe Manana
913db21370bSFilipe Manana em = next_em;
9144c0c8cfcSFilipe Manana }
9154c0c8cfcSFilipe Manana
916db21370bSFilipe Manana write_unlock(&em_tree->lock);
917db21370bSFilipe Manana
9184c0c8cfcSFilipe Manana free_extent_map(split);
9194c0c8cfcSFilipe Manana free_extent_map(split2);
9204c0c8cfcSFilipe Manana }
921a1ba4c08SFilipe Manana
922a1ba4c08SFilipe Manana /*
923a1ba4c08SFilipe Manana * Replace a range in the inode's extent map tree with a new extent map.
924a1ba4c08SFilipe Manana *
925a1ba4c08SFilipe Manana * @inode: The target inode.
926a1ba4c08SFilipe Manana * @new_em: The new extent map to add to the inode's extent map tree.
927a1ba4c08SFilipe Manana * @modified: Indicate if the new extent map should be added to the list of
928a1ba4c08SFilipe Manana * modified extents (for fast fsync tracking).
929a1ba4c08SFilipe Manana *
930a1ba4c08SFilipe Manana * Drops all the extent maps in the inode's extent map tree that intersect the
931a1ba4c08SFilipe Manana * range of the new extent map and adds the new extent map to the tree.
932a1ba4c08SFilipe Manana * The caller should have locked an appropriate file range in the inode's io
933a1ba4c08SFilipe Manana * tree before calling this function.
934a1ba4c08SFilipe Manana */
btrfs_replace_extent_map_range(struct btrfs_inode * inode,struct extent_map * new_em,bool modified)935a1ba4c08SFilipe Manana int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
936a1ba4c08SFilipe Manana struct extent_map *new_em,
937a1ba4c08SFilipe Manana bool modified)
938a1ba4c08SFilipe Manana {
939a1ba4c08SFilipe Manana const u64 end = new_em->start + new_em->len - 1;
940a1ba4c08SFilipe Manana struct extent_map_tree *tree = &inode->extent_tree;
941a1ba4c08SFilipe Manana int ret;
942a1ba4c08SFilipe Manana
943a1ba4c08SFilipe Manana ASSERT(!extent_map_in_tree(new_em));
944a1ba4c08SFilipe Manana
945a1ba4c08SFilipe Manana /*
946a1ba4c08SFilipe Manana * The caller has locked an appropriate file range in the inode's io
947a1ba4c08SFilipe Manana * tree, but getting -EEXIST when adding the new extent map can still
948a1ba4c08SFilipe Manana * happen in case there are extents that partially cover the range, and
949a1ba4c08SFilipe Manana * this is due to two tasks operating on different parts of the extent.
950a1ba4c08SFilipe Manana * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
951a1ba4c08SFilipe Manana * btrfs_get_extent") for an example and details.
952a1ba4c08SFilipe Manana */
953a1ba4c08SFilipe Manana do {
954a1ba4c08SFilipe Manana btrfs_drop_extent_map_range(inode, new_em->start, end, false);
955a1ba4c08SFilipe Manana write_lock(&tree->lock);
956a1ba4c08SFilipe Manana ret = add_extent_mapping(tree, new_em, modified);
957a1ba4c08SFilipe Manana write_unlock(&tree->lock);
958a1ba4c08SFilipe Manana } while (ret == -EEXIST);
959a1ba4c08SFilipe Manana
960a1ba4c08SFilipe Manana return ret;
961a1ba4c08SFilipe Manana }
962a6f3e205SChristoph Hellwig
963a6f3e205SChristoph Hellwig /*
964f000bc6fSChristoph Hellwig * Split off the first pre bytes from the extent_map at [start, start + len],
965f000bc6fSChristoph Hellwig * and set the block_start for it to new_logical.
966a6f3e205SChristoph Hellwig *
967a6f3e205SChristoph Hellwig * This function is used when an ordered_extent needs to be split.
968a6f3e205SChristoph Hellwig */
int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
		     u64 new_logical)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	struct extent_map *split_pre = NULL;
	struct extent_map *split_mid = NULL;
	int ret = 0;
	unsigned long flags;

	/* The split point must be inside the extent: 0 < pre < len. */
	ASSERT(pre != 0);
	ASSERT(pre < len);

	/* Allocate both replacement maps up front, before taking any locks. */
	split_pre = alloc_extent_map();
	if (!split_pre)
		return -ENOMEM;
	split_mid = alloc_extent_map();
	if (!split_mid) {
		ret = -ENOMEM;
		goto out_free_pre;
	}

	lock_extent(&inode->io_tree, start, start + len - 1, NULL);
	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (!em) {
		ret = -EIO;
		goto out_unlock;
	}

	/*
	 * The caller (ordered extent splitting) guarantees the extent map
	 * covers exactly [start, start + len), is uncompressed, pinned, not
	 * being logged, and is on the modified list.
	 */
	ASSERT(em->len == len);
	ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
	ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
	ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags));
	ASSERT(!list_empty(&em->list));

	flags = em->flags;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* First, replace the em with a new extent_map starting at em->start. */
	split_pre->start = em->start;
	split_pre->len = pre;
	split_pre->orig_start = split_pre->start;
	/* The first pre bytes are remapped to the new logical address. */
	split_pre->block_start = new_logical;
	split_pre->block_len = split_pre->len;
	split_pre->orig_block_len = split_pre->block_len;
	split_pre->ram_bytes = split_pre->len;
	split_pre->flags = flags;
	split_pre->compress_type = em->compress_type;
	split_pre->generation = em->generation;

	replace_extent_mapping(em_tree, em, split_pre, 1);

	/*
	 * Now we only have an extent_map at:
	 *     [em->start, em->start + pre]
	 */

	/* Insert the middle extent_map. */
	split_mid->start = em->start + pre;
	split_mid->len = em->len - pre;
	split_mid->orig_start = split_mid->start;
	split_mid->block_start = em->block_start + pre;
	split_mid->block_len = split_mid->len;
	split_mid->orig_block_len = split_mid->block_len;
	split_mid->ram_bytes = split_mid->len;
	split_mid->flags = flags;
	split_mid->compress_type = em->compress_type;
	split_mid->generation = em->generation;
	/*
	 * NOTE(review): return value ignored; presumably insertion cannot
	 * fail here because the range was just vacated by the replacement
	 * above — confirm against add_extent_mapping()'s failure modes.
	 */
	add_extent_mapping(em_tree, split_mid, 1);

	/* Once for us */
	free_extent_map(em);
	/* Once for the tree */
	free_extent_map(em);

out_unlock:
	write_unlock(&em_tree->lock);
	unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
	/* free_extent_map() drops our allocation refs; the tree holds its own. */
	free_extent_map(split_mid);
out_free_pre:
	free_extent_map(split_pre);
	return ret;
}
1054