// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"
#include "btrfs_inode.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
					     sizeof(struct extent_map), 0,
					     SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 *
 * Initialize the extent tree @tree. Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure. The new structure is returned
 * with a reference count of one and must be released with
 * free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->compress_type = BTRFS_COMPRESS_NONE;
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drop the reference count on @em by one and free the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}
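
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * the helpers above hand out referenced extent maps, so each allocation
 * or lookup is paired with a call to free_extent_map():
 *
 *	struct extent_map *em = alloc_extent_map();
 *
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->len = SZ_4K;
 *	...
 *	free_extent_map(em);
 */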

/* Simple helper to do math around the end of an extent, handling wrap. */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}
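
/*
 * For example (hypothetical values): with a map for [0, 16K) already in
 * the tree, tree_insert() above returns -EEXIST for a new map starting
 * anywhere in [0, 16K), while a map for [16K, 32K) descends to the right
 * of the existing node and is linked in there.
 */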

/*
 * Search through the tree for an extent_map with a given offset. If
 * it can't be found, try to find some neighboring extents.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* Check to see if two extent_map structs are adjacent and safe to merge. */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * Don't merge compressed extents, we need to know their
	 * actual size.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
	       prev->block_start != EXTENT_MAP_DELALLOC);

	if (prev->map_lookup || next->map_lookup)
		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->map_lookup == next->map_lookup &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
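
/*
 * A worked example (hypothetical values): a map for file range [0, 4K)
 * with block_start 1M and a map for [4K, 8K) with block_start 1M + 4K
 * are contiguous both logically and on disk, so, assuming neither is
 * pinned, compressed, or still waiting to be logged, mergable_maps()
 * returns 1 and try_merge_map() below collapses them into a single
 * [0, 8K) map.
 */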

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in
	 * inconsistent state during the merging. We always have 1 reference for
	 * the tree and 1 for this task (which is unpinning the extent map or
	 * clearing the logging flag), so anything > 2 means it's being used by
	 * other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			set_bit(EXTENT_FLAG_MERGED, &em->flags);

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		set_bit(EXTENT_FLAG_MERGED, &em->flags);
		free_extent_map(merge);
	}
}

/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly. Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
				       stripe->physical + stripe_size - 1, bits);
	}
}
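
/*
 * Hypothetical caller sketch (the "ordered" and "trans" names are
 * illustrative, not defined in this file): once an ordered extent has
 * been written, the matching file range is unpinned with the generation
 * of the transaction that added the file extent item:
 *
 *	unpin_extent_cache(&inode->extent_tree, ordered->file_offset,
 *			   ordered->num_bytes, trans->transid);
 */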

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		__clear_extent_bit(&device->alloc_state, stripe->physical,
				   stripe->physical + stripe_size - 1, bits,
				   NULL, GFP_NOWAIT, NULL);
	}
}

/**
 * Add new extent map to the extent tree
 *
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the given @em should be added to the
 *		modified list, which indicates the extent needs to be logged
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings. The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
	}
out:
	return ret;
}

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range. There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
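
/*
 * A minimal lookup sketch (hypothetical caller; handle_uncovered_tail()
 * is a stand-in): the returned map may cover only part of the requested
 * range, so callers typically re-check the boundaries and loop or fall
 * back to the file extent items for the uncovered tail:
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *
 *	if (em && extent_map_end(em) < start + len)
 *		handle_uncovered_tail();
 *	free_extent_map(em);
 */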

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree. No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
	RB_CLEAR_NODE(&em->rb_node);
}

void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}
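
/*
 * Sketch of the removal contract (hypothetical caller): because
 * remove_extent_mapping() above does not drop any references, deleting
 * a map that is only referenced by the tree takes two steps:
 *
 *	write_lock(&tree->lock);
 *	remove_extent_mapping(tree, em);
 *	write_unlock(&tree->lock);
 *	free_extent_map(em);
 *
 * The final call drops the reference that the tree was holding.
 */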

/*
 * Helper for btrfs_get_extent. Given an existing extent in the tree and a
 * new extent that we want to insert, where the existing extent is the one
 * nearest to @map_start, deal with any overlap and insert the best-fitting
 * trimmed version of the new extent into the tree.
 */
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}

/**
 * Add extent mapping into em_tree
 *
 * @fs_info:	the filesystem
 * @em_tree:	extent tree into which we want to insert the extent mapping
 * @em_in:	extent we are inserting
 * @start:	start of the logical range btrfs_get_extent() is requesting
 * @len:	length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start+len),
 * but they must overlap.
 *
 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
 * the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merging the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	ret = add_extent_mapping(em_tree, em, 0);
	/*
	 * It is possible that someone inserted the extent into the tree
	 * while we had the lock dropped. It is also possible that
	 * an overlapping map exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be
		 * an extent causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range which overlaps.
			 */
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}

/*
 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 * if needed. This avoids searching the tree, from the root down to the first
 * extent map, before each deletion.
 */
static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
{
	write_lock(&tree->lock);
	while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
		struct extent_map *em;
		struct rb_node *node;

		node = rb_first_cached(&tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
		remove_extent_mapping(tree, em);
		free_extent_map(em);
		cond_resched_rwlock_write(&tree->lock);
	}
	write_unlock(&tree->lock);
}
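
/*
 * Example of the contract above (hypothetical values): if the tree
 * already holds a map for [0, 8K) and btrfs_add_extent_mapping() is
 * called with an @em_in covering [0, 16K) and @start == 4K, the insert
 * fails with -EEXIST, @start falls inside the existing map, so *em_in is
 * replaced with a reference to the existing [0, 8K) map and 0 is
 * returned.
 */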

/*
 * Drop all extent maps in a given range.
 *
 * @inode:	 The target inode.
 * @start:	 Start offset of the range.
 * @end:	 End offset of the range (inclusive value).
 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 *
 * This drops all the extent maps that intersect the given range [@start, @end].
 * Extent maps that partially overlap the range and extend before or beyond it
 * are split.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
				 bool skip_pinned)
{
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;
	bool testend = true;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		if (start == 0 && !skip_pinned) {
			drop_all_extent_maps_fast(em_tree);
			return;
		}
		len = (u64)-1;
		testend = false;
	}
	while (1) {
		struct extent_map *em;
		u64 em_end;
		u64 gen;
		unsigned long flags;
		bool ends_after_range = false;
		bool no_splits = false;
		bool modified;
		bool compressed;

		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = true;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		em_end = extent_map_end(em);
		if (testend && em_end > start + len)
			ends_after_range = true;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (ends_after_range) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em_end;
			if (testend)
				len = start + len - em_end;
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		flags = em->flags;
		gen = em->generation;
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
							    em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
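
		/*
		 * Illustration (hypothetical offsets): dropping the range
		 * [4K, 8K) from a map covering [0, 16K) creates a front
		 * split for [0, 4K) above, while the block below creates
		 * the tail split for [8K, 16K):
		 *
		 *	|- front -|-- dropped --|--- tail ---|
		 *	0         4K            8K          16K
		 */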
		if (ends_after_range) {
			split->start = start + len;
			split->len = em_end - (start + len);
			split->block_start = em->block_start;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
							    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->orig_start = em->orig_start;
				} else {
					const u64 diff = start + len - em->start;

					split->block_len = split->len;
					split->block_start += diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				int ret;

				ret = add_extent_mapping(em_tree, split,
							 modified);
				/* Logic error, shouldn't happen. */
				ASSERT(ret == 0);
				if (WARN_ON(ret != 0) && modified)
					btrfs_set_inode_full_sync(inode);
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em)) {
			/*
			 * If the extent map is still in the tree it means that
			 * either of the following is true:
			 *
			 * 1) It fits entirely in our range (doesn't end beyond
			 *    it or starts before it);
			 *
			 * 2) It starts before our range and/or ends after our
			 *    range, and we were not able to allocate the extent
			 *    maps for split operations, @split and @split2.
			 *
			 * If we are at case 2) then we just remove the entire
			 * extent map - this is fine since if anyone needs it to
			 * access the subranges outside our range, it will just
			 * load it again from the subvolume tree's file extent
			 * item. However if the extent map was in the list of
			 * modified extents, then we must mark the inode for a
			 * full fsync, otherwise a fast fsync will miss this
			 * extent if it's new and needs to be logged.
			 */
			if ((em->start < start || ends_after_range) && modified) {
				ASSERT(no_splits);
				btrfs_set_inode_full_sync(inode);
			}
			remove_extent_mapping(em_tree, em);
		}
		write_unlock(&em_tree->lock);

		/* Once for us. */
		free_extent_map(em);
		/* And once for the tree. */
		free_extent_map(em);
	}

	free_extent_map(split);
	free_extent_map(split2);
}
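
/*
 * Hypothetical caller sketch: a hole-punching style operation locks the
 * file range in the inode's io tree first and only then drops the cached
 * extent maps (lock_extent()/unlock_extent() shown for context only):
 *
 *	lock_extent(&inode->io_tree, start, end, &cached_state);
 *	btrfs_drop_extent_map_range(inode, start, end, false);
 *	unlock_extent(&inode->io_tree, start, end, &cached_state);
 */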

/*
 * Replace a range in the inode's extent map tree with a new extent map.
 *
 * @inode:	The target inode.
 * @new_em:	The new extent map to add to the inode's extent map tree.
 * @modified:	Indicate if the new extent map should be added to the list of
 *		modified extents (for fast fsync tracking).
 *
 * Drops all the extent maps in the inode's extent map tree that intersect the
 * range of the new extent map and adds the new extent map to the tree.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
				   struct extent_map *new_em,
				   bool modified)
{
	const u64 end = new_em->start + new_em->len - 1;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret;

	ASSERT(!extent_map_in_tree(new_em));

	/*
	 * The caller has locked an appropriate file range in the inode's io
	 * tree, but getting -EEXIST when adding the new extent map can still
	 * happen in case there are extents that partially cover the range, and
	 * this is due to two tasks operating on different parts of the extent.
	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
	 * btrfs_get_extent") for an example and details.
	 */
	do {
		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
		write_lock(&tree->lock);
		ret = add_extent_mapping(tree, new_em, modified);
		write_unlock(&tree->lock);
	} while (ret == -EEXIST);

	return ret;
}
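
/*
 * Hypothetical usage sketch: callers keep their own reference on the new
 * map, and the tree takes an extra one when the map is inserted:
 *
 *	ret = btrfs_replace_extent_map_range(inode, em, true);
 *	...
 *	free_extent_map(em);
 */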