// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
					     sizeof(struct extent_map), 0,
					     SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 *
 * Initialize the extent tree @tree. Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure. The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->compress_type = BTRFS_COMPRESS_NONE;
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}
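/*
 * Illustrative lifecycle (editor's sketch, not part of the original source):
 * every alloc_extent_map(), and every lookup that took a reference, must be
 * balanced by a free_extent_map() call:
 *
 *	struct extent_map *em = alloc_extent_map();
 *
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->len = 4096;
 *	...
 *	free_extent_map(em);	(drops the last reference, freeing em)
 */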
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}
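/*
 * Worked example for tree_insert() (illustrative, not in the original
 * source): with an existing map covering [0, 16K) in the tree, inserting a
 * map that starts at 8K returns -EEXIST, because 8K >= 0 and 8K is below
 * extent_map_end() == 16K. A map starting at 16K descends to the right and
 * is linked in, since end offsets are exclusive.
 */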
/*
 * search through the tree for an extent_map with a given offset. If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_or_next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	ASSERT(prev_or_next_ret);

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	orig_prev = prev;
	while (prev && offset >= extent_map_end(prev_entry)) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}

	/*
	 * Previous extent map found, return as in this case the caller does not
	 * care about the next one.
	 */
	if (prev) {
		*prev_or_next_ret = prev;
		return NULL;
	}

	prev = orig_prev;
	prev_entry = rb_entry(prev, struct extent_map, rb_node);
	while (prev && offset < prev_entry->start) {
		prev = rb_prev(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}
	*prev_or_next_ret = prev;

	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
	       prev->block_start != EXTENT_MAP_DELALLOC);

	if (prev->map_lookup || next->map_lookup)
		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->map_lookup == next->map_lookup &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in
	 * inconsistent state during the merging. We always have 1 reference for
	 * the tree and 1 for this task (which is unpinning the extent map or
	 * clearing the logging flag), so anything > 2 means it's being used by
	 * other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			set_bit(EXTENT_FLAG_MERGED, &em->flags);

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		set_bit(EXTENT_FLAG_MERGED, &em->flags);
		free_extent_map(merge);
	}
}
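/*
 * Illustrative merge (not in the original source): two adjacent,
 * uncompressed, unpinned maps [0, 4K) -> block 1M and [4K, 8K) ->
 * block 1M + 4K pass mergable_maps(), so try_merge_map() on the second
 * map extends it to [0, 8K) -> 1M and removes and frees the first one.
 */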
/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly. Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
				       stripe->physical + stripe_size - 1, bits);
	}
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		__clear_extent_bit(&device->alloc_state, stripe->physical,
				   stripe->physical + stripe_size - 1, bits,
				   NULL, GFP_NOWAIT, NULL);
	}
}

/**
 * Add new extent map to the extent tree
 *
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the given @em should be added to the
 *		modified list, which indicates the extent needs to be logged
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings. The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
	}
out:
	return ret;
}
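/*
 * Typical insertion pattern (illustrative, not from the original file):
 * add_extent_mapping() asserts that the caller already holds the tree lock
 * in write mode:
 *
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em, 0);
 *	write_unlock(&tree->lock);
 *	if (ret == -EEXIST)
 *		(an overlapping map is already present in the tree)
 */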
static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev_or_next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next);
	if (!rb_node) {
		if (prev_or_next)
			rb_node = prev_or_next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range. There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}
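/*
 * Typical lookup pattern (illustrative, not in the original source): the
 * caller serializes with the tree lock and must drop the reference that the
 * lookup took on the returned map:
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		...
 *		free_extent_map(em);
 *	}
 */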
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree. No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
	RB_CLEAR_NODE(&em->rb_node);
}

void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

/*
 * Helper for btrfs_get_extent. Given an existing extent in the tree,
 * the existing extent is the nearest extent to map_start,
 * and an extent that you want to insert, deal with overlap and insert
 * the best fitted new extent into the tree.
 */
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}
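/*
 * Worked example (illustrative, not in the original source): with
 * existing = [0, 8K), em = [4K, 16K) and map_start = 8K, prev is the
 * existing map and next is NULL, so start = max(8K, 4K) = 8K and
 * end = extent_map_end(em) = 16K. The new extent is trimmed to [8K, 16K)
 * and, if it maps to a real disk location and is not compressed,
 * block_start is advanced by start_diff = 4K before insertion.
 */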
/**
 * Add extent mapping into em_tree
 *
 * @fs_info:	the filesystem
 * @em_tree:	extent tree into which we want to insert the extent mapping
 * @em_in:	extent we are inserting
 * @start:	start of the logical range btrfs_get_extent() is requesting
 * @len:	length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start + len),
 * but the two ranges must overlap.
 *
 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
 * the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merging the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	ret = add_extent_mapping(em_tree, em, 0);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be an
		 * extent causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range which overlaps
			 */
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}
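/*
 * Illustrative outcome (not in the original source): if the tree already
 * holds [0, 8K) and btrfs_add_extent_mapping() is called with an em of
 * [0, 12K) for a lookup at start = 8K, the insertion fails with -EEXIST,
 * start falls outside the existing map, and merge_extent_mapping() trims
 * the new em to the uncovered part [8K, 12K) before inserting it.
 */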
/*
 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 * if needed. This avoids searching the tree, from the root down to the first
 * extent map, before each deletion.
 */
static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
{
	write_lock(&tree->lock);
	while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
		struct extent_map *em;
		struct rb_node *node;

		node = rb_first_cached(&tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
		remove_extent_mapping(tree, em);
		free_extent_map(em);
		cond_resched_rwlock_write(&tree->lock);
	}
	write_unlock(&tree->lock);
}

/*
 * Drop all extent maps in a given range.
 *
 * @inode:	The target inode.
 * @start:	Start offset of the range.
 * @end:	End offset of the range (inclusive value).
 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 *
 * This drops all the extent maps that intersect the given range [@start, @end].
 * Extent maps that partially overlap the range and extend behind or beyond it
 * are split.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
				 bool skip_pinned)
{
	struct extent_map *split;
	struct extent_map *split2;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		if (start == 0 && !skip_pinned) {
			drop_all_extent_maps_fast(em_tree);
			return;
		}
		len = (u64)-1;
	} else {
		/* Make end offset exclusive for use in the loop below. */
		end++;
	}

	/*
	 * It's ok if we fail to allocate the extent maps, see the comment near
	 * the bottom of the loop below. We only need two spare extent maps in
	 * the worst case, where the first extent map that intersects our range
	 * starts before the range and the last extent map that intersects our
	 * range ends after our range (and they might be the same extent map),
	 * because we need to split those two extent maps at the boundaries.
	 */
	split = alloc_extent_map();
	split2 = alloc_extent_map();

	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);

	while (em) {
		/* extent_map_end() returns exclusive value (last byte + 1). */
		const u64 em_end = extent_map_end(em);
		struct extent_map *next_em = NULL;
		u64 gen;
		unsigned long flags;
		bool modified;
		bool compressed;

		if (em_end < end) {
			next_em = next_extent_map(em);
			if (next_em) {
				if (next_em->start < end)
					refcount_inc(&next_em->refs);
				else
					next_em = NULL;
			}
		}

		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			/* Shrink the length before advancing the start offset. */
			if (end != (u64)-1)
				len = start + len - em_end;
			start = em_end;
			goto next;
		}

		flags = em->flags;
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		/*
		 * In case we split the extent map, we want to preserve the
		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't
		 * want it on the new extent maps.
		 */
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
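		/*
		 * Worked example for the split logic below (illustrative,
		 * not in the original source): dropping [8K, 16K) from a
		 * map covering [0, 24K) uses both splits: the first keeps
		 * [0, 8K), the second keeps [16K, 24K), and the original
		 * map is then removed from the tree.
		 */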
		/*
		 * The extent map does not cross our target range, so no need to
		 * split it, we can remove it directly.
		 */
		if (em->start >= start && em_end <= end)
			goto remove_em;

		gen = em->generation;
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		if (em->start < start) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em_end > end) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = start + len;
			split->len = em_end - (start + len);
			split->block_start = em->block_start;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->orig_start = em->orig_start;
				} else {
					const u64 diff = start + len - em->start;

					split->block_len = split->len;
					split->block_start += diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				int ret;

				ret = add_extent_mapping(em_tree, split,
							 modified);
				/* Logic error, shouldn't happen. */
				ASSERT(ret == 0);
				if (WARN_ON(ret != 0) && modified)
					btrfs_set_inode_full_sync(inode);
			}
			free_extent_map(split);
			split = NULL;
		}
remove_em:
		if (extent_map_in_tree(em)) {
			/*
			 * If the extent map is still in the tree it means that
			 * either of the following is true:
			 *
			 * 1) It fits entirely in our range (doesn't end beyond
			 *    it or starts before it);
			 *
			 * 2) It starts before our range and/or ends after our
			 *    range, and we were not able to allocate the extent
			 *    maps for split operations, @split and @split2.
			 *
			 * If we are at case 2) then we just remove the entire
			 * extent map - this is fine since if anyone needs to
			 * access the subranges outside our range, it will just
			 * load it again from the subvolume tree's file extent
			 * item. However if the extent map was in the list of
			 * modified extents, then we must mark the inode for a
			 * full fsync, otherwise a fast fsync will miss this
			 * extent if it's new and needs to be logged.
			 */
			if ((em->start < start || em_end > end) && modified) {
				ASSERT(!split);
				btrfs_set_inode_full_sync(inode);
			}
			remove_extent_mapping(em_tree, em);
		}

		/*
		 * Once for the tree reference (we replaced or removed the
		 * extent map from the tree).
		 */
		free_extent_map(em);
next:
		/* Once for us (for our lookup reference). */
		free_extent_map(em);

		em = next_em;
	}

	write_unlock(&em_tree->lock);

	free_extent_map(split);
	free_extent_map(split2);
}

/*
 * Replace a range in the inode's extent map tree with a new extent map.
 *
 * @inode:	The target inode.
 * @new_em:	The new extent map to add to the inode's extent map tree.
 * @modified:	Indicate if the new extent map should be added to the list of
 *		modified extents (for fast fsync tracking).
 *
 * Drops all the extent maps in the inode's extent map tree that intersect the
 * range of the new extent map and adds the new extent map to the tree.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
				   struct extent_map *new_em,
				   bool modified)
{
	const u64 end = new_em->start + new_em->len - 1;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret;

	ASSERT(!extent_map_in_tree(new_em));

	/*
	 * The caller has locked an appropriate file range in the inode's io
	 * tree, but getting -EEXIST when adding the new extent map can still
	 * happen in case there are extents that partially cover the range, and
	 * this is due to two tasks operating on different parts of the extent.
	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
	 * btrfs_get_extent") for an example and details.
	 */
	do {
		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
		write_lock(&tree->lock);
		ret = add_extent_mapping(tree, new_em, modified);
		write_unlock(&tree->lock);
	} while (ret == -EEXIST);

	return ret;
}