1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/bitops.h> 4 #include <linux/slab.h> 5 #include <linux/bio.h> 6 #include <linux/mm.h> 7 #include <linux/pagemap.h> 8 #include <linux/page-flags.h> 9 #include <linux/spinlock.h> 10 #include <linux/blkdev.h> 11 #include <linux/swap.h> 12 #include <linux/writeback.h> 13 #include <linux/pagevec.h> 14 #include <linux/prefetch.h> 15 #include <linux/cleancache.h> 16 #include "extent_io.h" 17 #include "extent-io-tree.h" 18 #include "extent_map.h" 19 #include "ctree.h" 20 #include "btrfs_inode.h" 21 #include "volumes.h" 22 #include "check-integrity.h" 23 #include "locking.h" 24 #include "rcu-string.h" 25 #include "backref.h" 26 #include "disk-io.h" 27 28 static struct kmem_cache *extent_state_cache; 29 static struct kmem_cache *extent_buffer_cache; 30 static struct bio_set btrfs_bioset; 31 32 static inline bool extent_state_in_tree(const struct extent_state *state) 33 { 34 return !RB_EMPTY_NODE(&state->rb_node); 35 } 36 37 #ifdef CONFIG_BTRFS_DEBUG 38 static LIST_HEAD(states); 39 static DEFINE_SPINLOCK(leak_lock); 40 41 static inline void btrfs_leak_debug_add(spinlock_t *lock, 42 struct list_head *new, 43 struct list_head *head) 44 { 45 unsigned long flags; 46 47 spin_lock_irqsave(lock, flags); 48 list_add(new, head); 49 spin_unlock_irqrestore(lock, flags); 50 } 51 52 static inline void btrfs_leak_debug_del(spinlock_t *lock, 53 struct list_head *entry) 54 { 55 unsigned long flags; 56 57 spin_lock_irqsave(lock, flags); 58 list_del(entry); 59 spin_unlock_irqrestore(lock, flags); 60 } 61 62 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info) 63 { 64 struct extent_buffer *eb; 65 unsigned long flags; 66 67 /* 68 * If we didn't get into open_ctree our allocated_ebs will not be 69 * initialized, so just skip this. 
70 */ 71 if (!fs_info->allocated_ebs.next) 72 return; 73 74 spin_lock_irqsave(&fs_info->eb_leak_lock, flags); 75 while (!list_empty(&fs_info->allocated_ebs)) { 76 eb = list_first_entry(&fs_info->allocated_ebs, 77 struct extent_buffer, leak_list); 78 pr_err( 79 "BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n", 80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, 81 btrfs_header_owner(eb)); 82 list_del(&eb->leak_list); 83 kmem_cache_free(extent_buffer_cache, eb); 84 } 85 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags); 86 } 87 88 static inline void btrfs_extent_state_leak_debug_check(void) 89 { 90 struct extent_state *state; 91 92 while (!list_empty(&states)) { 93 state = list_entry(states.next, struct extent_state, leak_list); 94 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", 95 state->start, state->end, state->state, 96 extent_state_in_tree(state), 97 refcount_read(&state->refs)); 98 list_del(&state->leak_list); 99 kmem_cache_free(extent_state_cache, state); 100 } 101 } 102 103 #define btrfs_debug_check_extent_io_range(tree, start, end) \ 104 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end)) 105 static inline void __btrfs_debug_check_extent_io_range(const char *caller, 106 struct extent_io_tree *tree, u64 start, u64 end) 107 { 108 struct inode *inode = tree->private_data; 109 u64 isize; 110 111 if (!inode || !is_data_inode(inode)) 112 return; 113 114 isize = i_size_read(inode); 115 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { 116 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info, 117 "%s: ino %llu isize %llu odd range [%llu,%llu]", 118 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end); 119 } 120 } 121 #else 122 #define btrfs_leak_debug_add(lock, new, head) do {} while (0) 123 #define btrfs_leak_debug_del(lock, entry) do {} while (0) 124 #define btrfs_extent_state_leak_debug_check() do {} while (0) 125 #define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0) 126 #endif 127 128 struct tree_entry { 129 u64 start; 130 u64 end; 131 struct rb_node rb_node; 132 }; 133 134 struct extent_page_data { 135 struct bio *bio; 136 /* tells writepage not to lock the state bits for this range 137 * it still does the unlocking 138 */ 139 unsigned int extent_locked:1; 140 141 /* tells the submit_bio code to use REQ_SYNC */ 142 unsigned int sync_io:1; 143 }; 144 145 static int add_extent_changeset(struct extent_state *state, unsigned bits, 146 struct extent_changeset *changeset, 147 int set) 148 { 149 int ret; 150 151 if (!changeset) 152 return 0; 153 if (set && (state->state & bits) == bits) 154 return 0; 155 if (!set && (state->state & bits) == 0) 156 return 0; 157 changeset->bytes_changed += state->end - state->start + 1; 158 ret = ulist_add(&changeset->range_changed, state->start, state->end, 159 GFP_ATOMIC); 160 return ret; 161 } 162 163 static int __must_check submit_one_bio(struct bio *bio, int mirror_num, 164 unsigned long bio_flags) 165 { 166 blk_status_t ret = 0; 167 struct extent_io_tree *tree = bio->bi_private; 168 169 bio->bi_private = NULL; 170 171 if (tree->ops) 172 ret = tree->ops->submit_bio_hook(tree->private_data, bio, 173 mirror_num, bio_flags); 174 else 175 btrfsic_submit_bio(bio); 176 177 return blk_status_to_errno(ret); 178 } 179 180 /* Cleanup unsubmitted bios */ 181 static void end_write_bio(struct extent_page_data *epd, int ret) 182 { 183 if (epd->bio) { 184 epd->bio->bi_status = errno_to_blk_status(ret); 185 bio_endio(epd->bio); 186 epd->bio = NULL; 187 } 188 } 189 190 /* 
191 * Submit bio from extent page data via submit_one_bio 192 * 193 * Return 0 if everything is OK. 194 * Return <0 for error. 195 */ 196 static int __must_check flush_write_bio(struct extent_page_data *epd) 197 { 198 int ret = 0; 199 200 if (epd->bio) { 201 ret = submit_one_bio(epd->bio, 0, 0); 202 /* 203 * Clean up of epd->bio is handled by its endio function. 204 * And endio is either triggered by successful bio execution 205 * or the error handler of submit bio hook. 206 * So at this point, no matter what happened, we don't need 207 * to clean up epd->bio. 208 */ 209 epd->bio = NULL; 210 } 211 return ret; 212 } 213 214 int __init extent_state_cache_init(void) 215 { 216 extent_state_cache = kmem_cache_create("btrfs_extent_state", 217 sizeof(struct extent_state), 0, 218 SLAB_MEM_SPREAD, NULL); 219 if (!extent_state_cache) 220 return -ENOMEM; 221 return 0; 222 } 223 224 int __init extent_io_init(void) 225 { 226 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer", 227 sizeof(struct extent_buffer), 0, 228 SLAB_MEM_SPREAD, NULL); 229 if (!extent_buffer_cache) 230 return -ENOMEM; 231 232 if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE, 233 offsetof(struct btrfs_io_bio, bio), 234 BIOSET_NEED_BVECS)) 235 goto free_buffer_cache; 236 237 if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE)) 238 goto free_bioset; 239 240 return 0; 241 242 free_bioset: 243 bioset_exit(&btrfs_bioset); 244 245 free_buffer_cache: 246 kmem_cache_destroy(extent_buffer_cache); 247 extent_buffer_cache = NULL; 248 return -ENOMEM; 249 } 250 251 void __cold extent_state_cache_exit(void) 252 { 253 btrfs_extent_state_leak_debug_check(); 254 kmem_cache_destroy(extent_state_cache); 255 } 256 257 void __cold extent_io_exit(void) 258 { 259 /* 260 * Make sure all delayed rcu free are flushed before we 261 * destroy caches. 262 */ 263 rcu_barrier(); 264 kmem_cache_destroy(extent_buffer_cache); 265 bioset_exit(&btrfs_bioset); 266 } 267 268 /* 269 * For the file_extent_tree, we want to hold the inode lock when we lookup and 270 * update the disk_i_size, but lockdep will complain because our io_tree we hold 271 * the tree lock and get the inode lock when setting delalloc. These two things 272 * are unrelated, so make a class for the file_extent_tree so we don't get the 273 * two locking patterns mixed up. 274 */ 275 static struct lock_class_key file_extent_tree_class; 276 277 void extent_io_tree_init(struct btrfs_fs_info *fs_info, 278 struct extent_io_tree *tree, unsigned int owner, 279 void *private_data) 280 { 281 tree->fs_info = fs_info; 282 tree->state = RB_ROOT; 283 tree->ops = NULL; 284 tree->dirty_bytes = 0; 285 spin_lock_init(&tree->lock); 286 tree->private_data = private_data; 287 tree->owner = owner; 288 if (owner == IO_TREE_INODE_FILE_EXTENT) 289 lockdep_set_class(&tree->lock, &file_extent_tree_class); 290 } 291 292 void extent_io_tree_release(struct extent_io_tree *tree) 293 { 294 spin_lock(&tree->lock); 295 /* 296 * Do a single barrier for the waitqueue_active check here, the state 297 * of the waitqueue should not change once extent_io_tree_release is 298 * called. 299 */ 300 smp_mb(); 301 while (!RB_EMPTY_ROOT(&tree->state)) { 302 struct rb_node *node; 303 struct extent_state *state; 304 305 node = rb_first(&tree->state); 306 state = rb_entry(node, struct extent_state, rb_node); 307 rb_erase(&state->rb_node, &tree->state); 308 RB_CLEAR_NODE(&state->rb_node); 309 /* 310 * btree io trees aren't supposed to have tasks waiting for 311 * changes in the flags of extent states ever. 
312 */ 313 ASSERT(!waitqueue_active(&state->wq)); 314 free_extent_state(state); 315 316 cond_resched_lock(&tree->lock); 317 } 318 spin_unlock(&tree->lock); 319 } 320 321 static struct extent_state *alloc_extent_state(gfp_t mask) 322 { 323 struct extent_state *state; 324 325 /* 326 * The given mask might be not appropriate for the slab allocator, 327 * drop the unsupported bits 328 */ 329 mask &= ~(__GFP_DMA32|__GFP_HIGHMEM); 330 state = kmem_cache_alloc(extent_state_cache, mask); 331 if (!state) 332 return state; 333 state->state = 0; 334 state->failrec = NULL; 335 RB_CLEAR_NODE(&state->rb_node); 336 btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states); 337 refcount_set(&state->refs, 1); 338 init_waitqueue_head(&state->wq); 339 trace_alloc_extent_state(state, mask, _RET_IP_); 340 return state; 341 } 342 343 void free_extent_state(struct extent_state *state) 344 { 345 if (!state) 346 return; 347 if (refcount_dec_and_test(&state->refs)) { 348 WARN_ON(extent_state_in_tree(state)); 349 btrfs_leak_debug_del(&leak_lock, &state->leak_list); 350 trace_free_extent_state(state, _RET_IP_); 351 kmem_cache_free(extent_state_cache, state); 352 } 353 } 354 355 static struct rb_node *tree_insert(struct rb_root *root, 356 struct rb_node *search_start, 357 u64 offset, 358 struct rb_node *node, 359 struct rb_node ***p_in, 360 struct rb_node **parent_in) 361 { 362 struct rb_node **p; 363 struct rb_node *parent = NULL; 364 struct tree_entry *entry; 365 366 if (p_in && parent_in) { 367 p = *p_in; 368 parent = *parent_in; 369 goto do_insert; 370 } 371 372 p = search_start ? &search_start : &root->rb_node; 373 while (*p) { 374 parent = *p; 375 entry = rb_entry(parent, struct tree_entry, rb_node); 376 377 if (offset < entry->start) 378 p = &(*p)->rb_left; 379 else if (offset > entry->end) 380 p = &(*p)->rb_right; 381 else 382 return parent; 383 } 384 385 do_insert: 386 rb_link_node(node, parent, p); 387 rb_insert_color(node, root); 388 return NULL; 389 } 390 391 /** 392 * __etree_search - searche @tree for an entry that contains @offset. Such 393 * entry would have entry->start <= offset && entry->end >= offset. 394 * 395 * @tree - the tree to search 396 * @offset - offset that should fall within an entry in @tree 397 * @next_ret - pointer to the first entry whose range ends after @offset 398 * @prev - pointer to the first entry whose range begins before @offset 399 * @p_ret - pointer where new node should be anchored (used when inserting an 400 * entry in the tree) 401 * @parent_ret - points to entry which would have been the parent of the entry, 402 * containing @offset 403 * 404 * This function returns a pointer to the entry that contains @offset byte 405 * address. If no such entry exists, then NULL is returned and the other 406 * pointer arguments to the function are filled, otherwise the found entry is 407 * returned and other pointers are left untouched. 
408 */ 409 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, 410 struct rb_node **next_ret, 411 struct rb_node **prev_ret, 412 struct rb_node ***p_ret, 413 struct rb_node **parent_ret) 414 { 415 struct rb_root *root = &tree->state; 416 struct rb_node **n = &root->rb_node; 417 struct rb_node *prev = NULL; 418 struct rb_node *orig_prev = NULL; 419 struct tree_entry *entry; 420 struct tree_entry *prev_entry = NULL; 421 422 while (*n) { 423 prev = *n; 424 entry = rb_entry(prev, struct tree_entry, rb_node); 425 prev_entry = entry; 426 427 if (offset < entry->start) 428 n = &(*n)->rb_left; 429 else if (offset > entry->end) 430 n = &(*n)->rb_right; 431 else 432 return *n; 433 } 434 435 if (p_ret) 436 *p_ret = n; 437 if (parent_ret) 438 *parent_ret = prev; 439 440 if (next_ret) { 441 orig_prev = prev; 442 while (prev && offset > prev_entry->end) { 443 prev = rb_next(prev); 444 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 445 } 446 *next_ret = prev; 447 prev = orig_prev; 448 } 449 450 if (prev_ret) { 451 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 452 while (prev && offset < prev_entry->start) { 453 prev = rb_prev(prev); 454 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 455 } 456 *prev_ret = prev; 457 } 458 return NULL; 459 } 460 461 static inline struct rb_node * 462 tree_search_for_insert(struct extent_io_tree *tree, 463 u64 offset, 464 struct rb_node ***p_ret, 465 struct rb_node **parent_ret) 466 { 467 struct rb_node *next= NULL; 468 struct rb_node *ret; 469 470 ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret); 471 if (!ret) 472 return next; 473 return ret; 474 } 475 476 static inline struct rb_node *tree_search(struct extent_io_tree *tree, 477 u64 offset) 478 { 479 return tree_search_for_insert(tree, offset, NULL, NULL); 480 } 481 482 /* 483 * utility function to look for merge candidates inside a given range. 484 * Any extents with matching state are merged together into a single 485 * extent in the tree. Extents with EXTENT_IO in their state field 486 * are not merged because the end_io handlers need to be able to do 487 * operations on them without sleeping (or doing allocations/splits). 488 * 489 * This should be called with the tree lock held. 
490 */ 491 static void merge_state(struct extent_io_tree *tree, 492 struct extent_state *state) 493 { 494 struct extent_state *other; 495 struct rb_node *other_node; 496 497 if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY)) 498 return; 499 500 other_node = rb_prev(&state->rb_node); 501 if (other_node) { 502 other = rb_entry(other_node, struct extent_state, rb_node); 503 if (other->end == state->start - 1 && 504 other->state == state->state) { 505 if (tree->private_data && 506 is_data_inode(tree->private_data)) 507 btrfs_merge_delalloc_extent(tree->private_data, 508 state, other); 509 state->start = other->start; 510 rb_erase(&other->rb_node, &tree->state); 511 RB_CLEAR_NODE(&other->rb_node); 512 free_extent_state(other); 513 } 514 } 515 other_node = rb_next(&state->rb_node); 516 if (other_node) { 517 other = rb_entry(other_node, struct extent_state, rb_node); 518 if (other->start == state->end + 1 && 519 other->state == state->state) { 520 if (tree->private_data && 521 is_data_inode(tree->private_data)) 522 btrfs_merge_delalloc_extent(tree->private_data, 523 state, other); 524 state->end = other->end; 525 rb_erase(&other->rb_node, &tree->state); 526 RB_CLEAR_NODE(&other->rb_node); 527 free_extent_state(other); 528 } 529 } 530 } 531 532 static void set_state_bits(struct extent_io_tree *tree, 533 struct extent_state *state, unsigned *bits, 534 struct extent_changeset *changeset); 535 536 /* 537 * insert an extent_state struct into the tree. 'bits' are set on the 538 * struct before it is inserted. 539 * 540 * This may return -EEXIST if the extent is already there, in which case the 541 * state struct is freed. 542 * 543 * The tree lock is not taken internally. This is a utility function and 544 * probably isn't what you want to call (see set/clear_extent_bit). 545 */ 546 static int insert_state(struct extent_io_tree *tree, 547 struct extent_state *state, u64 start, u64 end, 548 struct rb_node ***p, 549 struct rb_node **parent, 550 unsigned *bits, struct extent_changeset *changeset) 551 { 552 struct rb_node *node; 553 554 if (end < start) { 555 btrfs_err(tree->fs_info, 556 "insert state: end < start %llu %llu", end, start); 557 WARN_ON(1); 558 } 559 state->start = start; 560 state->end = end; 561 562 set_state_bits(tree, state, bits, changeset); 563 564 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent); 565 if (node) { 566 struct extent_state *found; 567 found = rb_entry(node, struct extent_state, rb_node); 568 btrfs_err(tree->fs_info, 569 "found node %llu %llu on insert of %llu %llu", 570 found->start, found->end, start, end); 571 return -EEXIST; 572 } 573 merge_state(tree, state); 574 return 0; 575 } 576 577 /* 578 * split a given extent state struct in two, inserting the preallocated 579 * struct 'prealloc' as the newly created second half. 'split' indicates an 580 * offset inside 'orig' where it should be split. 581 * 582 * Before calling, 583 * the tree has 'orig' at [orig->start, orig->end]. After calling, there 584 * are two extent state structs in the tree: 585 * prealloc: [orig->start, split - 1] 586 * orig: [ split, orig->end ] 587 * 588 * The tree locks are not taken by this function. They need to be held 589 * by the caller. 
590 */ 591 static int split_state(struct extent_io_tree *tree, struct extent_state *orig, 592 struct extent_state *prealloc, u64 split) 593 { 594 struct rb_node *node; 595 596 if (tree->private_data && is_data_inode(tree->private_data)) 597 btrfs_split_delalloc_extent(tree->private_data, orig, split); 598 599 prealloc->start = orig->start; 600 prealloc->end = split - 1; 601 prealloc->state = orig->state; 602 orig->start = split; 603 604 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end, 605 &prealloc->rb_node, NULL, NULL); 606 if (node) { 607 free_extent_state(prealloc); 608 return -EEXIST; 609 } 610 return 0; 611 } 612 613 static struct extent_state *next_state(struct extent_state *state) 614 { 615 struct rb_node *next = rb_next(&state->rb_node); 616 if (next) 617 return rb_entry(next, struct extent_state, rb_node); 618 else 619 return NULL; 620 } 621 622 /* 623 * utility function to clear some bits in an extent state struct. 624 * it will optionally wake up anyone waiting on this state (wake == 1). 625 * 626 * If no bits are set on the state struct after clearing things, the 627 * struct is freed and removed from the tree 628 */ 629 static struct extent_state *clear_state_bit(struct extent_io_tree *tree, 630 struct extent_state *state, 631 unsigned *bits, int wake, 632 struct extent_changeset *changeset) 633 { 634 struct extent_state *next; 635 unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS; 636 int ret; 637 638 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { 639 u64 range = state->end - state->start + 1; 640 WARN_ON(range > tree->dirty_bytes); 641 tree->dirty_bytes -= range; 642 } 643 644 if (tree->private_data && is_data_inode(tree->private_data)) 645 btrfs_clear_delalloc_extent(tree->private_data, state, bits); 646 647 ret = add_extent_changeset(state, bits_to_clear, changeset, 0); 648 BUG_ON(ret < 0); 649 state->state &= ~bits_to_clear; 650 if (wake) 651 wake_up(&state->wq); 652 if (state->state == 0) { 653 next = next_state(state); 654 if (extent_state_in_tree(state)) { 655 rb_erase(&state->rb_node, &tree->state); 656 RB_CLEAR_NODE(&state->rb_node); 657 free_extent_state(state); 658 } else { 659 WARN_ON(1); 660 } 661 } else { 662 merge_state(tree, state); 663 next = next_state(state); 664 } 665 return next; 666 } 667 668 static struct extent_state * 669 alloc_extent_state_atomic(struct extent_state *prealloc) 670 { 671 if (!prealloc) 672 prealloc = alloc_extent_state(GFP_ATOMIC); 673 674 return prealloc; 675 } 676 677 static void extent_io_tree_panic(struct extent_io_tree *tree, int err) 678 { 679 struct inode *inode = tree->private_data; 680 681 btrfs_panic(btrfs_sb(inode->i_sb), err, 682 "locking error: extent tree was modified by another thread while locked"); 683 } 684 685 /* 686 * clear some bits on a range in the tree. This may require splitting 687 * or inserting elements in the tree, so the gfp mask is used to 688 * indicate which allocations or sleeping are allowed. 689 * 690 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove 691 * the given range from the tree regardless of state (ie for truncate). 692 * 693 * the range [start, end] is inclusive. 694 * 695 * This takes the tree lock, and returns 0 on success and < 0 on error. 
696 */ 697 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 698 unsigned bits, int wake, int delete, 699 struct extent_state **cached_state, 700 gfp_t mask, struct extent_changeset *changeset) 701 { 702 struct extent_state *state; 703 struct extent_state *cached; 704 struct extent_state *prealloc = NULL; 705 struct rb_node *node; 706 u64 last_end; 707 int err; 708 int clear = 0; 709 710 btrfs_debug_check_extent_io_range(tree, start, end); 711 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); 712 713 if (bits & EXTENT_DELALLOC) 714 bits |= EXTENT_NORESERVE; 715 716 if (delete) 717 bits |= ~EXTENT_CTLBITS; 718 719 if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY)) 720 clear = 1; 721 again: 722 if (!prealloc && gfpflags_allow_blocking(mask)) { 723 /* 724 * Don't care for allocation failure here because we might end 725 * up not needing the pre-allocated extent state at all, which 726 * is the case if we only have in the tree extent states that 727 * cover our input range and don't cover too any other range. 728 * If we end up needing a new extent state we allocate it later. 729 */ 730 prealloc = alloc_extent_state(mask); 731 } 732 733 spin_lock(&tree->lock); 734 if (cached_state) { 735 cached = *cached_state; 736 737 if (clear) { 738 *cached_state = NULL; 739 cached_state = NULL; 740 } 741 742 if (cached && extent_state_in_tree(cached) && 743 cached->start <= start && cached->end > start) { 744 if (clear) 745 refcount_dec(&cached->refs); 746 state = cached; 747 goto hit_next; 748 } 749 if (clear) 750 free_extent_state(cached); 751 } 752 /* 753 * this search will find the extents that end after 754 * our range starts 755 */ 756 node = tree_search(tree, start); 757 if (!node) 758 goto out; 759 state = rb_entry(node, struct extent_state, rb_node); 760 hit_next: 761 if (state->start > end) 762 goto out; 763 WARN_ON(state->end < start); 764 last_end = state->end; 765 766 /* the state doesn't have the wanted bits, go ahead */ 767 if (!(state->state & bits)) { 768 state = next_state(state); 769 goto next; 770 } 771 772 /* 773 * | ---- desired range ---- | 774 * | state | or 775 * | ------------- state -------------- | 776 * 777 * We need to split the extent we found, and may flip 778 * bits on second half. 779 * 780 * If the extent we found extends past our range, we 781 * just split and search again. It'll get split again 782 * the next time though. 783 * 784 * If the extent we found is inside our range, we clear 785 * the desired bit on it. 
786 */ 787 788 if (state->start < start) { 789 prealloc = alloc_extent_state_atomic(prealloc); 790 BUG_ON(!prealloc); 791 err = split_state(tree, state, prealloc, start); 792 if (err) 793 extent_io_tree_panic(tree, err); 794 795 prealloc = NULL; 796 if (err) 797 goto out; 798 if (state->end <= end) { 799 state = clear_state_bit(tree, state, &bits, wake, 800 changeset); 801 goto next; 802 } 803 goto search_again; 804 } 805 /* 806 * | ---- desired range ---- | 807 * | state | 808 * We need to split the extent, and clear the bit 809 * on the first half 810 */ 811 if (state->start <= end && state->end > end) { 812 prealloc = alloc_extent_state_atomic(prealloc); 813 BUG_ON(!prealloc); 814 err = split_state(tree, state, prealloc, end + 1); 815 if (err) 816 extent_io_tree_panic(tree, err); 817 818 if (wake) 819 wake_up(&state->wq); 820 821 clear_state_bit(tree, prealloc, &bits, wake, changeset); 822 823 prealloc = NULL; 824 goto out; 825 } 826 827 state = clear_state_bit(tree, state, &bits, wake, changeset); 828 next: 829 if (last_end == (u64)-1) 830 goto out; 831 start = last_end + 1; 832 if (start <= end && state && !need_resched()) 833 goto hit_next; 834 835 search_again: 836 if (start > end) 837 goto out; 838 spin_unlock(&tree->lock); 839 if (gfpflags_allow_blocking(mask)) 840 cond_resched(); 841 goto again; 842 843 out: 844 spin_unlock(&tree->lock); 845 if (prealloc) 846 free_extent_state(prealloc); 847 848 return 0; 849 850 } 851 852 static void wait_on_state(struct extent_io_tree *tree, 853 struct extent_state *state) 854 __releases(tree->lock) 855 __acquires(tree->lock) 856 { 857 DEFINE_WAIT(wait); 858 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); 859 spin_unlock(&tree->lock); 860 schedule(); 861 spin_lock(&tree->lock); 862 finish_wait(&state->wq, &wait); 863 } 864 865 /* 866 * waits for one or more bits to clear on a range in the state tree. 867 * The range [start, end] is inclusive. 
868 * The tree lock is taken by this function 869 */ 870 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 871 unsigned long bits) 872 { 873 struct extent_state *state; 874 struct rb_node *node; 875 876 btrfs_debug_check_extent_io_range(tree, start, end); 877 878 spin_lock(&tree->lock); 879 again: 880 while (1) { 881 /* 882 * this search will find all the extents that end after 883 * our range starts 884 */ 885 node = tree_search(tree, start); 886 process_node: 887 if (!node) 888 break; 889 890 state = rb_entry(node, struct extent_state, rb_node); 891 892 if (state->start > end) 893 goto out; 894 895 if (state->state & bits) { 896 start = state->start; 897 refcount_inc(&state->refs); 898 wait_on_state(tree, state); 899 free_extent_state(state); 900 goto again; 901 } 902 start = state->end + 1; 903 904 if (start > end) 905 break; 906 907 if (!cond_resched_lock(&tree->lock)) { 908 node = rb_next(node); 909 goto process_node; 910 } 911 } 912 out: 913 spin_unlock(&tree->lock); 914 } 915 916 static void set_state_bits(struct extent_io_tree *tree, 917 struct extent_state *state, 918 unsigned *bits, struct extent_changeset *changeset) 919 { 920 unsigned bits_to_set = *bits & ~EXTENT_CTLBITS; 921 int ret; 922 923 if (tree->private_data && is_data_inode(tree->private_data)) 924 btrfs_set_delalloc_extent(tree->private_data, state, bits); 925 926 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { 927 u64 range = state->end - state->start + 1; 928 tree->dirty_bytes += range; 929 } 930 ret = add_extent_changeset(state, bits_to_set, changeset, 1); 931 BUG_ON(ret < 0); 932 state->state |= bits_to_set; 933 } 934 935 static void cache_state_if_flags(struct extent_state *state, 936 struct extent_state **cached_ptr, 937 unsigned flags) 938 { 939 if (cached_ptr && !(*cached_ptr)) { 940 if (!flags || (state->state & flags)) { 941 *cached_ptr = state; 942 refcount_inc(&state->refs); 943 } 944 } 945 } 946 947 static void cache_state(struct extent_state *state, 948 struct extent_state **cached_ptr) 949 { 950 return cache_state_if_flags(state, cached_ptr, 951 EXTENT_LOCKED | EXTENT_BOUNDARY); 952 } 953 954 /* 955 * set some bits on a range in the tree. This may require allocations or 956 * sleeping, so the gfp mask is used to indicate what is allowed. 957 * 958 * If any of the exclusive bits are set, this will fail with -EEXIST if some 959 * part of the range already has the desired bits set. The start of the 960 * existing range is returned in failed_start in this case. 961 * 962 * [start, end] is inclusive This takes the tree lock. 963 */ 964 965 static int __must_check 966 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 967 unsigned bits, unsigned exclusive_bits, 968 u64 *failed_start, struct extent_state **cached_state, 969 gfp_t mask, struct extent_changeset *changeset) 970 { 971 struct extent_state *state; 972 struct extent_state *prealloc = NULL; 973 struct rb_node *node; 974 struct rb_node **p; 975 struct rb_node *parent; 976 int err = 0; 977 u64 last_start; 978 u64 last_end; 979 980 btrfs_debug_check_extent_io_range(tree, start, end); 981 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); 982 983 again: 984 if (!prealloc && gfpflags_allow_blocking(mask)) { 985 /* 986 * Don't care for allocation failure here because we might end 987 * up not needing the pre-allocated extent state at all, which 988 * is the case if we only have in the tree extent states that 989 * cover our input range and don't cover too any other range. 
990 * If we end up needing a new extent state we allocate it later. 991 */ 992 prealloc = alloc_extent_state(mask); 993 } 994 995 spin_lock(&tree->lock); 996 if (cached_state && *cached_state) { 997 state = *cached_state; 998 if (state->start <= start && state->end > start && 999 extent_state_in_tree(state)) { 1000 node = &state->rb_node; 1001 goto hit_next; 1002 } 1003 } 1004 /* 1005 * this search will find all the extents that end after 1006 * our range starts. 1007 */ 1008 node = tree_search_for_insert(tree, start, &p, &parent); 1009 if (!node) { 1010 prealloc = alloc_extent_state_atomic(prealloc); 1011 BUG_ON(!prealloc); 1012 err = insert_state(tree, prealloc, start, end, 1013 &p, &parent, &bits, changeset); 1014 if (err) 1015 extent_io_tree_panic(tree, err); 1016 1017 cache_state(prealloc, cached_state); 1018 prealloc = NULL; 1019 goto out; 1020 } 1021 state = rb_entry(node, struct extent_state, rb_node); 1022 hit_next: 1023 last_start = state->start; 1024 last_end = state->end; 1025 1026 /* 1027 * | ---- desired range ---- | 1028 * | state | 1029 * 1030 * Just lock what we found and keep going 1031 */ 1032 if (state->start == start && state->end <= end) { 1033 if (state->state & exclusive_bits) { 1034 *failed_start = state->start; 1035 err = -EEXIST; 1036 goto out; 1037 } 1038 1039 set_state_bits(tree, state, &bits, changeset); 1040 cache_state(state, cached_state); 1041 merge_state(tree, state); 1042 if (last_end == (u64)-1) 1043 goto out; 1044 start = last_end + 1; 1045 state = next_state(state); 1046 if (start < end && state && state->start == start && 1047 !need_resched()) 1048 goto hit_next; 1049 goto search_again; 1050 } 1051 1052 /* 1053 * | ---- desired range ---- | 1054 * | state | 1055 * or 1056 * | ------------- state -------------- | 1057 * 1058 * We need to split the extent we found, and may flip bits on 1059 * second half. 1060 * 1061 * If the extent we found extends past our 1062 * range, we just split and search again. It'll get split 1063 * again the next time though. 1064 * 1065 * If the extent we found is inside our range, we set the 1066 * desired bit on it. 1067 */ 1068 if (state->start < start) { 1069 if (state->state & exclusive_bits) { 1070 *failed_start = start; 1071 err = -EEXIST; 1072 goto out; 1073 } 1074 1075 /* 1076 * If this extent already has all the bits we want set, then 1077 * skip it, not necessary to split it or do anything with it. 1078 */ 1079 if ((state->state & bits) == bits) { 1080 start = state->end + 1; 1081 cache_state(state, cached_state); 1082 goto search_again; 1083 } 1084 1085 prealloc = alloc_extent_state_atomic(prealloc); 1086 BUG_ON(!prealloc); 1087 err = split_state(tree, state, prealloc, start); 1088 if (err) 1089 extent_io_tree_panic(tree, err); 1090 1091 prealloc = NULL; 1092 if (err) 1093 goto out; 1094 if (state->end <= end) { 1095 set_state_bits(tree, state, &bits, changeset); 1096 cache_state(state, cached_state); 1097 merge_state(tree, state); 1098 if (last_end == (u64)-1) 1099 goto out; 1100 start = last_end + 1; 1101 state = next_state(state); 1102 if (start < end && state && state->start == start && 1103 !need_resched()) 1104 goto hit_next; 1105 } 1106 goto search_again; 1107 } 1108 /* 1109 * | ---- desired range ---- | 1110 * | state | or | state | 1111 * 1112 * There's a hole, we need to insert something in it and 1113 * ignore the extent we found. 
1114 */ 1115 if (state->start > start) { 1116 u64 this_end; 1117 if (end < last_start) 1118 this_end = end; 1119 else 1120 this_end = last_start - 1; 1121 1122 prealloc = alloc_extent_state_atomic(prealloc); 1123 BUG_ON(!prealloc); 1124 1125 /* 1126 * Avoid to free 'prealloc' if it can be merged with 1127 * the later extent. 1128 */ 1129 err = insert_state(tree, prealloc, start, this_end, 1130 NULL, NULL, &bits, changeset); 1131 if (err) 1132 extent_io_tree_panic(tree, err); 1133 1134 cache_state(prealloc, cached_state); 1135 prealloc = NULL; 1136 start = this_end + 1; 1137 goto search_again; 1138 } 1139 /* 1140 * | ---- desired range ---- | 1141 * | state | 1142 * We need to split the extent, and set the bit 1143 * on the first half 1144 */ 1145 if (state->start <= end && state->end > end) { 1146 if (state->state & exclusive_bits) { 1147 *failed_start = start; 1148 err = -EEXIST; 1149 goto out; 1150 } 1151 1152 prealloc = alloc_extent_state_atomic(prealloc); 1153 BUG_ON(!prealloc); 1154 err = split_state(tree, state, prealloc, end + 1); 1155 if (err) 1156 extent_io_tree_panic(tree, err); 1157 1158 set_state_bits(tree, prealloc, &bits, changeset); 1159 cache_state(prealloc, cached_state); 1160 merge_state(tree, prealloc); 1161 prealloc = NULL; 1162 goto out; 1163 } 1164 1165 search_again: 1166 if (start > end) 1167 goto out; 1168 spin_unlock(&tree->lock); 1169 if (gfpflags_allow_blocking(mask)) 1170 cond_resched(); 1171 goto again; 1172 1173 out: 1174 spin_unlock(&tree->lock); 1175 if (prealloc) 1176 free_extent_state(prealloc); 1177 1178 return err; 1179 1180 } 1181 1182 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 1183 unsigned bits, u64 * failed_start, 1184 struct extent_state **cached_state, gfp_t mask) 1185 { 1186 return __set_extent_bit(tree, start, end, bits, 0, failed_start, 1187 cached_state, mask, NULL); 1188 } 1189 1190 1191 /** 1192 * convert_extent_bit - convert all bits in a given range from one bit to 1193 * another 1194 * @tree: the io tree to search 1195 * @start: the start offset in bytes 1196 * @end: the end offset in bytes (inclusive) 1197 * @bits: the bits to set in this range 1198 * @clear_bits: the bits to clear in this range 1199 * @cached_state: state that we're going to cache 1200 * 1201 * This will go through and set bits for the given range. If any states exist 1202 * already in this range they are set with the given bit and cleared of the 1203 * clear_bits. This is only meant to be used by things that are mergeable, ie 1204 * converting from say DELALLOC to DIRTY. This is not meant to be used with 1205 * boundary bits like LOCK. 1206 * 1207 * All allocations are done with GFP_NOFS. 1208 */ 1209 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 1210 unsigned bits, unsigned clear_bits, 1211 struct extent_state **cached_state) 1212 { 1213 struct extent_state *state; 1214 struct extent_state *prealloc = NULL; 1215 struct rb_node *node; 1216 struct rb_node **p; 1217 struct rb_node *parent; 1218 int err = 0; 1219 u64 last_start; 1220 u64 last_end; 1221 bool first_iteration = true; 1222 1223 btrfs_debug_check_extent_io_range(tree, start, end); 1224 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, 1225 clear_bits); 1226 1227 again: 1228 if (!prealloc) { 1229 /* 1230 * Best effort, don't worry if extent state allocation fails 1231 * here for the first iteration. We might have a cached state 1232 * that matches exactly the target range, in which case no 1233 * extent state allocations are needed. 
We'll only know this 1234 * after locking the tree. 1235 */ 1236 prealloc = alloc_extent_state(GFP_NOFS); 1237 if (!prealloc && !first_iteration) 1238 return -ENOMEM; 1239 } 1240 1241 spin_lock(&tree->lock); 1242 if (cached_state && *cached_state) { 1243 state = *cached_state; 1244 if (state->start <= start && state->end > start && 1245 extent_state_in_tree(state)) { 1246 node = &state->rb_node; 1247 goto hit_next; 1248 } 1249 } 1250 1251 /* 1252 * this search will find all the extents that end after 1253 * our range starts. 1254 */ 1255 node = tree_search_for_insert(tree, start, &p, &parent); 1256 if (!node) { 1257 prealloc = alloc_extent_state_atomic(prealloc); 1258 if (!prealloc) { 1259 err = -ENOMEM; 1260 goto out; 1261 } 1262 err = insert_state(tree, prealloc, start, end, 1263 &p, &parent, &bits, NULL); 1264 if (err) 1265 extent_io_tree_panic(tree, err); 1266 cache_state(prealloc, cached_state); 1267 prealloc = NULL; 1268 goto out; 1269 } 1270 state = rb_entry(node, struct extent_state, rb_node); 1271 hit_next: 1272 last_start = state->start; 1273 last_end = state->end; 1274 1275 /* 1276 * | ---- desired range ---- | 1277 * | state | 1278 * 1279 * Just lock what we found and keep going 1280 */ 1281 if (state->start == start && state->end <= end) { 1282 set_state_bits(tree, state, &bits, NULL); 1283 cache_state(state, cached_state); 1284 state = clear_state_bit(tree, state, &clear_bits, 0, NULL); 1285 if (last_end == (u64)-1) 1286 goto out; 1287 start = last_end + 1; 1288 if (start < end && state && state->start == start && 1289 !need_resched()) 1290 goto hit_next; 1291 goto search_again; 1292 } 1293 1294 /* 1295 * | ---- desired range ---- | 1296 * | state | 1297 * or 1298 * | ------------- state -------------- | 1299 * 1300 * We need to split the extent we found, and may flip bits on 1301 * second half. 1302 * 1303 * If the extent we found extends past our 1304 * range, we just split and search again. It'll get split 1305 * again the next time though. 1306 * 1307 * If the extent we found is inside our range, we set the 1308 * desired bit on it. 1309 */ 1310 if (state->start < start) { 1311 prealloc = alloc_extent_state_atomic(prealloc); 1312 if (!prealloc) { 1313 err = -ENOMEM; 1314 goto out; 1315 } 1316 err = split_state(tree, state, prealloc, start); 1317 if (err) 1318 extent_io_tree_panic(tree, err); 1319 prealloc = NULL; 1320 if (err) 1321 goto out; 1322 if (state->end <= end) { 1323 set_state_bits(tree, state, &bits, NULL); 1324 cache_state(state, cached_state); 1325 state = clear_state_bit(tree, state, &clear_bits, 0, 1326 NULL); 1327 if (last_end == (u64)-1) 1328 goto out; 1329 start = last_end + 1; 1330 if (start < end && state && state->start == start && 1331 !need_resched()) 1332 goto hit_next; 1333 } 1334 goto search_again; 1335 } 1336 /* 1337 * | ---- desired range ---- | 1338 * | state | or | state | 1339 * 1340 * There's a hole, we need to insert something in it and 1341 * ignore the extent we found. 1342 */ 1343 if (state->start > start) { 1344 u64 this_end; 1345 if (end < last_start) 1346 this_end = end; 1347 else 1348 this_end = last_start - 1; 1349 1350 prealloc = alloc_extent_state_atomic(prealloc); 1351 if (!prealloc) { 1352 err = -ENOMEM; 1353 goto out; 1354 } 1355 1356 /* 1357 * Avoid to free 'prealloc' if it can be merged with 1358 * the later extent. 
1359 */ 1360 err = insert_state(tree, prealloc, start, this_end, 1361 NULL, NULL, &bits, NULL); 1362 if (err) 1363 extent_io_tree_panic(tree, err); 1364 cache_state(prealloc, cached_state); 1365 prealloc = NULL; 1366 start = this_end + 1; 1367 goto search_again; 1368 } 1369 /* 1370 * | ---- desired range ---- | 1371 * | state | 1372 * We need to split the extent, and set the bit 1373 * on the first half 1374 */ 1375 if (state->start <= end && state->end > end) { 1376 prealloc = alloc_extent_state_atomic(prealloc); 1377 if (!prealloc) { 1378 err = -ENOMEM; 1379 goto out; 1380 } 1381 1382 err = split_state(tree, state, prealloc, end + 1); 1383 if (err) 1384 extent_io_tree_panic(tree, err); 1385 1386 set_state_bits(tree, prealloc, &bits, NULL); 1387 cache_state(prealloc, cached_state); 1388 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL); 1389 prealloc = NULL; 1390 goto out; 1391 } 1392 1393 search_again: 1394 if (start > end) 1395 goto out; 1396 spin_unlock(&tree->lock); 1397 cond_resched(); 1398 first_iteration = false; 1399 goto again; 1400 1401 out: 1402 spin_unlock(&tree->lock); 1403 if (prealloc) 1404 free_extent_state(prealloc); 1405 1406 return err; 1407 } 1408 1409 /* wrappers around set/clear extent bit */ 1410 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1411 unsigned bits, struct extent_changeset *changeset) 1412 { 1413 /* 1414 * We don't support EXTENT_LOCKED yet, as current changeset will 1415 * record any bits changed, so for EXTENT_LOCKED case, it will 1416 * either fail with -EEXIST or changeset will record the whole 1417 * range. 1418 */ 1419 BUG_ON(bits & EXTENT_LOCKED); 1420 1421 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS, 1422 changeset); 1423 } 1424 1425 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end, 1426 unsigned bits) 1427 { 1428 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, 1429 GFP_NOWAIT, NULL); 1430 } 1431 1432 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 1433 unsigned bits, int wake, int delete, 1434 struct extent_state **cached) 1435 { 1436 return __clear_extent_bit(tree, start, end, bits, wake, delete, 1437 cached, GFP_NOFS, NULL); 1438 } 1439 1440 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1441 unsigned bits, struct extent_changeset *changeset) 1442 { 1443 /* 1444 * Don't support EXTENT_LOCKED case, same reason as 1445 * set_record_extent_bits(). 1446 */ 1447 BUG_ON(bits & EXTENT_LOCKED); 1448 1449 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS, 1450 changeset); 1451 } 1452 1453 /* 1454 * either insert or lock state struct between start and end use mask to tell 1455 * us if waiting is desired. 
1456 */ 1457 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1458 struct extent_state **cached_state) 1459 { 1460 int err; 1461 u64 failed_start; 1462 1463 while (1) { 1464 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, 1465 EXTENT_LOCKED, &failed_start, 1466 cached_state, GFP_NOFS, NULL); 1467 if (err == -EEXIST) { 1468 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); 1469 start = failed_start; 1470 } else 1471 break; 1472 WARN_ON(start > end); 1473 } 1474 return err; 1475 } 1476 1477 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) 1478 { 1479 int err; 1480 u64 failed_start; 1481 1482 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, 1483 &failed_start, NULL, GFP_NOFS, NULL); 1484 if (err == -EEXIST) { 1485 if (failed_start > start) 1486 clear_extent_bit(tree, start, failed_start - 1, 1487 EXTENT_LOCKED, 1, 0, NULL); 1488 return 0; 1489 } 1490 return 1; 1491 } 1492 1493 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) 1494 { 1495 unsigned long index = start >> PAGE_SHIFT; 1496 unsigned long end_index = end >> PAGE_SHIFT; 1497 struct page *page; 1498 1499 while (index <= end_index) { 1500 page = find_get_page(inode->i_mapping, index); 1501 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1502 clear_page_dirty_for_io(page); 1503 put_page(page); 1504 index++; 1505 } 1506 } 1507 1508 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) 1509 { 1510 unsigned long index = start >> PAGE_SHIFT; 1511 unsigned long end_index = end >> PAGE_SHIFT; 1512 struct page *page; 1513 1514 while (index <= end_index) { 1515 page = find_get_page(inode->i_mapping, index); 1516 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1517 __set_page_dirty_nobuffers(page); 1518 account_page_redirty(page); 1519 put_page(page); 1520 index++; 1521 } 1522 } 1523 1524 /* find the first state struct with 'bits' set after 'start', and 1525 * return it. tree->lock must be held. NULL will returned if 1526 * nothing was found after 'start' 1527 */ 1528 static struct extent_state * 1529 find_first_extent_bit_state(struct extent_io_tree *tree, 1530 u64 start, unsigned bits) 1531 { 1532 struct rb_node *node; 1533 struct extent_state *state; 1534 1535 /* 1536 * this search will find all the extents that end after 1537 * our range starts. 1538 */ 1539 node = tree_search(tree, start); 1540 if (!node) 1541 goto out; 1542 1543 while (1) { 1544 state = rb_entry(node, struct extent_state, rb_node); 1545 if (state->end >= start && (state->state & bits)) 1546 return state; 1547 1548 node = rb_next(node); 1549 if (!node) 1550 break; 1551 } 1552 out: 1553 return NULL; 1554 } 1555 1556 /* 1557 * find the first offset in the io tree with 'bits' set. zero is 1558 * returned if we find something, and *start_ret and *end_ret are 1559 * set to reflect the state struct that was found. 1560 * 1561 * If nothing was found, 1 is returned. If found something, return 0. 
1562 */ 1563 int find_first_extent_bit(struct extent_io_tree *tree, u64 start, 1564 u64 *start_ret, u64 *end_ret, unsigned bits, 1565 struct extent_state **cached_state) 1566 { 1567 struct extent_state *state; 1568 int ret = 1; 1569 1570 spin_lock(&tree->lock); 1571 if (cached_state && *cached_state) { 1572 state = *cached_state; 1573 if (state->end == start - 1 && extent_state_in_tree(state)) { 1574 while ((state = next_state(state)) != NULL) { 1575 if (state->state & bits) 1576 goto got_it; 1577 } 1578 free_extent_state(*cached_state); 1579 *cached_state = NULL; 1580 goto out; 1581 } 1582 free_extent_state(*cached_state); 1583 *cached_state = NULL; 1584 } 1585 1586 state = find_first_extent_bit_state(tree, start, bits); 1587 got_it: 1588 if (state) { 1589 cache_state_if_flags(state, cached_state, 0); 1590 *start_ret = state->start; 1591 *end_ret = state->end; 1592 ret = 0; 1593 } 1594 out: 1595 spin_unlock(&tree->lock); 1596 return ret; 1597 } 1598 1599 /** 1600 * find_contiguous_extent_bit: find a contiguous area of bits 1601 * @tree - io tree to check 1602 * @start - offset to start the search from 1603 * @start_ret - the first offset we found with the bits set 1604 * @end_ret - the final contiguous range of the bits that were set 1605 * @bits - bits to look for 1606 * 1607 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges 1608 * to set bits appropriately, and then merge them again. During this time it 1609 * will drop the tree->lock, so use this helper if you want to find the actual 1610 * contiguous area for given bits. We will search to the first bit we find, and 1611 * then walk down the tree until we find a non-contiguous area. The area 1612 * returned will be the full contiguous area with the bits set. 1613 */ 1614 int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, 1615 u64 *start_ret, u64 *end_ret, unsigned bits) 1616 { 1617 struct extent_state *state; 1618 int ret = 1; 1619 1620 spin_lock(&tree->lock); 1621 state = find_first_extent_bit_state(tree, start, bits); 1622 if (state) { 1623 *start_ret = state->start; 1624 *end_ret = state->end; 1625 while ((state = next_state(state)) != NULL) { 1626 if (state->start > (*end_ret + 1)) 1627 break; 1628 *end_ret = state->end; 1629 } 1630 ret = 0; 1631 } 1632 spin_unlock(&tree->lock); 1633 return ret; 1634 } 1635 1636 /** 1637 * find_first_clear_extent_bit - find the first range that has @bits not set. 1638 * This range could start before @start. 1639 * 1640 * @tree - the tree to search 1641 * @start - the offset at/after which the found extent should start 1642 * @start_ret - records the beginning of the range 1643 * @end_ret - records the end of the range (inclusive) 1644 * @bits - the set of bits which must be unset 1645 * 1646 * Since unallocated range is also considered one which doesn't have the bits 1647 * set it's possible that @end_ret contains -1, this happens in case the range 1648 * spans (last_range_end, end of device]. In this case it's up to the caller to 1649 * trim @end_ret to the appropriate size. 
1650 */ 1651 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, 1652 u64 *start_ret, u64 *end_ret, unsigned bits) 1653 { 1654 struct extent_state *state; 1655 struct rb_node *node, *prev = NULL, *next; 1656 1657 spin_lock(&tree->lock); 1658 1659 /* Find first extent with bits cleared */ 1660 while (1) { 1661 node = __etree_search(tree, start, &next, &prev, NULL, NULL); 1662 if (!node && !next && !prev) { 1663 /* 1664 * Tree is completely empty, send full range and let 1665 * caller deal with it 1666 */ 1667 *start_ret = 0; 1668 *end_ret = -1; 1669 goto out; 1670 } else if (!node && !next) { 1671 /* 1672 * We are past the last allocated chunk, set start at 1673 * the end of the last extent. 1674 */ 1675 state = rb_entry(prev, struct extent_state, rb_node); 1676 *start_ret = state->end + 1; 1677 *end_ret = -1; 1678 goto out; 1679 } else if (!node) { 1680 node = next; 1681 } 1682 /* 1683 * At this point 'node' either contains 'start' or start is 1684 * before 'node' 1685 */ 1686 state = rb_entry(node, struct extent_state, rb_node); 1687 1688 if (in_range(start, state->start, state->end - state->start + 1)) { 1689 if (state->state & bits) { 1690 /* 1691 * |--range with bits sets--| 1692 * | 1693 * start 1694 */ 1695 start = state->end + 1; 1696 } else { 1697 /* 1698 * 'start' falls within a range that doesn't 1699 * have the bits set, so take its start as 1700 * the beginning of the desired range 1701 * 1702 * |--range with bits cleared----| 1703 * | 1704 * start 1705 */ 1706 *start_ret = state->start; 1707 break; 1708 } 1709 } else { 1710 /* 1711 * |---prev range---|---hole/unset---|---node range---| 1712 * | 1713 * start 1714 * 1715 * or 1716 * 1717 * |---hole/unset--||--first node--| 1718 * 0 | 1719 * start 1720 */ 1721 if (prev) { 1722 state = rb_entry(prev, struct extent_state, 1723 rb_node); 1724 *start_ret = state->end + 1; 1725 } else { 1726 *start_ret = 0; 1727 } 1728 break; 1729 } 1730 } 1731 1732 /* 1733 * Find the longest stretch from start until an entry which has the 1734 * bits set 1735 */ 1736 while (1) { 1737 state = rb_entry(node, struct extent_state, rb_node); 1738 if (state->end >= start && !(state->state & bits)) { 1739 *end_ret = state->end; 1740 } else { 1741 *end_ret = state->start - 1; 1742 break; 1743 } 1744 1745 node = rb_next(node); 1746 if (!node) 1747 break; 1748 } 1749 out: 1750 spin_unlock(&tree->lock); 1751 } 1752 1753 /* 1754 * find a contiguous range of bytes in the file marked as delalloc, not 1755 * more than 'max_bytes'. start and end are used to return the range, 1756 * 1757 * true is returned if we find something, false if nothing was in the tree 1758 */ 1759 bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, 1760 u64 *end, u64 max_bytes, 1761 struct extent_state **cached_state) 1762 { 1763 struct rb_node *node; 1764 struct extent_state *state; 1765 u64 cur_start = *start; 1766 bool found = false; 1767 u64 total_bytes = 0; 1768 1769 spin_lock(&tree->lock); 1770 1771 /* 1772 * this search will find all the extents that end after 1773 * our range starts. 
1774 */ 1775 node = tree_search(tree, cur_start); 1776 if (!node) { 1777 *end = (u64)-1; 1778 goto out; 1779 } 1780 1781 while (1) { 1782 state = rb_entry(node, struct extent_state, rb_node); 1783 if (found && (state->start != cur_start || 1784 (state->state & EXTENT_BOUNDARY))) { 1785 goto out; 1786 } 1787 if (!(state->state & EXTENT_DELALLOC)) { 1788 if (!found) 1789 *end = state->end; 1790 goto out; 1791 } 1792 if (!found) { 1793 *start = state->start; 1794 *cached_state = state; 1795 refcount_inc(&state->refs); 1796 } 1797 found = true; 1798 *end = state->end; 1799 cur_start = state->end + 1; 1800 node = rb_next(node); 1801 total_bytes += state->end - state->start + 1; 1802 if (total_bytes >= max_bytes) 1803 break; 1804 if (!node) 1805 break; 1806 } 1807 out: 1808 spin_unlock(&tree->lock); 1809 return found; 1810 } 1811 1812 static int __process_pages_contig(struct address_space *mapping, 1813 struct page *locked_page, 1814 pgoff_t start_index, pgoff_t end_index, 1815 unsigned long page_ops, pgoff_t *index_ret); 1816 1817 static noinline void __unlock_for_delalloc(struct inode *inode, 1818 struct page *locked_page, 1819 u64 start, u64 end) 1820 { 1821 unsigned long index = start >> PAGE_SHIFT; 1822 unsigned long end_index = end >> PAGE_SHIFT; 1823 1824 ASSERT(locked_page); 1825 if (index == locked_page->index && end_index == index) 1826 return; 1827 1828 __process_pages_contig(inode->i_mapping, locked_page, index, end_index, 1829 PAGE_UNLOCK, NULL); 1830 } 1831 1832 static noinline int lock_delalloc_pages(struct inode *inode, 1833 struct page *locked_page, 1834 u64 delalloc_start, 1835 u64 delalloc_end) 1836 { 1837 unsigned long index = delalloc_start >> PAGE_SHIFT; 1838 unsigned long index_ret = index; 1839 unsigned long end_index = delalloc_end >> PAGE_SHIFT; 1840 int ret; 1841 1842 ASSERT(locked_page); 1843 if (index == locked_page->index && index == end_index) 1844 return 0; 1845 1846 ret = __process_pages_contig(inode->i_mapping, locked_page, index, 1847 end_index, PAGE_LOCK, &index_ret); 1848 if (ret == -EAGAIN) 1849 __unlock_for_delalloc(inode, locked_page, delalloc_start, 1850 (u64)index_ret << PAGE_SHIFT); 1851 return ret; 1852 } 1853 1854 /* 1855 * Find and lock a contiguous range of bytes in the file marked as delalloc, no 1856 * more than @max_bytes. @Start and @end are used to return the range, 1857 * 1858 * Return: true if we find something 1859 * false if nothing was in the tree 1860 */ 1861 EXPORT_FOR_TESTS 1862 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode, 1863 struct page *locked_page, u64 *start, 1864 u64 *end) 1865 { 1866 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 1867 u64 max_bytes = BTRFS_MAX_EXTENT_SIZE; 1868 u64 delalloc_start; 1869 u64 delalloc_end; 1870 bool found; 1871 struct extent_state *cached_state = NULL; 1872 int ret; 1873 int loops = 0; 1874 1875 again: 1876 /* step one, find a bunch of delalloc bytes starting at start */ 1877 delalloc_start = *start; 1878 delalloc_end = 0; 1879 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end, 1880 max_bytes, &cached_state); 1881 if (!found || delalloc_end <= *start) { 1882 *start = delalloc_start; 1883 *end = delalloc_end; 1884 free_extent_state(cached_state); 1885 return false; 1886 } 1887 1888 /* 1889 * start comes from the offset of locked_page. 
We have to lock 1890 * pages in order, so we can't process delalloc bytes before 1891 * locked_page 1892 */ 1893 if (delalloc_start < *start) 1894 delalloc_start = *start; 1895 1896 /* 1897 * make sure to limit the number of pages we try to lock down 1898 */ 1899 if (delalloc_end + 1 - delalloc_start > max_bytes) 1900 delalloc_end = delalloc_start + max_bytes - 1; 1901 1902 /* step two, lock all the pages after the page that has start */ 1903 ret = lock_delalloc_pages(inode, locked_page, 1904 delalloc_start, delalloc_end); 1905 ASSERT(!ret || ret == -EAGAIN); 1906 if (ret == -EAGAIN) { 1907 /* some of the pages are gone, lets avoid looping by 1908 * shortening the size of the delalloc range we're searching 1909 */ 1910 free_extent_state(cached_state); 1911 cached_state = NULL; 1912 if (!loops) { 1913 max_bytes = PAGE_SIZE; 1914 loops = 1; 1915 goto again; 1916 } else { 1917 found = false; 1918 goto out_failed; 1919 } 1920 } 1921 1922 /* step three, lock the state bits for the whole range */ 1923 lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state); 1924 1925 /* then test to make sure it is all still delalloc */ 1926 ret = test_range_bit(tree, delalloc_start, delalloc_end, 1927 EXTENT_DELALLOC, 1, cached_state); 1928 if (!ret) { 1929 unlock_extent_cached(tree, delalloc_start, delalloc_end, 1930 &cached_state); 1931 __unlock_for_delalloc(inode, locked_page, 1932 delalloc_start, delalloc_end); 1933 cond_resched(); 1934 goto again; 1935 } 1936 free_extent_state(cached_state); 1937 *start = delalloc_start; 1938 *end = delalloc_end; 1939 out_failed: 1940 return found; 1941 } 1942 1943 static int __process_pages_contig(struct address_space *mapping, 1944 struct page *locked_page, 1945 pgoff_t start_index, pgoff_t end_index, 1946 unsigned long page_ops, pgoff_t *index_ret) 1947 { 1948 unsigned long nr_pages = end_index - start_index + 1; 1949 unsigned long pages_locked = 0; 1950 pgoff_t index = start_index; 1951 struct page *pages[16]; 1952 unsigned ret; 1953 int err = 0; 1954 int i; 1955 1956 if (page_ops & PAGE_LOCK) { 1957 ASSERT(page_ops == PAGE_LOCK); 1958 ASSERT(index_ret && *index_ret == start_index); 1959 } 1960 1961 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0) 1962 mapping_set_error(mapping, -EIO); 1963 1964 while (nr_pages > 0) { 1965 ret = find_get_pages_contig(mapping, index, 1966 min_t(unsigned long, 1967 nr_pages, ARRAY_SIZE(pages)), pages); 1968 if (ret == 0) { 1969 /* 1970 * Only if we're going to lock these pages, 1971 * can we find nothing at @index. 
1972 */ 1973 ASSERT(page_ops & PAGE_LOCK); 1974 err = -EAGAIN; 1975 goto out; 1976 } 1977 1978 for (i = 0; i < ret; i++) { 1979 if (page_ops & PAGE_SET_PRIVATE2) 1980 SetPagePrivate2(pages[i]); 1981 1982 if (locked_page && pages[i] == locked_page) { 1983 put_page(pages[i]); 1984 pages_locked++; 1985 continue; 1986 } 1987 if (page_ops & PAGE_CLEAR_DIRTY) 1988 clear_page_dirty_for_io(pages[i]); 1989 if (page_ops & PAGE_SET_WRITEBACK) 1990 set_page_writeback(pages[i]); 1991 if (page_ops & PAGE_SET_ERROR) 1992 SetPageError(pages[i]); 1993 if (page_ops & PAGE_END_WRITEBACK) 1994 end_page_writeback(pages[i]); 1995 if (page_ops & PAGE_UNLOCK) 1996 unlock_page(pages[i]); 1997 if (page_ops & PAGE_LOCK) { 1998 lock_page(pages[i]); 1999 if (!PageDirty(pages[i]) || 2000 pages[i]->mapping != mapping) { 2001 unlock_page(pages[i]); 2002 for (; i < ret; i++) 2003 put_page(pages[i]); 2004 err = -EAGAIN; 2005 goto out; 2006 } 2007 } 2008 put_page(pages[i]); 2009 pages_locked++; 2010 } 2011 nr_pages -= ret; 2012 index += ret; 2013 cond_resched(); 2014 } 2015 out: 2016 if (err && index_ret) 2017 *index_ret = start_index + pages_locked - 1; 2018 return err; 2019 } 2020 2021 void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, 2022 struct page *locked_page, 2023 unsigned clear_bits, 2024 unsigned long page_ops) 2025 { 2026 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0, 2027 NULL); 2028 2029 __process_pages_contig(inode->i_mapping, locked_page, 2030 start >> PAGE_SHIFT, end >> PAGE_SHIFT, 2031 page_ops, NULL); 2032 } 2033 2034 /* 2035 * count the number of bytes in the tree that have a given bit(s) 2036 * set. This can be fairly slow, except for EXTENT_DIRTY which is 2037 * cached. The total number found is returned. 2038 */ 2039 u64 count_range_bits(struct extent_io_tree *tree, 2040 u64 *start, u64 search_end, u64 max_bytes, 2041 unsigned bits, int contig) 2042 { 2043 struct rb_node *node; 2044 struct extent_state *state; 2045 u64 cur_start = *start; 2046 u64 total_bytes = 0; 2047 u64 last = 0; 2048 int found = 0; 2049 2050 if (WARN_ON(search_end <= cur_start)) 2051 return 0; 2052 2053 spin_lock(&tree->lock); 2054 if (cur_start == 0 && bits == EXTENT_DIRTY) { 2055 total_bytes = tree->dirty_bytes; 2056 goto out; 2057 } 2058 /* 2059 * this search will find all the extents that end after 2060 * our range starts. 2061 */ 2062 node = tree_search(tree, cur_start); 2063 if (!node) 2064 goto out; 2065 2066 while (1) { 2067 state = rb_entry(node, struct extent_state, rb_node); 2068 if (state->start > search_end) 2069 break; 2070 if (contig && found && state->start > last + 1) 2071 break; 2072 if (state->end >= cur_start && (state->state & bits) == bits) { 2073 total_bytes += min(search_end, state->end) + 1 - 2074 max(cur_start, state->start); 2075 if (total_bytes >= max_bytes) 2076 break; 2077 if (!found) { 2078 *start = max(cur_start, state->start); 2079 found = 1; 2080 } 2081 last = state->end; 2082 } else if (contig && found) { 2083 break; 2084 } 2085 node = rb_next(node); 2086 if (!node) 2087 break; 2088 } 2089 out: 2090 spin_unlock(&tree->lock); 2091 return total_bytes; 2092 } 2093 2094 /* 2095 * set the private field for a given byte offset in the tree. If there isn't 2096 * an extent_state there already, this does nothing. 
2097 */ 2098 int set_state_failrec(struct extent_io_tree *tree, u64 start, 2099 struct io_failure_record *failrec) 2100 { 2101 struct rb_node *node; 2102 struct extent_state *state; 2103 int ret = 0; 2104 2105 spin_lock(&tree->lock); 2106 /* 2107 * this search will find all the extents that end after 2108 * our range starts. 2109 */ 2110 node = tree_search(tree, start); 2111 if (!node) { 2112 ret = -ENOENT; 2113 goto out; 2114 } 2115 state = rb_entry(node, struct extent_state, rb_node); 2116 if (state->start != start) { 2117 ret = -ENOENT; 2118 goto out; 2119 } 2120 state->failrec = failrec; 2121 out: 2122 spin_unlock(&tree->lock); 2123 return ret; 2124 } 2125 2126 int get_state_failrec(struct extent_io_tree *tree, u64 start, 2127 struct io_failure_record **failrec) 2128 { 2129 struct rb_node *node; 2130 struct extent_state *state; 2131 int ret = 0; 2132 2133 spin_lock(&tree->lock); 2134 /* 2135 * this search will find all the extents that end after 2136 * our range starts. 2137 */ 2138 node = tree_search(tree, start); 2139 if (!node) { 2140 ret = -ENOENT; 2141 goto out; 2142 } 2143 state = rb_entry(node, struct extent_state, rb_node); 2144 if (state->start != start) { 2145 ret = -ENOENT; 2146 goto out; 2147 } 2148 *failrec = state->failrec; 2149 out: 2150 spin_unlock(&tree->lock); 2151 return ret; 2152 } 2153 2154 /* 2155 * searches a range in the state tree for a given mask. 2156 * If 'filled' == 1, this returns 1 only if every extent in the tree 2157 * has the bits set. Otherwise, 1 is returned if any bit in the 2158 * range is found set. 2159 */ 2160 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, 2161 unsigned bits, int filled, struct extent_state *cached) 2162 { 2163 struct extent_state *state = NULL; 2164 struct rb_node *node; 2165 int bitset = 0; 2166 2167 spin_lock(&tree->lock); 2168 if (cached && extent_state_in_tree(cached) && cached->start <= start && 2169 cached->end > start) 2170 node = &cached->rb_node; 2171 else 2172 node = tree_search(tree, start); 2173 while (node && start <= end) { 2174 state = rb_entry(node, struct extent_state, rb_node); 2175 2176 if (filled && state->start > start) { 2177 bitset = 0; 2178 break; 2179 } 2180 2181 if (state->start > end) 2182 break; 2183 2184 if (state->state & bits) { 2185 bitset = 1; 2186 if (!filled) 2187 break; 2188 } else if (filled) { 2189 bitset = 0; 2190 break; 2191 } 2192 2193 if (state->end == (u64)-1) 2194 break; 2195 2196 start = state->end + 1; 2197 if (start > end) 2198 break; 2199 node = rb_next(node); 2200 if (!node) { 2201 if (filled) 2202 bitset = 0; 2203 break; 2204 } 2205 } 2206 spin_unlock(&tree->lock); 2207 return bitset; 2208 } 2209 2210 /* 2211 * helper function to set a given page up to date if all the 2212 * extents in the tree for that page are up to date 2213 */ 2214 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) 2215 { 2216 u64 start = page_offset(page); 2217 u64 end = start + PAGE_SIZE - 1; 2218 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) 2219 SetPageUptodate(page); 2220 } 2221 2222 int free_io_failure(struct extent_io_tree *failure_tree, 2223 struct extent_io_tree *io_tree, 2224 struct io_failure_record *rec) 2225 { 2226 int ret; 2227 int err = 0; 2228 2229 set_state_failrec(failure_tree, rec->start, NULL); 2230 ret = clear_extent_bits(failure_tree, rec->start, 2231 rec->start + rec->len - 1, 2232 EXTENT_LOCKED | EXTENT_DIRTY); 2233 if (ret) 2234 err = ret; 2235 2236 ret = clear_extent_bits(io_tree, rec->start, 2237 rec->start + 
rec->len - 1, 2238 EXTENT_DAMAGED); 2239 if (ret && !err) 2240 err = ret; 2241 2242 kfree(rec); 2243 return err; 2244 } 2245 2246 /* 2247 * this bypasses the standard btrfs submit functions deliberately, as 2248 * the standard behavior is to write all copies in a raid setup. here we only 2249 * want to write the one bad copy. so we do the mapping for ourselves and issue 2250 * submit_bio directly. 2251 * to avoid any synchronization issues, wait for the data after writing, which 2252 * actually prevents the read that triggered the error from finishing. 2253 * currently, there can be no more than two copies of every data bit. thus, 2254 * exactly one rewrite is required. 2255 */ 2256 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, 2257 u64 length, u64 logical, struct page *page, 2258 unsigned int pg_offset, int mirror_num) 2259 { 2260 struct bio *bio; 2261 struct btrfs_device *dev; 2262 u64 map_length = 0; 2263 u64 sector; 2264 struct btrfs_bio *bbio = NULL; 2265 int ret; 2266 2267 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY)); 2268 BUG_ON(!mirror_num); 2269 2270 bio = btrfs_io_bio_alloc(1); 2271 bio->bi_iter.bi_size = 0; 2272 map_length = length; 2273 2274 /* 2275 * Avoid races with device replace and make sure our bbio has devices 2276 * associated to its stripes that don't go away while we are doing the 2277 * read repair operation. 2278 */ 2279 btrfs_bio_counter_inc_blocked(fs_info); 2280 if (btrfs_is_parity_mirror(fs_info, logical, length)) { 2281 /* 2282 * Note that we don't use BTRFS_MAP_WRITE because it's supposed 2283 * to update all raid stripes, but here we just want to correct 2284 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad 2285 * stripe's dev and sector. 2286 */ 2287 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, 2288 &map_length, &bbio, 0); 2289 if (ret) { 2290 btrfs_bio_counter_dec(fs_info); 2291 bio_put(bio); 2292 return -EIO; 2293 } 2294 ASSERT(bbio->mirror_num == 1); 2295 } else { 2296 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, 2297 &map_length, &bbio, mirror_num); 2298 if (ret) { 2299 btrfs_bio_counter_dec(fs_info); 2300 bio_put(bio); 2301 return -EIO; 2302 } 2303 BUG_ON(mirror_num != bbio->mirror_num); 2304 } 2305 2306 sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9; 2307 bio->bi_iter.bi_sector = sector; 2308 dev = bbio->stripes[bbio->mirror_num - 1].dev; 2309 btrfs_put_bbio(bbio); 2310 if (!dev || !dev->bdev || 2311 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { 2312 btrfs_bio_counter_dec(fs_info); 2313 bio_put(bio); 2314 return -EIO; 2315 } 2316 bio_set_dev(bio, dev->bdev); 2317 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; 2318 bio_add_page(bio, page, length, pg_offset); 2319 2320 if (btrfsic_submit_bio_wait(bio)) { 2321 /* try to remap that extent elsewhere? 
*/ 2322 btrfs_bio_counter_dec(fs_info); 2323 bio_put(bio); 2324 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 2325 return -EIO; 2326 } 2327 2328 btrfs_info_rl_in_rcu(fs_info, 2329 "read error corrected: ino %llu off %llu (dev %s sector %llu)", 2330 ino, start, 2331 rcu_str_deref(dev->name), sector); 2332 btrfs_bio_counter_dec(fs_info); 2333 bio_put(bio); 2334 return 0; 2335 } 2336 2337 int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num) 2338 { 2339 struct btrfs_fs_info *fs_info = eb->fs_info; 2340 u64 start = eb->start; 2341 int i, num_pages = num_extent_pages(eb); 2342 int ret = 0; 2343 2344 if (sb_rdonly(fs_info->sb)) 2345 return -EROFS; 2346 2347 for (i = 0; i < num_pages; i++) { 2348 struct page *p = eb->pages[i]; 2349 2350 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p, 2351 start - page_offset(p), mirror_num); 2352 if (ret) 2353 break; 2354 start += PAGE_SIZE; 2355 } 2356 2357 return ret; 2358 } 2359 2360 /* 2361 * each time an IO finishes, we do a fast check in the IO failure tree 2362 * to see if we need to process or clean up an io_failure_record 2363 */ 2364 int clean_io_failure(struct btrfs_fs_info *fs_info, 2365 struct extent_io_tree *failure_tree, 2366 struct extent_io_tree *io_tree, u64 start, 2367 struct page *page, u64 ino, unsigned int pg_offset) 2368 { 2369 u64 private; 2370 struct io_failure_record *failrec; 2371 struct extent_state *state; 2372 int num_copies; 2373 int ret; 2374 2375 private = 0; 2376 ret = count_range_bits(failure_tree, &private, (u64)-1, 1, 2377 EXTENT_DIRTY, 0); 2378 if (!ret) 2379 return 0; 2380 2381 ret = get_state_failrec(failure_tree, start, &failrec); 2382 if (ret) 2383 return 0; 2384 2385 BUG_ON(!failrec->this_mirror); 2386 2387 if (failrec->in_validation) { 2388 /* there was no real error, just free the record */ 2389 btrfs_debug(fs_info, 2390 "clean_io_failure: freeing dummy error at %llu", 2391 failrec->start); 2392 goto out; 2393 } 2394 if (sb_rdonly(fs_info->sb)) 2395 goto out; 2396 2397 spin_lock(&io_tree->lock); 2398 state = find_first_extent_bit_state(io_tree, 2399 failrec->start, 2400 EXTENT_LOCKED); 2401 spin_unlock(&io_tree->lock); 2402 2403 if (state && state->start <= failrec->start && 2404 state->end >= failrec->start + failrec->len - 1) { 2405 num_copies = btrfs_num_copies(fs_info, failrec->logical, 2406 failrec->len); 2407 if (num_copies > 1) { 2408 repair_io_failure(fs_info, ino, start, failrec->len, 2409 failrec->logical, page, pg_offset, 2410 failrec->failed_mirror); 2411 } 2412 } 2413 2414 out: 2415 free_io_failure(failure_tree, io_tree, failrec); 2416 2417 return 0; 2418 } 2419 2420 /* 2421 * Can be called when 2422 * - hold extent lock 2423 * - under ordered extent 2424 * - the inode is freeing 2425 */ 2426 void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end) 2427 { 2428 struct extent_io_tree *failure_tree = &inode->io_failure_tree; 2429 struct io_failure_record *failrec; 2430 struct extent_state *state, *next; 2431 2432 if (RB_EMPTY_ROOT(&failure_tree->state)) 2433 return; 2434 2435 spin_lock(&failure_tree->lock); 2436 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY); 2437 while (state) { 2438 if (state->start > end) 2439 break; 2440 2441 ASSERT(state->end <= end); 2442 2443 next = next_state(state); 2444 2445 failrec = state->failrec; 2446 free_extent_state(state); 2447 kfree(failrec); 2448 2449 state = next; 2450 } 2451 spin_unlock(&failure_tree->lock); 2452 } 2453 2454 int btrfs_get_io_failure_record(struct 
inode *inode, u64 start, u64 end, 2455 struct io_failure_record **failrec_ret) 2456 { 2457 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2458 struct io_failure_record *failrec; 2459 struct extent_map *em; 2460 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 2461 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 2462 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 2463 int ret; 2464 u64 logical; 2465 2466 ret = get_state_failrec(failure_tree, start, &failrec); 2467 if (ret) { 2468 failrec = kzalloc(sizeof(*failrec), GFP_NOFS); 2469 if (!failrec) 2470 return -ENOMEM; 2471 2472 failrec->start = start; 2473 failrec->len = end - start + 1; 2474 failrec->this_mirror = 0; 2475 failrec->bio_flags = 0; 2476 failrec->in_validation = 0; 2477 2478 read_lock(&em_tree->lock); 2479 em = lookup_extent_mapping(em_tree, start, failrec->len); 2480 if (!em) { 2481 read_unlock(&em_tree->lock); 2482 kfree(failrec); 2483 return -EIO; 2484 } 2485 2486 if (em->start > start || em->start + em->len <= start) { 2487 free_extent_map(em); 2488 em = NULL; 2489 } 2490 read_unlock(&em_tree->lock); 2491 if (!em) { 2492 kfree(failrec); 2493 return -EIO; 2494 } 2495 2496 logical = start - em->start; 2497 logical = em->block_start + logical; 2498 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 2499 logical = em->block_start; 2500 failrec->bio_flags = EXTENT_BIO_COMPRESSED; 2501 extent_set_compress_type(&failrec->bio_flags, 2502 em->compress_type); 2503 } 2504 2505 btrfs_debug(fs_info, 2506 "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu", 2507 logical, start, failrec->len); 2508 2509 failrec->logical = logical; 2510 free_extent_map(em); 2511 2512 /* set the bits in the private failure tree */ 2513 ret = set_extent_bits(failure_tree, start, end, 2514 EXTENT_LOCKED | EXTENT_DIRTY); 2515 if (ret >= 0) 2516 ret = set_state_failrec(failure_tree, start, failrec); 2517 /* set the bits in the inode's tree */ 2518 if (ret >= 0) 2519 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED); 2520 if (ret < 0) { 2521 kfree(failrec); 2522 return ret; 2523 } 2524 } else { 2525 btrfs_debug(fs_info, 2526 "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d", 2527 failrec->logical, failrec->start, failrec->len, 2528 failrec->in_validation); 2529 /* 2530 * when data can be on disk more than twice, add to failrec here 2531 * (e.g. with a list for failed_mirror) to make 2532 * clean_io_failure() clean all those errors at once. 2533 */ 2534 } 2535 2536 *failrec_ret = failrec; 2537 2538 return 0; 2539 } 2540 2541 static bool btrfs_check_repairable(struct inode *inode, bool needs_validation, 2542 struct io_failure_record *failrec, 2543 int failed_mirror) 2544 { 2545 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2546 int num_copies; 2547 2548 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len); 2549 if (num_copies == 1) { 2550 /* 2551 * we only have a single copy of the data, so don't bother with 2552 * all the retry and error correction code that follows. no 2553 * matter what the error is, it is very likely to persist. 
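 *
 * Worked example (editor's illustration, not in the original comment):
 * with num_copies == 2 and failed_mirror == 1 the rotation below picks
 * this_mirror == 2 for the retry, and with failed_mirror == 2 it picks
 * this_mirror == 1; as soon as this_mirror would exceed num_copies the
 * repair attempt is given up as well.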
2554 */ 2555 btrfs_debug(fs_info, 2556 "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d", 2557 num_copies, failrec->this_mirror, failed_mirror); 2558 return false; 2559 } 2560 2561 /* 2562 * there are two premises: 2563 * a) deliver good data to the caller 2564 * b) correct the bad sectors on disk 2565 */ 2566 if (needs_validation) { 2567 /* 2568 * to fulfill b), we need to know the exact failing sectors, as 2569 * we don't want to rewrite any more than the failed ones. thus, 2570 * we need separate read requests for the failed bio 2571 * 2572 * if the following BUG_ON triggers, our validation request got 2573 * merged. we need separate requests for our algorithm to work. 2574 */ 2575 BUG_ON(failrec->in_validation); 2576 failrec->in_validation = 1; 2577 failrec->this_mirror = failed_mirror; 2578 } else { 2579 /* 2580 * we're ready to fulfill a) and b) alongside. get a good copy 2581 * of the failed sector and if we succeed, we have setup 2582 * everything for repair_io_failure to do the rest for us. 2583 */ 2584 if (failrec->in_validation) { 2585 BUG_ON(failrec->this_mirror != failed_mirror); 2586 failrec->in_validation = 0; 2587 failrec->this_mirror = 0; 2588 } 2589 failrec->failed_mirror = failed_mirror; 2590 failrec->this_mirror++; 2591 if (failrec->this_mirror == failed_mirror) 2592 failrec->this_mirror++; 2593 } 2594 2595 if (failrec->this_mirror > num_copies) { 2596 btrfs_debug(fs_info, 2597 "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d", 2598 num_copies, failrec->this_mirror, failed_mirror); 2599 return false; 2600 } 2601 2602 return true; 2603 } 2604 2605 static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio) 2606 { 2607 u64 len = 0; 2608 const u32 blocksize = inode->i_sb->s_blocksize; 2609 2610 /* 2611 * If bi_status is BLK_STS_OK, then this was a checksum error, not an 2612 * I/O error. In this case, we already know exactly which sector was 2613 * bad, so we don't need to validate. 2614 */ 2615 if (bio->bi_status == BLK_STS_OK) 2616 return false; 2617 2618 /* 2619 * We need to validate each sector individually if the failed I/O was 2620 * for multiple sectors. 2621 * 2622 * There are a few possible bios that can end up here: 2623 * 1. A buffered read bio, which is not cloned. 2624 * 2. A direct I/O read bio, which is cloned. 2625 * 3. A (buffered or direct) repair bio, which is not cloned. 2626 * 2627 * For cloned bios (case 2), we can get the size from 2628 * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get 2629 * it from the bvecs. 
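 *
 * Illustration (editor's addition, not in the original comment): with a
 * 4K block size, a failed single-block read never needs validation,
 * while a failed 16K read does, because we do not yet know which of its
 * four blocks is actually bad.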
2630 */ 2631 if (bio_flagged(bio, BIO_CLONED)) { 2632 if (btrfs_io_bio(bio)->iter.bi_size > blocksize) 2633 return true; 2634 } else { 2635 struct bio_vec *bvec; 2636 int i; 2637 2638 bio_for_each_bvec_all(bvec, bio, i) { 2639 len += bvec->bv_len; 2640 if (len > blocksize) 2641 return true; 2642 } 2643 } 2644 return false; 2645 } 2646 2647 blk_status_t btrfs_submit_read_repair(struct inode *inode, 2648 struct bio *failed_bio, u64 phy_offset, 2649 struct page *page, unsigned int pgoff, 2650 u64 start, u64 end, int failed_mirror, 2651 submit_bio_hook_t *submit_bio_hook) 2652 { 2653 struct io_failure_record *failrec; 2654 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2655 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 2656 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 2657 struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio); 2658 const int icsum = phy_offset >> inode->i_sb->s_blocksize_bits; 2659 bool need_validation; 2660 struct bio *repair_bio; 2661 struct btrfs_io_bio *repair_io_bio; 2662 blk_status_t status; 2663 int ret; 2664 2665 btrfs_debug(fs_info, 2666 "repair read error: read error at %llu", start); 2667 2668 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); 2669 2670 ret = btrfs_get_io_failure_record(inode, start, end, &failrec); 2671 if (ret) 2672 return errno_to_blk_status(ret); 2673 2674 need_validation = btrfs_io_needs_validation(inode, failed_bio); 2675 2676 if (!btrfs_check_repairable(inode, need_validation, failrec, 2677 failed_mirror)) { 2678 free_io_failure(failure_tree, tree, failrec); 2679 return BLK_STS_IOERR; 2680 } 2681 2682 repair_bio = btrfs_io_bio_alloc(1); 2683 repair_io_bio = btrfs_io_bio(repair_bio); 2684 repair_bio->bi_opf = REQ_OP_READ; 2685 if (need_validation) 2686 repair_bio->bi_opf |= REQ_FAILFAST_DEV; 2687 repair_bio->bi_end_io = failed_bio->bi_end_io; 2688 repair_bio->bi_iter.bi_sector = failrec->logical >> 9; 2689 repair_bio->bi_private = failed_bio->bi_private; 2690 2691 if (failed_io_bio->csum) { 2692 const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); 2693 2694 repair_io_bio->csum = repair_io_bio->csum_inline; 2695 memcpy(repair_io_bio->csum, 2696 failed_io_bio->csum + csum_size * icsum, csum_size); 2697 } 2698 2699 bio_add_page(repair_bio, page, failrec->len, pgoff); 2700 repair_io_bio->logical = failrec->start; 2701 repair_io_bio->iter = repair_bio->bi_iter; 2702 2703 btrfs_debug(btrfs_sb(inode->i_sb), 2704 "repair read error: submitting new read to mirror %d, in_validation=%d", 2705 failrec->this_mirror, failrec->in_validation); 2706 2707 status = submit_bio_hook(inode, repair_bio, failrec->this_mirror, 2708 failrec->bio_flags); 2709 if (status) { 2710 free_io_failure(failure_tree, tree, failrec); 2711 bio_put(repair_bio); 2712 } 2713 return status; 2714 } 2715 2716 /* lots and lots of room for performance fixes in the end_bio funcs */ 2717 2718 void end_extent_writepage(struct page *page, int err, u64 start, u64 end) 2719 { 2720 int uptodate = (err == 0); 2721 int ret = 0; 2722 2723 btrfs_writepage_endio_finish_ordered(page, start, end, uptodate); 2724 2725 if (!uptodate) { 2726 ClearPageUptodate(page); 2727 SetPageError(page); 2728 ret = err < 0 ? 
err : -EIO; 2729 mapping_set_error(page->mapping, ret); 2730 } 2731 } 2732 2733 /* 2734 * after a writepage IO is done, we need to: 2735 * clear the uptodate bits on error 2736 * clear the writeback bits in the extent tree for this IO 2737 * end_page_writeback if the page has no more pending IO 2738 * 2739 * Scheduling is not allowed, so the extent state tree is expected 2740 * to have one and only one object corresponding to this IO. 2741 */ 2742 static void end_bio_extent_writepage(struct bio *bio) 2743 { 2744 int error = blk_status_to_errno(bio->bi_status); 2745 struct bio_vec *bvec; 2746 u64 start; 2747 u64 end; 2748 struct bvec_iter_all iter_all; 2749 2750 ASSERT(!bio_flagged(bio, BIO_CLONED)); 2751 bio_for_each_segment_all(bvec, bio, iter_all) { 2752 struct page *page = bvec->bv_page; 2753 struct inode *inode = page->mapping->host; 2754 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2755 2756 /* We always issue full-page reads, but if some block 2757 * in a page fails to read, blk_update_request() will 2758 * advance bv_offset and adjust bv_len to compensate. 2759 * Print a warning for nonzero offsets, and an error 2760 * if they don't add up to a full page. */ 2761 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) { 2762 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE) 2763 btrfs_err(fs_info, 2764 "partial page write in btrfs with offset %u and length %u", 2765 bvec->bv_offset, bvec->bv_len); 2766 else 2767 btrfs_info(fs_info, 2768 "incomplete page write in btrfs with offset %u and length %u", 2769 bvec->bv_offset, bvec->bv_len); 2770 } 2771 2772 start = page_offset(page); 2773 end = start + bvec->bv_offset + bvec->bv_len - 1; 2774 2775 end_extent_writepage(page, error, start, end); 2776 end_page_writeback(page); 2777 } 2778 2779 bio_put(bio); 2780 } 2781 2782 static void 2783 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len, 2784 int uptodate) 2785 { 2786 struct extent_state *cached = NULL; 2787 u64 end = start + len - 1; 2788 2789 if (uptodate && tree->track_uptodate) 2790 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); 2791 unlock_extent_cached_atomic(tree, start, end, &cached); 2792 } 2793 2794 /* 2795 * after a readpage IO is done, we need to: 2796 * clear the uptodate bits on error 2797 * set the uptodate bits if things worked 2798 * set the page up to date if all extents in the tree are uptodate 2799 * clear the lock bit in the extent tree 2800 * unlock the page if there are no other extents locked for it 2801 * 2802 * Scheduling is not allowed, so the extent state tree is expected 2803 * to have one and only one object corresponding to this IO. 
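 *
 * (Editor's note, not in the original comment: this completion handler is
 * installed as bio->bi_end_io by submit_extent_page() on behalf of
 * __do_readpage().)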
2804 */ 2805 static void end_bio_extent_readpage(struct bio *bio) 2806 { 2807 struct bio_vec *bvec; 2808 int uptodate = !bio->bi_status; 2809 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 2810 struct extent_io_tree *tree, *failure_tree; 2811 u64 offset = 0; 2812 u64 start; 2813 u64 end; 2814 u64 len; 2815 u64 extent_start = 0; 2816 u64 extent_len = 0; 2817 int mirror; 2818 int ret; 2819 struct bvec_iter_all iter_all; 2820 2821 ASSERT(!bio_flagged(bio, BIO_CLONED)); 2822 bio_for_each_segment_all(bvec, bio, iter_all) { 2823 struct page *page = bvec->bv_page; 2824 struct inode *inode = page->mapping->host; 2825 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2826 bool data_inode = btrfs_ino(BTRFS_I(inode)) 2827 != BTRFS_BTREE_INODE_OBJECTID; 2828 2829 btrfs_debug(fs_info, 2830 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u", 2831 (u64)bio->bi_iter.bi_sector, bio->bi_status, 2832 io_bio->mirror_num); 2833 tree = &BTRFS_I(inode)->io_tree; 2834 failure_tree = &BTRFS_I(inode)->io_failure_tree; 2835 2836 /* We always issue full-page reads, but if some block 2837 * in a page fails to read, blk_update_request() will 2838 * advance bv_offset and adjust bv_len to compensate. 2839 * Print a warning for nonzero offsets, and an error 2840 * if they don't add up to a full page. */ 2841 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) { 2842 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE) 2843 btrfs_err(fs_info, 2844 "partial page read in btrfs with offset %u and length %u", 2845 bvec->bv_offset, bvec->bv_len); 2846 else 2847 btrfs_info(fs_info, 2848 "incomplete page read in btrfs with offset %u and length %u", 2849 bvec->bv_offset, bvec->bv_len); 2850 } 2851 2852 start = page_offset(page); 2853 end = start + bvec->bv_offset + bvec->bv_len - 1; 2854 len = bvec->bv_len; 2855 2856 mirror = io_bio->mirror_num; 2857 if (likely(uptodate)) { 2858 ret = tree->ops->readpage_end_io_hook(io_bio, offset, 2859 page, start, end, 2860 mirror); 2861 if (ret) 2862 uptodate = 0; 2863 else 2864 clean_io_failure(BTRFS_I(inode)->root->fs_info, 2865 failure_tree, tree, start, 2866 page, 2867 btrfs_ino(BTRFS_I(inode)), 0); 2868 } 2869 2870 if (likely(uptodate)) 2871 goto readpage_ok; 2872 2873 if (data_inode) { 2874 2875 /* 2876 * The generic bio_readpage_error handles errors the 2877 * following way: If possible, new read requests are 2878 * created and submitted and will end up in 2879 * end_bio_extent_readpage as well (if we're lucky, 2880 * not in the !uptodate case). In that case it returns 2881 * 0 and we just go on with the next page in our bio. 2882 * If it can't handle the error it will return -EIO and 2883 * we remain responsible for that page. 
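 *
 * (Editor's note, not in the original comment: here the resubmission is
 * done by btrfs_submit_read_repair() just below; a return value of 0
 * means the repair read was queued and we simply move on to the next
 * page in this bio.)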
2884 */ 2885 if (!btrfs_submit_read_repair(inode, bio, offset, page, 2886 start - page_offset(page), 2887 start, end, mirror, 2888 tree->ops->submit_bio_hook)) { 2889 uptodate = !bio->bi_status; 2890 offset += len; 2891 continue; 2892 } 2893 } else { 2894 struct extent_buffer *eb; 2895 2896 eb = (struct extent_buffer *)page->private; 2897 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); 2898 eb->read_mirror = mirror; 2899 atomic_dec(&eb->io_pages); 2900 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, 2901 &eb->bflags)) 2902 btree_readahead_hook(eb, -EIO); 2903 } 2904 readpage_ok: 2905 if (likely(uptodate)) { 2906 loff_t i_size = i_size_read(inode); 2907 pgoff_t end_index = i_size >> PAGE_SHIFT; 2908 unsigned off; 2909 2910 /* Zero out the end if this page straddles i_size */ 2911 off = offset_in_page(i_size); 2912 if (page->index == end_index && off) 2913 zero_user_segment(page, off, PAGE_SIZE); 2914 SetPageUptodate(page); 2915 } else { 2916 ClearPageUptodate(page); 2917 SetPageError(page); 2918 } 2919 unlock_page(page); 2920 offset += len; 2921 2922 if (unlikely(!uptodate)) { 2923 if (extent_len) { 2924 endio_readpage_release_extent(tree, 2925 extent_start, 2926 extent_len, 1); 2927 extent_start = 0; 2928 extent_len = 0; 2929 } 2930 endio_readpage_release_extent(tree, start, 2931 end - start + 1, 0); 2932 } else if (!extent_len) { 2933 extent_start = start; 2934 extent_len = end + 1 - start; 2935 } else if (extent_start + extent_len == start) { 2936 extent_len += end + 1 - start; 2937 } else { 2938 endio_readpage_release_extent(tree, extent_start, 2939 extent_len, uptodate); 2940 extent_start = start; 2941 extent_len = end + 1 - start; 2942 } 2943 } 2944 2945 if (extent_len) 2946 endio_readpage_release_extent(tree, extent_start, extent_len, 2947 uptodate); 2948 btrfs_io_bio_free_csum(io_bio); 2949 bio_put(bio); 2950 } 2951 2952 /* 2953 * Initialize the members up to but not including 'bio'. Use after allocating a 2954 * new bio by bio_alloc_bioset as it does not initialize the bytes outside of 2955 * 'bio' because use of __GFP_ZERO is not supported. 2956 */ 2957 static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio) 2958 { 2959 memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio)); 2960 } 2961 2962 /* 2963 * The following helpers allocate a bio. As it's backed by a bioset, it'll 2964 * never fail. 
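 *
 * (Editor's sketch, not part of the original comment: the accessor is
 *  essentially
 *
 *	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 *
 *  i.e. container_of(bio, struct btrfs_io_bio, bio), which works because
 *  the embedded struct bio is the last member of struct btrfs_io_bio.)
 *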
We're returning a bio right now but you can call btrfs_io_bio 2965 * for the appropriate container_of magic 2966 */ 2967 struct bio *btrfs_bio_alloc(u64 first_byte) 2968 { 2969 struct bio *bio; 2970 2971 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset); 2972 bio->bi_iter.bi_sector = first_byte >> 9; 2973 btrfs_io_bio_init(btrfs_io_bio(bio)); 2974 return bio; 2975 } 2976 2977 struct bio *btrfs_bio_clone(struct bio *bio) 2978 { 2979 struct btrfs_io_bio *btrfs_bio; 2980 struct bio *new; 2981 2982 /* Bio allocation backed by a bioset does not fail */ 2983 new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset); 2984 btrfs_bio = btrfs_io_bio(new); 2985 btrfs_io_bio_init(btrfs_bio); 2986 btrfs_bio->iter = bio->bi_iter; 2987 return new; 2988 } 2989 2990 struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs) 2991 { 2992 struct bio *bio; 2993 2994 /* Bio allocation backed by a bioset does not fail */ 2995 bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset); 2996 btrfs_io_bio_init(btrfs_io_bio(bio)); 2997 return bio; 2998 } 2999 3000 struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size) 3001 { 3002 struct bio *bio; 3003 struct btrfs_io_bio *btrfs_bio; 3004 3005 /* this will never fail when it's backed by a bioset */ 3006 bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset); 3007 ASSERT(bio); 3008 3009 btrfs_bio = btrfs_io_bio(bio); 3010 btrfs_io_bio_init(btrfs_bio); 3011 3012 bio_trim(bio, offset >> 9, size >> 9); 3013 btrfs_bio->iter = bio->bi_iter; 3014 return bio; 3015 } 3016 3017 /* 3018 * @opf: bio REQ_OP_* and REQ_* flags as one value 3019 * @wbc: optional writeback control for io accounting 3020 * @page: page to add to the bio 3021 * @pg_offset: offset of the new bio or to check whether we are adding 3022 * a contiguous page to the previous one 3023 * @size: portion of page that we want to write 3024 * @offset: starting offset in the page 3025 * @bio_ret: must be valid pointer, newly allocated bio will be stored there 3026 * @end_io_func: end_io callback for new bio 3027 * @mirror_num: desired mirror to read/write 3028 * @prev_bio_flags: flags of previous bio to see if we can merge the current one 3029 * @bio_flags: flags of the current bio to see if we can merge them 3030 */ 3031 static int submit_extent_page(unsigned int opf, 3032 struct writeback_control *wbc, 3033 struct page *page, u64 offset, 3034 size_t size, unsigned long pg_offset, 3035 struct bio **bio_ret, 3036 bio_end_io_t end_io_func, 3037 int mirror_num, 3038 unsigned long prev_bio_flags, 3039 unsigned long bio_flags, 3040 bool force_bio_submit) 3041 { 3042 int ret = 0; 3043 struct bio *bio; 3044 size_t page_size = min_t(size_t, size, PAGE_SIZE); 3045 sector_t sector = offset >> 9; 3046 struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree; 3047 3048 ASSERT(bio_ret); 3049 3050 if (*bio_ret) { 3051 bool contig; 3052 bool can_merge = true; 3053 3054 bio = *bio_ret; 3055 if (prev_bio_flags & EXTENT_BIO_COMPRESSED) 3056 contig = bio->bi_iter.bi_sector == sector; 3057 else 3058 contig = bio_end_sector(bio) == sector; 3059 3060 ASSERT(tree->ops); 3061 if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags)) 3062 can_merge = false; 3063 3064 if (prev_bio_flags != bio_flags || !contig || !can_merge || 3065 force_bio_submit || 3066 bio_add_page(bio, page, page_size, pg_offset) < page_size) { 3067 ret = submit_one_bio(bio, mirror_num, prev_bio_flags); 3068 if (ret < 0) { 3069 *bio_ret = NULL; 3070 return ret; 3071 } 3072 bio = NULL; 3073 } else { 3074 if (wbc) 3075 
wbc_account_cgroup_owner(wbc, page, page_size); 3076 return 0; 3077 } 3078 } 3079 3080 bio = btrfs_bio_alloc(offset); 3081 bio_add_page(bio, page, page_size, pg_offset); 3082 bio->bi_end_io = end_io_func; 3083 bio->bi_private = tree; 3084 bio->bi_write_hint = page->mapping->host->i_write_hint; 3085 bio->bi_opf = opf; 3086 if (wbc) { 3087 struct block_device *bdev; 3088 3089 bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev; 3090 bio_set_dev(bio, bdev); 3091 wbc_init_bio(wbc, bio); 3092 wbc_account_cgroup_owner(wbc, page, page_size); 3093 } 3094 3095 *bio_ret = bio; 3096 3097 return ret; 3098 } 3099 3100 static void attach_extent_buffer_page(struct extent_buffer *eb, 3101 struct page *page) 3102 { 3103 if (!PagePrivate(page)) 3104 attach_page_private(page, eb); 3105 else 3106 WARN_ON(page->private != (unsigned long)eb); 3107 } 3108 3109 void set_page_extent_mapped(struct page *page) 3110 { 3111 if (!PagePrivate(page)) 3112 attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE); 3113 } 3114 3115 static struct extent_map * 3116 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, 3117 u64 start, u64 len, get_extent_t *get_extent, 3118 struct extent_map **em_cached) 3119 { 3120 struct extent_map *em; 3121 3122 if (em_cached && *em_cached) { 3123 em = *em_cached; 3124 if (extent_map_in_tree(em) && start >= em->start && 3125 start < extent_map_end(em)) { 3126 refcount_inc(&em->refs); 3127 return em; 3128 } 3129 3130 free_extent_map(em); 3131 *em_cached = NULL; 3132 } 3133 3134 em = get_extent(BTRFS_I(inode), page, pg_offset, start, len); 3135 if (em_cached && !IS_ERR_OR_NULL(em)) { 3136 BUG_ON(*em_cached); 3137 refcount_inc(&em->refs); 3138 *em_cached = em; 3139 } 3140 return em; 3141 } 3142 /* 3143 * basic readpage implementation. 
Locked extent state structs are inserted 3144 * into the tree that are removed when the IO is done (by the end_io 3145 * handlers) 3146 * XXX JDM: This needs looking at to ensure proper page locking 3147 * return 0 on success, otherwise return error 3148 */ 3149 static int __do_readpage(struct page *page, 3150 get_extent_t *get_extent, 3151 struct extent_map **em_cached, 3152 struct bio **bio, int mirror_num, 3153 unsigned long *bio_flags, unsigned int read_flags, 3154 u64 *prev_em_start) 3155 { 3156 struct inode *inode = page->mapping->host; 3157 u64 start = page_offset(page); 3158 const u64 end = start + PAGE_SIZE - 1; 3159 u64 cur = start; 3160 u64 extent_offset; 3161 u64 last_byte = i_size_read(inode); 3162 u64 block_start; 3163 u64 cur_end; 3164 struct extent_map *em; 3165 int ret = 0; 3166 int nr = 0; 3167 size_t pg_offset = 0; 3168 size_t iosize; 3169 size_t disk_io_size; 3170 size_t blocksize = inode->i_sb->s_blocksize; 3171 unsigned long this_bio_flag = 0; 3172 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 3173 3174 set_page_extent_mapped(page); 3175 3176 if (!PageUptodate(page)) { 3177 if (cleancache_get_page(page) == 0) { 3178 BUG_ON(blocksize != PAGE_SIZE); 3179 unlock_extent(tree, start, end); 3180 goto out; 3181 } 3182 } 3183 3184 if (page->index == last_byte >> PAGE_SHIFT) { 3185 char *userpage; 3186 size_t zero_offset = offset_in_page(last_byte); 3187 3188 if (zero_offset) { 3189 iosize = PAGE_SIZE - zero_offset; 3190 userpage = kmap_atomic(page); 3191 memset(userpage + zero_offset, 0, iosize); 3192 flush_dcache_page(page); 3193 kunmap_atomic(userpage); 3194 } 3195 } 3196 while (cur <= end) { 3197 bool force_bio_submit = false; 3198 u64 offset; 3199 3200 if (cur >= last_byte) { 3201 char *userpage; 3202 struct extent_state *cached = NULL; 3203 3204 iosize = PAGE_SIZE - pg_offset; 3205 userpage = kmap_atomic(page); 3206 memset(userpage + pg_offset, 0, iosize); 3207 flush_dcache_page(page); 3208 kunmap_atomic(userpage); 3209 set_extent_uptodate(tree, cur, cur + iosize - 1, 3210 &cached, GFP_NOFS); 3211 unlock_extent_cached(tree, cur, 3212 cur + iosize - 1, &cached); 3213 break; 3214 } 3215 em = __get_extent_map(inode, page, pg_offset, cur, 3216 end - cur + 1, get_extent, em_cached); 3217 if (IS_ERR_OR_NULL(em)) { 3218 SetPageError(page); 3219 unlock_extent(tree, cur, end); 3220 break; 3221 } 3222 extent_offset = cur - em->start; 3223 BUG_ON(extent_map_end(em) <= cur); 3224 BUG_ON(end < cur); 3225 3226 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 3227 this_bio_flag |= EXTENT_BIO_COMPRESSED; 3228 extent_set_compress_type(&this_bio_flag, 3229 em->compress_type); 3230 } 3231 3232 iosize = min(extent_map_end(em) - cur, end - cur + 1); 3233 cur_end = min(extent_map_end(em) - 1, end); 3234 iosize = ALIGN(iosize, blocksize); 3235 if (this_bio_flag & EXTENT_BIO_COMPRESSED) { 3236 disk_io_size = em->block_len; 3237 offset = em->block_start; 3238 } else { 3239 offset = em->block_start + extent_offset; 3240 disk_io_size = iosize; 3241 } 3242 block_start = em->block_start; 3243 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 3244 block_start = EXTENT_MAP_HOLE; 3245 3246 /* 3247 * If we have a file range that points to a compressed extent 3248 * and it's followed by a consecutive file range that points to 3249 * to the same compressed extent (possibly with a different 3250 * offset and/or length, so it either points to the whole extent 3251 * or only part of it), we must make sure we do not submit a 3252 * single bio to populate the pages for the 2 ranges because 3253 * 
this makes the compressed extent read zero out the pages 3254 * belonging to the 2nd range. Imagine the following scenario: 3255 * 3256 * File layout 3257 * [0 - 8K] [8K - 24K] 3258 * | | 3259 * | | 3260 * points to extent X, points to extent X, 3261 * offset 4K, length of 8K offset 0, length 16K 3262 * 3263 * [extent X, compressed length = 4K uncompressed length = 16K] 3264 * 3265 * If the bio to read the compressed extent covers both ranges, 3266 * it will decompress extent X into the pages belonging to the 3267 * first range and then it will stop, zeroing out the remaining 3268 * pages that belong to the other range that points to extent X. 3269 * So here we make sure we submit 2 bios, one for the first 3270 * range and another one for the second range. Both will target 3271 * the same physical extent from disk, but we can't currently 3272 * make the compressed bio endio callback populate the pages 3273 * for both ranges because each compressed bio is tightly 3274 * coupled with a single extent map, and each range can have 3275 * an extent map with a different offset value relative to the 3276 * uncompressed data of our extent and different lengths. This 3277 * is a corner case so we prioritize correctness over 3278 * non-optimal behavior (submitting 2 bios for the same extent). 3279 */ 3280 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) && 3281 prev_em_start && *prev_em_start != (u64)-1 && 3282 *prev_em_start != em->start) 3283 force_bio_submit = true; 3284 3285 if (prev_em_start) 3286 *prev_em_start = em->start; 3287 3288 free_extent_map(em); 3289 em = NULL; 3290 3291 /* we've found a hole, just zero and go on */ 3292 if (block_start == EXTENT_MAP_HOLE) { 3293 char *userpage; 3294 struct extent_state *cached = NULL; 3295 3296 userpage = kmap_atomic(page); 3297 memset(userpage + pg_offset, 0, iosize); 3298 flush_dcache_page(page); 3299 kunmap_atomic(userpage); 3300 3301 set_extent_uptodate(tree, cur, cur + iosize - 1, 3302 &cached, GFP_NOFS); 3303 unlock_extent_cached(tree, cur, 3304 cur + iosize - 1, &cached); 3305 cur = cur + iosize; 3306 pg_offset += iosize; 3307 continue; 3308 } 3309 /* the get_extent function already copied into the page */ 3310 if (test_range_bit(tree, cur, cur_end, 3311 EXTENT_UPTODATE, 1, NULL)) { 3312 check_page_uptodate(tree, page); 3313 unlock_extent(tree, cur, cur + iosize - 1); 3314 cur = cur + iosize; 3315 pg_offset += iosize; 3316 continue; 3317 } 3318 /* we have an inline extent but it didn't get marked up 3319 * to date.
Error out 3320 */ 3321 if (block_start == EXTENT_MAP_INLINE) { 3322 SetPageError(page); 3323 unlock_extent(tree, cur, cur + iosize - 1); 3324 cur = cur + iosize; 3325 pg_offset += iosize; 3326 continue; 3327 } 3328 3329 ret = submit_extent_page(REQ_OP_READ | read_flags, NULL, 3330 page, offset, disk_io_size, 3331 pg_offset, bio, 3332 end_bio_extent_readpage, mirror_num, 3333 *bio_flags, 3334 this_bio_flag, 3335 force_bio_submit); 3336 if (!ret) { 3337 nr++; 3338 *bio_flags = this_bio_flag; 3339 } else { 3340 SetPageError(page); 3341 unlock_extent(tree, cur, cur + iosize - 1); 3342 goto out; 3343 } 3344 cur = cur + iosize; 3345 pg_offset += iosize; 3346 } 3347 out: 3348 if (!nr) { 3349 if (!PageError(page)) 3350 SetPageUptodate(page); 3351 unlock_page(page); 3352 } 3353 return ret; 3354 } 3355 3356 static inline void contiguous_readpages(struct page *pages[], int nr_pages, 3357 u64 start, u64 end, 3358 struct extent_map **em_cached, 3359 struct bio **bio, 3360 unsigned long *bio_flags, 3361 u64 *prev_em_start) 3362 { 3363 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host); 3364 int index; 3365 3366 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); 3367 3368 for (index = 0; index < nr_pages; index++) { 3369 __do_readpage(pages[index], btrfs_get_extent, em_cached, 3370 bio, 0, bio_flags, REQ_RAHEAD, prev_em_start); 3371 put_page(pages[index]); 3372 } 3373 } 3374 3375 static int __extent_read_full_page(struct page *page, 3376 get_extent_t *get_extent, 3377 struct bio **bio, int mirror_num, 3378 unsigned long *bio_flags, 3379 unsigned int read_flags) 3380 { 3381 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); 3382 u64 start = page_offset(page); 3383 u64 end = start + PAGE_SIZE - 1; 3384 int ret; 3385 3386 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); 3387 3388 ret = __do_readpage(page, get_extent, NULL, bio, mirror_num, 3389 bio_flags, read_flags, NULL); 3390 return ret; 3391 } 3392 3393 int extent_read_full_page(struct page *page, get_extent_t *get_extent, 3394 int mirror_num) 3395 { 3396 struct bio *bio = NULL; 3397 unsigned long bio_flags = 0; 3398 int ret; 3399 3400 ret = __extent_read_full_page(page, get_extent, &bio, mirror_num, 3401 &bio_flags, 0); 3402 if (bio) 3403 ret = submit_one_bio(bio, mirror_num, bio_flags); 3404 return ret; 3405 } 3406 3407 static void update_nr_written(struct writeback_control *wbc, 3408 unsigned long nr_written) 3409 { 3410 wbc->nr_to_write -= nr_written; 3411 } 3412 3413 /* 3414 * helper for __extent_writepage, doing all of the delayed allocation setup. 3415 * 3416 * This returns 1 if btrfs_run_delalloc_range function did all the work required 3417 * to write the page (copy into inline extent). In this case the IO has 3418 * been started and the page is already unlocked. 
3419 * 3420 * This returns 0 if all went well (page still locked) 3421 * This returns < 0 if there were errors (page still locked) 3422 */ 3423 static noinline_for_stack int writepage_delalloc(struct inode *inode, 3424 struct page *page, struct writeback_control *wbc, 3425 u64 delalloc_start, unsigned long *nr_written) 3426 { 3427 u64 page_end = delalloc_start + PAGE_SIZE - 1; 3428 bool found; 3429 u64 delalloc_to_write = 0; 3430 u64 delalloc_end = 0; 3431 int ret; 3432 int page_started = 0; 3433 3434 3435 while (delalloc_end < page_end) { 3436 found = find_lock_delalloc_range(inode, page, 3437 &delalloc_start, 3438 &delalloc_end); 3439 if (!found) { 3440 delalloc_start = delalloc_end + 1; 3441 continue; 3442 } 3443 ret = btrfs_run_delalloc_range(inode, page, delalloc_start, 3444 delalloc_end, &page_started, nr_written, wbc); 3445 if (ret) { 3446 SetPageError(page); 3447 /* 3448 * btrfs_run_delalloc_range should return < 0 for error 3449 * but just in case, we use > 0 here meaning the IO is 3450 * started, so we don't want to return > 0 unless 3451 * things are going well. 3452 */ 3453 ret = ret < 0 ? ret : -EIO; 3454 goto done; 3455 } 3456 /* 3457 * delalloc_end is already one less than the total length, so 3458 * we don't subtract one from PAGE_SIZE 3459 */ 3460 delalloc_to_write += (delalloc_end - delalloc_start + 3461 PAGE_SIZE) >> PAGE_SHIFT; 3462 delalloc_start = delalloc_end + 1; 3463 } 3464 if (wbc->nr_to_write < delalloc_to_write) { 3465 int thresh = 8192; 3466 3467 if (delalloc_to_write < thresh * 2) 3468 thresh = delalloc_to_write; 3469 wbc->nr_to_write = min_t(u64, delalloc_to_write, 3470 thresh); 3471 } 3472 3473 /* did the fill delalloc function already unlock and start 3474 * the IO? 3475 */ 3476 if (page_started) { 3477 /* 3478 * we've unlocked the page, so we can't update 3479 * the mapping's writeback index, just update 3480 * nr_to_write. 3481 */ 3482 wbc->nr_to_write -= *nr_written; 3483 return 1; 3484 } 3485 3486 ret = 0; 3487 3488 done: 3489 return ret; 3490 } 3491 3492 /* 3493 * helper for __extent_writepage. This calls the writepage start hooks, 3494 * and does the loop to map the page into extents and bios. 
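 * (The "start hooks" here amount to btrfs_writepage_cow_fixup(), called at
 * the top of this function; editor's note, not in the original comment.)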
3495 * 3496 * We return 1 if the IO is started and the page is unlocked, 3497 * 0 if all went well (page still locked) 3498 * < 0 if there were errors (page still locked) 3499 */ 3500 static noinline_for_stack int __extent_writepage_io(struct inode *inode, 3501 struct page *page, 3502 struct writeback_control *wbc, 3503 struct extent_page_data *epd, 3504 loff_t i_size, 3505 unsigned long nr_written, 3506 int *nr_ret) 3507 { 3508 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 3509 u64 start = page_offset(page); 3510 u64 page_end = start + PAGE_SIZE - 1; 3511 u64 end; 3512 u64 cur = start; 3513 u64 extent_offset; 3514 u64 block_start; 3515 u64 iosize; 3516 struct extent_map *em; 3517 size_t pg_offset = 0; 3518 size_t blocksize; 3519 int ret = 0; 3520 int nr = 0; 3521 const unsigned int write_flags = wbc_to_write_flags(wbc); 3522 bool compressed; 3523 3524 ret = btrfs_writepage_cow_fixup(page, start, page_end); 3525 if (ret) { 3526 /* Fixup worker will requeue */ 3527 redirty_page_for_writepage(wbc, page); 3528 update_nr_written(wbc, nr_written); 3529 unlock_page(page); 3530 return 1; 3531 } 3532 3533 /* 3534 * we don't want to touch the inode after unlocking the page, 3535 * so we update the mapping writeback index now 3536 */ 3537 update_nr_written(wbc, nr_written + 1); 3538 3539 end = page_end; 3540 blocksize = inode->i_sb->s_blocksize; 3541 3542 while (cur <= end) { 3543 u64 em_end; 3544 u64 offset; 3545 3546 if (cur >= i_size) { 3547 btrfs_writepage_endio_finish_ordered(page, cur, 3548 page_end, 1); 3549 break; 3550 } 3551 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur, 3552 end - cur + 1); 3553 if (IS_ERR_OR_NULL(em)) { 3554 SetPageError(page); 3555 ret = PTR_ERR_OR_ZERO(em); 3556 break; 3557 } 3558 3559 extent_offset = cur - em->start; 3560 em_end = extent_map_end(em); 3561 BUG_ON(em_end <= cur); 3562 BUG_ON(end < cur); 3563 iosize = min(em_end - cur, end - cur + 1); 3564 iosize = ALIGN(iosize, blocksize); 3565 offset = em->block_start + extent_offset; 3566 block_start = em->block_start; 3567 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 3568 free_extent_map(em); 3569 em = NULL; 3570 3571 /* 3572 * compressed and inline extents are written through other 3573 * paths in the FS 3574 */ 3575 if (compressed || block_start == EXTENT_MAP_HOLE || 3576 block_start == EXTENT_MAP_INLINE) { 3577 if (compressed) 3578 nr++; 3579 else 3580 btrfs_writepage_endio_finish_ordered(page, cur, 3581 cur + iosize - 1, 1); 3582 cur += iosize; 3583 pg_offset += iosize; 3584 continue; 3585 } 3586 3587 btrfs_set_range_writeback(tree, cur, cur + iosize - 1); 3588 if (!PageWriteback(page)) { 3589 btrfs_err(BTRFS_I(inode)->root->fs_info, 3590 "page %lu not writeback, cur %llu end %llu", 3591 page->index, cur, end); 3592 } 3593 3594 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc, 3595 page, offset, iosize, pg_offset, 3596 &epd->bio, 3597 end_bio_extent_writepage, 3598 0, 0, 0, false); 3599 if (ret) { 3600 SetPageError(page); 3601 if (PageWriteback(page)) 3602 end_page_writeback(page); 3603 } 3604 3605 cur = cur + iosize; 3606 pg_offset += iosize; 3607 nr++; 3608 } 3609 *nr_ret = nr; 3610 return ret; 3611 } 3612 3613 /* 3614 * the writepage semantics are similar to regular writepage. extent 3615 * records are inserted to lock ranges in the tree, and as dirty areas 3616 * are found, they are marked writeback. Then the lock bits are removed 3617 * and the end_io handler clears the writeback ranges 3618 * 3619 * Return 0 if everything goes well. 3620 * Return <0 for error. 
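 *
 * Rough call flow for a regular data page (editor's illustration, not part
 * of the original comment):
 *
 *	__extent_writepage()
 *	    writepage_delalloc()       -> btrfs_run_delalloc_range()
 *	    __extent_writepage_io()    -> submit_extent_page() per block
 *	    end_bio_extent_writepage() <- bio completion clears writeback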
3621 */ 3622 static int __extent_writepage(struct page *page, struct writeback_control *wbc, 3623 struct extent_page_data *epd) 3624 { 3625 struct inode *inode = page->mapping->host; 3626 u64 start = page_offset(page); 3627 u64 page_end = start + PAGE_SIZE - 1; 3628 int ret; 3629 int nr = 0; 3630 size_t pg_offset; 3631 loff_t i_size = i_size_read(inode); 3632 unsigned long end_index = i_size >> PAGE_SHIFT; 3633 unsigned long nr_written = 0; 3634 3635 trace___extent_writepage(page, inode, wbc); 3636 3637 WARN_ON(!PageLocked(page)); 3638 3639 ClearPageError(page); 3640 3641 pg_offset = offset_in_page(i_size); 3642 if (page->index > end_index || 3643 (page->index == end_index && !pg_offset)) { 3644 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); 3645 unlock_page(page); 3646 return 0; 3647 } 3648 3649 if (page->index == end_index) { 3650 char *userpage; 3651 3652 userpage = kmap_atomic(page); 3653 memset(userpage + pg_offset, 0, 3654 PAGE_SIZE - pg_offset); 3655 kunmap_atomic(userpage); 3656 flush_dcache_page(page); 3657 } 3658 3659 set_page_extent_mapped(page); 3660 3661 if (!epd->extent_locked) { 3662 ret = writepage_delalloc(inode, page, wbc, start, &nr_written); 3663 if (ret == 1) 3664 return 0; 3665 if (ret) 3666 goto done; 3667 } 3668 3669 ret = __extent_writepage_io(inode, page, wbc, epd, 3670 i_size, nr_written, &nr); 3671 if (ret == 1) 3672 return 0; 3673 3674 done: 3675 if (nr == 0) { 3676 /* make sure the mapping tag for page dirty gets cleared */ 3677 set_page_writeback(page); 3678 end_page_writeback(page); 3679 } 3680 if (PageError(page)) { 3681 ret = ret < 0 ? ret : -EIO; 3682 end_extent_writepage(page, ret, start, page_end); 3683 } 3684 unlock_page(page); 3685 ASSERT(ret <= 0); 3686 return ret; 3687 } 3688 3689 void wait_on_extent_buffer_writeback(struct extent_buffer *eb) 3690 { 3691 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, 3692 TASK_UNINTERRUPTIBLE); 3693 } 3694 3695 static void end_extent_buffer_writeback(struct extent_buffer *eb) 3696 { 3697 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); 3698 smp_mb__after_atomic(); 3699 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); 3700 } 3701 3702 /* 3703 * Lock eb pages and flush the bio if we can't the locks 3704 * 3705 * Return 0 if nothing went wrong 3706 * Return >0 is same as 0, except bio is not submitted 3707 * Return <0 if something went wrong, no page is locked 3708 */ 3709 static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb, 3710 struct extent_page_data *epd) 3711 { 3712 struct btrfs_fs_info *fs_info = eb->fs_info; 3713 int i, num_pages, failed_page_nr; 3714 int flush = 0; 3715 int ret = 0; 3716 3717 if (!btrfs_try_tree_write_lock(eb)) { 3718 ret = flush_write_bio(epd); 3719 if (ret < 0) 3720 return ret; 3721 flush = 1; 3722 btrfs_tree_lock(eb); 3723 } 3724 3725 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { 3726 btrfs_tree_unlock(eb); 3727 if (!epd->sync_io) 3728 return 0; 3729 if (!flush) { 3730 ret = flush_write_bio(epd); 3731 if (ret < 0) 3732 return ret; 3733 flush = 1; 3734 } 3735 while (1) { 3736 wait_on_extent_buffer_writeback(eb); 3737 btrfs_tree_lock(eb); 3738 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) 3739 break; 3740 btrfs_tree_unlock(eb); 3741 } 3742 } 3743 3744 /* 3745 * We need to do this to prevent races in people who check if the eb is 3746 * under IO since we can end up having no IO bits set for a short period 3747 * of time. 
3748 */ 3749 spin_lock(&eb->refs_lock); 3750 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { 3751 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); 3752 spin_unlock(&eb->refs_lock); 3753 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); 3754 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, 3755 -eb->len, 3756 fs_info->dirty_metadata_batch); 3757 ret = 1; 3758 } else { 3759 spin_unlock(&eb->refs_lock); 3760 } 3761 3762 btrfs_tree_unlock(eb); 3763 3764 if (!ret) 3765 return ret; 3766 3767 num_pages = num_extent_pages(eb); 3768 for (i = 0; i < num_pages; i++) { 3769 struct page *p = eb->pages[i]; 3770 3771 if (!trylock_page(p)) { 3772 if (!flush) { 3773 int err; 3774 3775 err = flush_write_bio(epd); 3776 if (err < 0) { 3777 ret = err; 3778 failed_page_nr = i; 3779 goto err_unlock; 3780 } 3781 flush = 1; 3782 } 3783 lock_page(p); 3784 } 3785 } 3786 3787 return ret; 3788 err_unlock: 3789 /* Unlock already locked pages */ 3790 for (i = 0; i < failed_page_nr; i++) 3791 unlock_page(eb->pages[i]); 3792 /* 3793 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it. 3794 * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can 3795 * be made and undo everything done before. 3796 */ 3797 btrfs_tree_lock(eb); 3798 spin_lock(&eb->refs_lock); 3799 set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); 3800 end_extent_buffer_writeback(eb); 3801 spin_unlock(&eb->refs_lock); 3802 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len, 3803 fs_info->dirty_metadata_batch); 3804 btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); 3805 btrfs_tree_unlock(eb); 3806 return ret; 3807 } 3808 3809 static void set_btree_ioerr(struct page *page) 3810 { 3811 struct extent_buffer *eb = (struct extent_buffer *)page->private; 3812 struct btrfs_fs_info *fs_info; 3813 3814 SetPageError(page); 3815 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) 3816 return; 3817 3818 /* 3819 * If we error out, we should add back the dirty_metadata_bytes 3820 * to make it consistent. 3821 */ 3822 fs_info = eb->fs_info; 3823 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, 3824 eb->len, fs_info->dirty_metadata_batch); 3825 3826 /* 3827 * If writeback for a btree extent that doesn't belong to a log tree 3828 * failed, increment the counter transaction->eb_write_errors. 3829 * We do this because while the transaction is running and before it's 3830 * committing (when we call filemap_fdata[write|wait]_range against 3831 * the btree inode), we might have 3832 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it 3833 * returns an error or an error happens during writeback, when we're 3834 * committing the transaction we wouldn't know about it, since the pages 3835 * can be no longer dirty nor marked anymore for writeback (if a 3836 * subsequent modification to the extent buffer didn't happen before the 3837 * transaction commit), which makes filemap_fdata[write|wait]_range not 3838 * able to find the pages tagged with SetPageError at transaction 3839 * commit time. So if this happens we must abort the transaction, 3840 * otherwise we commit a super block with btree roots that point to 3841 * btree nodes/leafs whose content on disk is invalid - either garbage 3842 * or the content of some node/leaf from a past generation that got 3843 * cowed or deleted and is no longer valid. 
3844 * 3845 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would 3846 * not be enough - we need to distinguish between log tree extents vs 3847 * non-log tree extents, and the next filemap_fdatawait_range() call 3848 * will catch and clear such errors in the mapping - and that call might 3849 * be from a log sync and not from a transaction commit. Also, checking 3850 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is 3851 * not done and would not be reliable - the eb might have been released 3852 * from memory and reading it back again means that flag would not be 3853 * set (since it's a runtime flag, not persisted on disk). 3854 * 3855 * Using the flags below in the btree inode also makes us achieve the 3856 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started 3857 * writeback for all dirty pages and before filemap_fdatawait_range() 3858 * is called, the writeback for all dirty pages had already finished 3859 * with errors - because we were not using AS_EIO/AS_ENOSPC, 3860 * filemap_fdatawait_range() would return success, as it could not know 3861 * that writeback errors happened (the pages were no longer tagged for 3862 * writeback). 3863 */ 3864 switch (eb->log_index) { 3865 case -1: 3866 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags); 3867 break; 3868 case 0: 3869 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags); 3870 break; 3871 case 1: 3872 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags); 3873 break; 3874 default: 3875 BUG(); /* unexpected, logic error */ 3876 } 3877 } 3878 3879 static void end_bio_extent_buffer_writepage(struct bio *bio) 3880 { 3881 struct bio_vec *bvec; 3882 struct extent_buffer *eb; 3883 int done; 3884 struct bvec_iter_all iter_all; 3885 3886 ASSERT(!bio_flagged(bio, BIO_CLONED)); 3887 bio_for_each_segment_all(bvec, bio, iter_all) { 3888 struct page *page = bvec->bv_page; 3889 3890 eb = (struct extent_buffer *)page->private; 3891 BUG_ON(!eb); 3892 done = atomic_dec_and_test(&eb->io_pages); 3893 3894 if (bio->bi_status || 3895 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { 3896 ClearPageUptodate(page); 3897 set_btree_ioerr(page); 3898 } 3899 3900 end_page_writeback(page); 3901 3902 if (!done) 3903 continue; 3904 3905 end_extent_buffer_writeback(eb); 3906 } 3907 3908 bio_put(bio); 3909 } 3910 3911 static noinline_for_stack int write_one_eb(struct extent_buffer *eb, 3912 struct writeback_control *wbc, 3913 struct extent_page_data *epd) 3914 { 3915 u64 offset = eb->start; 3916 u32 nritems; 3917 int i, num_pages; 3918 unsigned long start, end; 3919 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META; 3920 int ret = 0; 3921 3922 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); 3923 num_pages = num_extent_pages(eb); 3924 atomic_set(&eb->io_pages, num_pages); 3925 3926 /* set btree blocks beyond nritems with 0 to avoid stale content. */ 3927 nritems = btrfs_header_nritems(eb); 3928 if (btrfs_header_level(eb) > 0) { 3929 end = btrfs_node_key_ptr_offset(nritems); 3930 3931 memzero_extent_buffer(eb, end, eb->len - end); 3932 } else { 3933 /* 3934 * leaf: 3935 * header 0 1 2 .. N ... data_N .. 
data_2 data_1 data_0 3936 */ 3937 start = btrfs_item_nr_offset(nritems); 3938 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb); 3939 memzero_extent_buffer(eb, start, end - start); 3940 } 3941 3942 for (i = 0; i < num_pages; i++) { 3943 struct page *p = eb->pages[i]; 3944 3945 clear_page_dirty_for_io(p); 3946 set_page_writeback(p); 3947 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc, 3948 p, offset, PAGE_SIZE, 0, 3949 &epd->bio, 3950 end_bio_extent_buffer_writepage, 3951 0, 0, 0, false); 3952 if (ret) { 3953 set_btree_ioerr(p); 3954 if (PageWriteback(p)) 3955 end_page_writeback(p); 3956 if (atomic_sub_and_test(num_pages - i, &eb->io_pages)) 3957 end_extent_buffer_writeback(eb); 3958 ret = -EIO; 3959 break; 3960 } 3961 offset += PAGE_SIZE; 3962 update_nr_written(wbc, 1); 3963 unlock_page(p); 3964 } 3965 3966 if (unlikely(ret)) { 3967 for (; i < num_pages; i++) { 3968 struct page *p = eb->pages[i]; 3969 clear_page_dirty_for_io(p); 3970 unlock_page(p); 3971 } 3972 } 3973 3974 return ret; 3975 } 3976 3977 int btree_write_cache_pages(struct address_space *mapping, 3978 struct writeback_control *wbc) 3979 { 3980 struct extent_buffer *eb, *prev_eb = NULL; 3981 struct extent_page_data epd = { 3982 .bio = NULL, 3983 .extent_locked = 0, 3984 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 3985 }; 3986 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info; 3987 int ret = 0; 3988 int done = 0; 3989 int nr_to_write_done = 0; 3990 struct pagevec pvec; 3991 int nr_pages; 3992 pgoff_t index; 3993 pgoff_t end; /* Inclusive */ 3994 int scanned = 0; 3995 xa_mark_t tag; 3996 3997 pagevec_init(&pvec); 3998 if (wbc->range_cyclic) { 3999 index = mapping->writeback_index; /* Start from prev offset */ 4000 end = -1; 4001 /* 4002 * Start from the beginning does not need to cycle over the 4003 * range, mark it as scanned. 4004 */ 4005 scanned = (index == 0); 4006 } else { 4007 index = wbc->range_start >> PAGE_SHIFT; 4008 end = wbc->range_end >> PAGE_SHIFT; 4009 scanned = 1; 4010 } 4011 if (wbc->sync_mode == WB_SYNC_ALL) 4012 tag = PAGECACHE_TAG_TOWRITE; 4013 else 4014 tag = PAGECACHE_TAG_DIRTY; 4015 retry: 4016 if (wbc->sync_mode == WB_SYNC_ALL) 4017 tag_pages_for_writeback(mapping, index, end); 4018 while (!done && !nr_to_write_done && (index <= end) && 4019 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, 4020 tag))) { 4021 unsigned i; 4022 4023 for (i = 0; i < nr_pages; i++) { 4024 struct page *page = pvec.pages[i]; 4025 4026 if (!PagePrivate(page)) 4027 continue; 4028 4029 spin_lock(&mapping->private_lock); 4030 if (!PagePrivate(page)) { 4031 spin_unlock(&mapping->private_lock); 4032 continue; 4033 } 4034 4035 eb = (struct extent_buffer *)page->private; 4036 4037 /* 4038 * Shouldn't happen and normally this would be a BUG_ON 4039 * but no sense in crashing the users box for something 4040 * we can survive anyway. 
4041 */ 4042 if (WARN_ON(!eb)) { 4043 spin_unlock(&mapping->private_lock); 4044 continue; 4045 } 4046 4047 if (eb == prev_eb) { 4048 spin_unlock(&mapping->private_lock); 4049 continue; 4050 } 4051 4052 ret = atomic_inc_not_zero(&eb->refs); 4053 spin_unlock(&mapping->private_lock); 4054 if (!ret) 4055 continue; 4056 4057 prev_eb = eb; 4058 ret = lock_extent_buffer_for_io(eb, &epd); 4059 if (!ret) { 4060 free_extent_buffer(eb); 4061 continue; 4062 } else if (ret < 0) { 4063 done = 1; 4064 free_extent_buffer(eb); 4065 break; 4066 } 4067 4068 ret = write_one_eb(eb, wbc, &epd); 4069 if (ret) { 4070 done = 1; 4071 free_extent_buffer(eb); 4072 break; 4073 } 4074 free_extent_buffer(eb); 4075 4076 /* 4077 * the filesystem may choose to bump up nr_to_write. 4078 * We have to make sure to honor the new nr_to_write 4079 * at any time 4080 */ 4081 nr_to_write_done = wbc->nr_to_write <= 0; 4082 } 4083 pagevec_release(&pvec); 4084 cond_resched(); 4085 } 4086 if (!scanned && !done) { 4087 /* 4088 * We hit the last page and there is more work to be done: wrap 4089 * back to the start of the file 4090 */ 4091 scanned = 1; 4092 index = 0; 4093 goto retry; 4094 } 4095 ASSERT(ret <= 0); 4096 if (ret < 0) { 4097 end_write_bio(&epd, ret); 4098 return ret; 4099 } 4100 /* 4101 * If something went wrong, don't allow any metadata write bio to be 4102 * submitted. 4103 * 4104 * This would prevent use-after-free if we had dirty pages not 4105 * cleaned up, which can still happen with fuzzed images. 4106 * 4107 * - Bad extent tree 4108 * Allows existing tree blocks to be allocated for other trees. 4109 * 4110 * - Log tree operations 4111 * Existing tree blocks get allocated to the log tree, have their 4112 * generation bumped, then get cleaned in tree re-balance. 4113 * Such a tree block will not be written back, since it's clean, 4114 * thus no WRITTEN flag is set. 4115 * And after the log is written back, this tree block is not tracked 4116 * by any dirty extent_io_tree. 4117 * 4118 * - Offending tree block gets re-dirtied from its original owner 4119 * Since it has a bumped generation and no WRITTEN flag, it can be 4120 * reused without COWing. This tree block will not be tracked 4121 * by btrfs_transaction::dirty_pages. 4122 * 4123 * Now such a dirty tree block will not be cleaned by any dirty 4124 * extent io tree. Thus we don't want to submit such a wild eb 4125 * if the fs already has errors. 4126 */ 4127 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { 4128 ret = flush_write_bio(&epd); 4129 } else { 4130 ret = -EUCLEAN; 4131 end_write_bio(&epd, ret); 4132 } 4133 return ret; 4134 } 4135 4136 /** 4137 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them 4138 * @mapping: address space structure to write 4139 * @wbc: subtract the number of written pages from *@wbc->nr_to_write 4140 * @epd: holds context for the write, passed down to __extent_writepage() 4141 * 4142 * If a page is already under I/O, extent_write_cache_pages() skips it, even 4143 * if it's dirty. This is desirable behaviour for memory-cleaning writeback, 4144 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() 4145 * and msync() need to guarantee that all the data which was dirty at the time 4146 * the call was made get new I/O started against them. If wbc->sync_mode is 4147 * WB_SYNC_ALL then we were called for data integrity and we must wait for 4148 * existing IO to complete.
4149 */ 4150 static int extent_write_cache_pages(struct address_space *mapping, 4151 struct writeback_control *wbc, 4152 struct extent_page_data *epd) 4153 { 4154 struct inode *inode = mapping->host; 4155 int ret = 0; 4156 int done = 0; 4157 int nr_to_write_done = 0; 4158 struct pagevec pvec; 4159 int nr_pages; 4160 pgoff_t index; 4161 pgoff_t end; /* Inclusive */ 4162 pgoff_t done_index; 4163 int range_whole = 0; 4164 int scanned = 0; 4165 xa_mark_t tag; 4166 4167 /* 4168 * We have to hold onto the inode so that ordered extents can do their 4169 * work when the IO finishes. The alternative to this is failing to add 4170 * an ordered extent if the igrab() fails there and that is a huge pain 4171 * to deal with, so instead just hold onto the inode throughout the 4172 * writepages operation. If it fails here we are freeing up the inode 4173 * anyway and we'd rather not waste our time writing out stuff that is 4174 * going to be truncated anyway. 4175 */ 4176 if (!igrab(inode)) 4177 return 0; 4178 4179 pagevec_init(&pvec); 4180 if (wbc->range_cyclic) { 4181 index = mapping->writeback_index; /* Start from prev offset */ 4182 end = -1; 4183 /* 4184 * Start from the beginning does not need to cycle over the 4185 * range, mark it as scanned. 4186 */ 4187 scanned = (index == 0); 4188 } else { 4189 index = wbc->range_start >> PAGE_SHIFT; 4190 end = wbc->range_end >> PAGE_SHIFT; 4191 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 4192 range_whole = 1; 4193 scanned = 1; 4194 } 4195 4196 /* 4197 * We do the tagged writepage as long as the snapshot flush bit is set 4198 * and we are the first one who do the filemap_flush() on this inode. 4199 * 4200 * The nr_to_write == LONG_MAX is needed to make sure other flushers do 4201 * not race in and drop the bit. 4202 */ 4203 if (range_whole && wbc->nr_to_write == LONG_MAX && 4204 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 4205 &BTRFS_I(inode)->runtime_flags)) 4206 wbc->tagged_writepages = 1; 4207 4208 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 4209 tag = PAGECACHE_TAG_TOWRITE; 4210 else 4211 tag = PAGECACHE_TAG_DIRTY; 4212 retry: 4213 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 4214 tag_pages_for_writeback(mapping, index, end); 4215 done_index = index; 4216 while (!done && !nr_to_write_done && (index <= end) && 4217 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, 4218 &index, end, tag))) { 4219 unsigned i; 4220 4221 for (i = 0; i < nr_pages; i++) { 4222 struct page *page = pvec.pages[i]; 4223 4224 done_index = page->index + 1; 4225 /* 4226 * At this point we hold neither the i_pages lock nor 4227 * the page lock: the page may be truncated or 4228 * invalidated (changing page->mapping to NULL), 4229 * or even swizzled back from swapper_space to 4230 * tmpfs file mapping 4231 */ 4232 if (!trylock_page(page)) { 4233 ret = flush_write_bio(epd); 4234 BUG_ON(ret < 0); 4235 lock_page(page); 4236 } 4237 4238 if (unlikely(page->mapping != mapping)) { 4239 unlock_page(page); 4240 continue; 4241 } 4242 4243 if (wbc->sync_mode != WB_SYNC_NONE) { 4244 if (PageWriteback(page)) { 4245 ret = flush_write_bio(epd); 4246 BUG_ON(ret < 0); 4247 } 4248 wait_on_page_writeback(page); 4249 } 4250 4251 if (PageWriteback(page) || 4252 !clear_page_dirty_for_io(page)) { 4253 unlock_page(page); 4254 continue; 4255 } 4256 4257 ret = __extent_writepage(page, wbc, epd); 4258 if (ret < 0) { 4259 done = 1; 4260 break; 4261 } 4262 4263 /* 4264 * the filesystem may choose to bump up nr_to_write. 
4265 * We have to make sure to honor the new nr_to_write 4266 * at any time 4267 */ 4268 nr_to_write_done = wbc->nr_to_write <= 0; 4269 } 4270 pagevec_release(&pvec); 4271 cond_resched(); 4272 } 4273 if (!scanned && !done) { 4274 /* 4275 * We hit the last page and there is more work to be done: wrap 4276 * back to the start of the file 4277 */ 4278 scanned = 1; 4279 index = 0; 4280 4281 /* 4282 * If we're looping we could run into a page that is locked by a 4283 * writer and that writer could be waiting on writeback for a 4284 * page in our current bio, and thus deadlock, so flush the 4285 * write bio here. 4286 */ 4287 ret = flush_write_bio(epd); 4288 if (!ret) 4289 goto retry; 4290 } 4291 4292 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole)) 4293 mapping->writeback_index = done_index; 4294 4295 btrfs_add_delayed_iput(inode); 4296 return ret; 4297 } 4298 4299 int extent_write_full_page(struct page *page, struct writeback_control *wbc) 4300 { 4301 int ret; 4302 struct extent_page_data epd = { 4303 .bio = NULL, 4304 .extent_locked = 0, 4305 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 4306 }; 4307 4308 ret = __extent_writepage(page, wbc, &epd); 4309 ASSERT(ret <= 0); 4310 if (ret < 0) { 4311 end_write_bio(&epd, ret); 4312 return ret; 4313 } 4314 4315 ret = flush_write_bio(&epd); 4316 ASSERT(ret <= 0); 4317 return ret; 4318 } 4319 4320 int extent_write_locked_range(struct inode *inode, u64 start, u64 end, 4321 int mode) 4322 { 4323 int ret = 0; 4324 struct address_space *mapping = inode->i_mapping; 4325 struct page *page; 4326 unsigned long nr_pages = (end - start + PAGE_SIZE) >> 4327 PAGE_SHIFT; 4328 4329 struct extent_page_data epd = { 4330 .bio = NULL, 4331 .extent_locked = 1, 4332 .sync_io = mode == WB_SYNC_ALL, 4333 }; 4334 struct writeback_control wbc_writepages = { 4335 .sync_mode = mode, 4336 .nr_to_write = nr_pages * 2, 4337 .range_start = start, 4338 .range_end = end + 1, 4339 /* We're called from an async helper function */ 4340 .punt_to_cgroup = 1, 4341 .no_cgroup_owner = 1, 4342 }; 4343 4344 wbc_attach_fdatawrite_inode(&wbc_writepages, inode); 4345 while (start <= end) { 4346 page = find_get_page(mapping, start >> PAGE_SHIFT); 4347 if (clear_page_dirty_for_io(page)) 4348 ret = __extent_writepage(page, &wbc_writepages, &epd); 4349 else { 4350 btrfs_writepage_endio_finish_ordered(page, start, 4351 start + PAGE_SIZE - 1, 1); 4352 unlock_page(page); 4353 } 4354 put_page(page); 4355 start += PAGE_SIZE; 4356 } 4357 4358 ASSERT(ret <= 0); 4359 if (ret == 0) 4360 ret = flush_write_bio(&epd); 4361 else 4362 end_write_bio(&epd, ret); 4363 4364 wbc_detach_inode(&wbc_writepages); 4365 return ret; 4366 } 4367 4368 int extent_writepages(struct address_space *mapping, 4369 struct writeback_control *wbc) 4370 { 4371 int ret = 0; 4372 struct extent_page_data epd = { 4373 .bio = NULL, 4374 .extent_locked = 0, 4375 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 4376 }; 4377 4378 ret = extent_write_cache_pages(mapping, wbc, &epd); 4379 ASSERT(ret <= 0); 4380 if (ret < 0) { 4381 end_write_bio(&epd, ret); 4382 return ret; 4383 } 4384 ret = flush_write_bio(&epd); 4385 return ret; 4386 } 4387 4388 void extent_readahead(struct readahead_control *rac) 4389 { 4390 struct bio *bio = NULL; 4391 unsigned long bio_flags = 0; 4392 struct page *pagepool[16]; 4393 struct extent_map *em_cached = NULL; 4394 u64 prev_em_start = (u64)-1; 4395 int nr; 4396 4397 while ((nr = readahead_page_batch(rac, pagepool))) { 4398 u64 contig_start = page_offset(pagepool[0]); 4399 u64 contig_end = page_offset(pagepool[nr - 
1]) + PAGE_SIZE - 1; 4400 4401 ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end); 4402 4403 contiguous_readpages(pagepool, nr, contig_start, contig_end, 4404 &em_cached, &bio, &bio_flags, &prev_em_start); 4405 } 4406 4407 if (em_cached) 4408 free_extent_map(em_cached); 4409 4410 if (bio) { 4411 if (submit_one_bio(bio, 0, bio_flags)) 4412 return; 4413 } 4414 } 4415 4416 /* 4417 * basic invalidatepage code, this waits on any locked or writeback 4418 * ranges corresponding to the page, and then deletes any extent state 4419 * records from the tree 4420 */ 4421 int extent_invalidatepage(struct extent_io_tree *tree, 4422 struct page *page, unsigned long offset) 4423 { 4424 struct extent_state *cached_state = NULL; 4425 u64 start = page_offset(page); 4426 u64 end = start + PAGE_SIZE - 1; 4427 size_t blocksize = page->mapping->host->i_sb->s_blocksize; 4428 4429 start += ALIGN(offset, blocksize); 4430 if (start > end) 4431 return 0; 4432 4433 lock_extent_bits(tree, start, end, &cached_state); 4434 wait_on_page_writeback(page); 4435 clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC | 4436 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state); 4437 return 0; 4438 } 4439 4440 /* 4441 * a helper for releasepage, this tests for areas of the page that 4442 * are locked or under IO and drops the related state bits if it is safe 4443 * to drop the page. 4444 */ 4445 static int try_release_extent_state(struct extent_io_tree *tree, 4446 struct page *page, gfp_t mask) 4447 { 4448 u64 start = page_offset(page); 4449 u64 end = start + PAGE_SIZE - 1; 4450 int ret = 1; 4451 4452 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) { 4453 ret = 0; 4454 } else { 4455 /* 4456 * at this point we can safely clear everything except the 4457 * locked bit and the nodatasum bit 4458 */ 4459 ret = __clear_extent_bit(tree, start, end, 4460 ~(EXTENT_LOCKED | EXTENT_NODATASUM), 4461 0, 0, NULL, mask, NULL); 4462 4463 /* if clear_extent_bit failed for enomem reasons, 4464 * we can't allow the release to continue. 4465 */ 4466 if (ret < 0) 4467 ret = 0; 4468 else 4469 ret = 1; 4470 } 4471 return ret; 4472 } 4473 4474 /* 4475 * a helper for releasepage. 
As long as there are no locked extents 4476 * in the range corresponding to the page, both state records and extent 4477 * map records are removed. 4478 */ 4479 int try_release_extent_mapping(struct page *page, gfp_t mask) 4480 { 4481 struct extent_map *em; 4482 u64 start = page_offset(page); 4483 u64 end = start + PAGE_SIZE - 1; 4484 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host); 4485 struct extent_io_tree *tree = &btrfs_inode->io_tree; 4486 struct extent_map_tree *map = &btrfs_inode->extent_tree; 4487 4488 if (gfpflags_allow_blocking(mask) && 4489 page->mapping->host->i_size > SZ_16M) { 4490 u64 len; 4491 while (start <= end) { 4492 len = end - start + 1; 4493 write_lock(&map->lock); 4494 em = lookup_extent_mapping(map, start, len); 4495 if (!em) { 4496 write_unlock(&map->lock); 4497 break; 4498 } 4499 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) || 4500 em->start != start) { 4501 write_unlock(&map->lock); 4502 free_extent_map(em); 4503 break; 4504 } 4505 if (!test_range_bit(tree, em->start, 4506 extent_map_end(em) - 1, 4507 EXTENT_LOCKED, 0, NULL)) { 4508 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4509 &btrfs_inode->runtime_flags); 4510 remove_extent_mapping(map, em); 4511 /* once for the rb tree */ 4512 free_extent_map(em); 4513 } 4514 start = extent_map_end(em); 4515 write_unlock(&map->lock); 4516 4517 /* once for us */ 4518 free_extent_map(em); 4519 } 4520 } 4521 return try_release_extent_state(tree, page, mask); 4522 } 4523 4524 /* 4525 * helper function for fiemap, which doesn't want to see any holes. 4526 * This maps until we find something past 'last' 4527 */ 4528 static struct extent_map *get_extent_skip_holes(struct inode *inode, 4529 u64 offset, u64 last) 4530 { 4531 u64 sectorsize = btrfs_inode_sectorsize(inode); 4532 struct extent_map *em; 4533 u64 len; 4534 4535 if (offset >= last) 4536 return NULL; 4537 4538 while (1) { 4539 len = last - offset; 4540 if (len == 0) 4541 break; 4542 len = ALIGN(len, sectorsize); 4543 em = btrfs_get_extent_fiemap(BTRFS_I(inode), offset, len); 4544 if (IS_ERR_OR_NULL(em)) 4545 return em; 4546 4547 /* if this isn't a hole return it */ 4548 if (em->block_start != EXTENT_MAP_HOLE) 4549 return em; 4550 4551 /* this is a hole, advance to the next extent */ 4552 offset = extent_map_end(em); 4553 free_extent_map(em); 4554 if (offset >= last) 4555 break; 4556 } 4557 return NULL; 4558 } 4559 4560 /* 4561 * To cache the previous fiemap extent 4562 * 4563 * Will be used for merging fiemap extents 4564 */ 4565 struct fiemap_cache { 4566 u64 offset; 4567 u64 phys; 4568 u64 len; 4569 u32 flags; 4570 bool cached; 4571 }; 4572 4573 /* 4574 * Helper to submit a fiemap extent. 4575 * 4576 * Will try to merge the current fiemap extent specified by @offset, @phys, 4577 * @len and @flags with the cached one. 4578 * Only when we fail to merge is the cached extent submitted as a 4579 * fiemap extent. 4580 * 4581 * Return value is the same as fiemap_fill_next_extent(). 4582 */ 4583 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo, 4584 struct fiemap_cache *cache, 4585 u64 offset, u64 phys, u64 len, u32 flags) 4586 { 4587 int ret = 0; 4588 4589 if (!cache->cached) 4590 goto assign; 4591 4592 /* 4593 * Sanity check, extent_fiemap() should have ensured that the new 4594 * fiemap extent won't overlap with the cached one. 4595 * Not recoverable.
4596 * 4597 * NOTE: Physical address can overlap, due to compression 4598 */ 4599 if (cache->offset + cache->len > offset) { 4600 WARN_ON(1); 4601 return -EINVAL; 4602 } 4603 4604 /* 4605 * Only merges fiemap extents if 4606 * 1) Their logical addresses are continuous 4607 * 4608 * 2) Their physical addresses are continuous 4609 * So truly compressed (physical size smaller than logical size) 4610 * extents won't get merged with each other 4611 * 4612 * 3) Share same flags except FIEMAP_EXTENT_LAST 4613 * So regular extent won't get merged with prealloc extent 4614 */ 4615 if (cache->offset + cache->len == offset && 4616 cache->phys + cache->len == phys && 4617 (cache->flags & ~FIEMAP_EXTENT_LAST) == 4618 (flags & ~FIEMAP_EXTENT_LAST)) { 4619 cache->len += len; 4620 cache->flags |= flags; 4621 goto try_submit_last; 4622 } 4623 4624 /* Not mergeable, need to submit cached one */ 4625 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys, 4626 cache->len, cache->flags); 4627 cache->cached = false; 4628 if (ret) 4629 return ret; 4630 assign: 4631 cache->cached = true; 4632 cache->offset = offset; 4633 cache->phys = phys; 4634 cache->len = len; 4635 cache->flags = flags; 4636 try_submit_last: 4637 if (cache->flags & FIEMAP_EXTENT_LAST) { 4638 ret = fiemap_fill_next_extent(fieinfo, cache->offset, 4639 cache->phys, cache->len, cache->flags); 4640 cache->cached = false; 4641 } 4642 return ret; 4643 } 4644 4645 /* 4646 * Emit last fiemap cache 4647 * 4648 * The last fiemap cache may still be cached in the following case: 4649 * 0 4k 8k 4650 * |<- Fiemap range ->| 4651 * |<------------ First extent ----------->| 4652 * 4653 * In this case, the first extent range will be cached but not emitted. 4654 * So we must emit it before ending extent_fiemap(). 4655 */ 4656 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo, 4657 struct fiemap_cache *cache) 4658 { 4659 int ret; 4660 4661 if (!cache->cached) 4662 return 0; 4663 4664 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys, 4665 cache->len, cache->flags); 4666 cache->cached = false; 4667 if (ret > 0) 4668 ret = 0; 4669 return ret; 4670 } 4671 4672 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4673 __u64 start, __u64 len) 4674 { 4675 int ret = 0; 4676 u64 off = start; 4677 u64 max = start + len; 4678 u32 flags = 0; 4679 u32 found_type; 4680 u64 last; 4681 u64 last_for_get_extent = 0; 4682 u64 disko = 0; 4683 u64 isize = i_size_read(inode); 4684 struct btrfs_key found_key; 4685 struct extent_map *em = NULL; 4686 struct extent_state *cached_state = NULL; 4687 struct btrfs_path *path; 4688 struct btrfs_root *root = BTRFS_I(inode)->root; 4689 struct fiemap_cache cache = { 0 }; 4690 struct ulist *roots; 4691 struct ulist *tmp_ulist; 4692 int end = 0; 4693 u64 em_start = 0; 4694 u64 em_len = 0; 4695 u64 em_end = 0; 4696 4697 if (len == 0) 4698 return -EINVAL; 4699 4700 path = btrfs_alloc_path(); 4701 if (!path) 4702 return -ENOMEM; 4703 path->leave_spinning = 1; 4704 4705 roots = ulist_alloc(GFP_KERNEL); 4706 tmp_ulist = ulist_alloc(GFP_KERNEL); 4707 if (!roots || !tmp_ulist) { 4708 ret = -ENOMEM; 4709 goto out_free_ulist; 4710 } 4711 4712 start = round_down(start, btrfs_inode_sectorsize(inode)); 4713 len = round_up(max, btrfs_inode_sectorsize(inode)) - start; 4714 4715 /* 4716 * lookup the last file extent. 
We're not using i_size here 4717 * because there might be preallocation past i_size 4718 */ 4719 ret = btrfs_lookup_file_extent(NULL, root, path, 4720 btrfs_ino(BTRFS_I(inode)), -1, 0); 4721 if (ret < 0) { 4722 goto out_free_ulist; 4723 } else { 4724 WARN_ON(!ret); 4725 if (ret == 1) 4726 ret = 0; 4727 } 4728 4729 path->slots[0]--; 4730 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); 4731 found_type = found_key.type; 4732 4733 /* No extents, but there might be delalloc bits */ 4734 if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) || 4735 found_type != BTRFS_EXTENT_DATA_KEY) { 4736 /* have to trust i_size as the end */ 4737 last = (u64)-1; 4738 last_for_get_extent = isize; 4739 } else { 4740 /* 4741 * remember the start of the last extent. There are a 4742 * bunch of different factors that go into the length of the 4743 * extent, so its much less complex to remember where it started 4744 */ 4745 last = found_key.offset; 4746 last_for_get_extent = last + 1; 4747 } 4748 btrfs_release_path(path); 4749 4750 /* 4751 * we might have some extents allocated but more delalloc past those 4752 * extents. so, we trust isize unless the start of the last extent is 4753 * beyond isize 4754 */ 4755 if (last < isize) { 4756 last = (u64)-1; 4757 last_for_get_extent = isize; 4758 } 4759 4760 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 4761 &cached_state); 4762 4763 em = get_extent_skip_holes(inode, start, last_for_get_extent); 4764 if (!em) 4765 goto out; 4766 if (IS_ERR(em)) { 4767 ret = PTR_ERR(em); 4768 goto out; 4769 } 4770 4771 while (!end) { 4772 u64 offset_in_extent = 0; 4773 4774 /* break if the extent we found is outside the range */ 4775 if (em->start >= max || extent_map_end(em) < off) 4776 break; 4777 4778 /* 4779 * get_extent may return an extent that starts before our 4780 * requested range. We have to make sure the ranges 4781 * we return to fiemap always move forward and don't 4782 * overlap, so adjust the offsets here 4783 */ 4784 em_start = max(em->start, off); 4785 4786 /* 4787 * record the offset from the start of the extent 4788 * for adjusting the disk offset below. Only do this if the 4789 * extent isn't compressed since our in ram offset may be past 4790 * what we have actually allocated on disk. 4791 */ 4792 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) 4793 offset_in_extent = em_start - em->start; 4794 em_end = extent_map_end(em); 4795 em_len = em_end - em_start; 4796 flags = 0; 4797 if (em->block_start < EXTENT_MAP_LAST_BYTE) 4798 disko = em->block_start + offset_in_extent; 4799 else 4800 disko = 0; 4801 4802 /* 4803 * bump off for our next call to get_extent 4804 */ 4805 off = extent_map_end(em); 4806 if (off >= max) 4807 end = 1; 4808 4809 if (em->block_start == EXTENT_MAP_LAST_BYTE) { 4810 end = 1; 4811 flags |= FIEMAP_EXTENT_LAST; 4812 } else if (em->block_start == EXTENT_MAP_INLINE) { 4813 flags |= (FIEMAP_EXTENT_DATA_INLINE | 4814 FIEMAP_EXTENT_NOT_ALIGNED); 4815 } else if (em->block_start == EXTENT_MAP_DELALLOC) { 4816 flags |= (FIEMAP_EXTENT_DELALLOC | 4817 FIEMAP_EXTENT_UNKNOWN); 4818 } else if (fieinfo->fi_extents_max) { 4819 u64 bytenr = em->block_start - 4820 (em->start - em->orig_start); 4821 4822 /* 4823 * As btrfs supports shared space, this information 4824 * can be exported to userspace tools via 4825 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0 4826 * then we're just getting a count and we can skip the 4827 * lookup stuff. 
4828 */ 4829 ret = btrfs_check_shared(root, 4830 btrfs_ino(BTRFS_I(inode)), 4831 bytenr, roots, tmp_ulist); 4832 if (ret < 0) 4833 goto out_free; 4834 if (ret) 4835 flags |= FIEMAP_EXTENT_SHARED; 4836 ret = 0; 4837 } 4838 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) 4839 flags |= FIEMAP_EXTENT_ENCODED; 4840 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 4841 flags |= FIEMAP_EXTENT_UNWRITTEN; 4842 4843 free_extent_map(em); 4844 em = NULL; 4845 if ((em_start >= last) || em_len == (u64)-1 || 4846 (last == (u64)-1 && isize <= em_end)) { 4847 flags |= FIEMAP_EXTENT_LAST; 4848 end = 1; 4849 } 4850 4851 /* now scan forward to see if this is really the last extent. */ 4852 em = get_extent_skip_holes(inode, off, last_for_get_extent); 4853 if (IS_ERR(em)) { 4854 ret = PTR_ERR(em); 4855 goto out; 4856 } 4857 if (!em) { 4858 flags |= FIEMAP_EXTENT_LAST; 4859 end = 1; 4860 } 4861 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko, 4862 em_len, flags); 4863 if (ret) { 4864 if (ret == 1) 4865 ret = 0; 4866 goto out_free; 4867 } 4868 } 4869 out_free: 4870 if (!ret) 4871 ret = emit_last_fiemap_cache(fieinfo, &cache); 4872 free_extent_map(em); 4873 out: 4874 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1, 4875 &cached_state); 4876 4877 out_free_ulist: 4878 btrfs_free_path(path); 4879 ulist_free(roots); 4880 ulist_free(tmp_ulist); 4881 return ret; 4882 } 4883 4884 static void __free_extent_buffer(struct extent_buffer *eb) 4885 { 4886 kmem_cache_free(extent_buffer_cache, eb); 4887 } 4888 4889 int extent_buffer_under_io(const struct extent_buffer *eb) 4890 { 4891 return (atomic_read(&eb->io_pages) || 4892 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || 4893 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); 4894 } 4895 4896 /* 4897 * Release all pages attached to the extent buffer. 4898 */ 4899 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb) 4900 { 4901 int i; 4902 int num_pages; 4903 int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); 4904 4905 BUG_ON(extent_buffer_under_io(eb)); 4906 4907 num_pages = num_extent_pages(eb); 4908 for (i = 0; i < num_pages; i++) { 4909 struct page *page = eb->pages[i]; 4910 4911 if (!page) 4912 continue; 4913 if (mapped) 4914 spin_lock(&page->mapping->private_lock); 4915 /* 4916 * We do this since we'll remove the pages after we've 4917 * removed the eb from the radix tree, so we could race 4918 * and have this page now attached to the new eb. So 4919 * only clear page_private if it's still connected to 4920 * this eb. 4921 */ 4922 if (PagePrivate(page) && 4923 page->private == (unsigned long)eb) { 4924 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); 4925 BUG_ON(PageDirty(page)); 4926 BUG_ON(PageWriteback(page)); 4927 /* 4928 * We need to make sure we haven't been attached 4929 * to a new eb. 4930 */ 4931 detach_page_private(page); 4932 } 4933 4934 if (mapped) 4935 spin_unlock(&page->mapping->private_lock); 4936 4937 /* One for when we allocated the page */ 4938 put_page(page); 4939 } 4940 } 4941 4942 /* 4943 * Helper for releasing the extent buffer.
4944 */ 4945 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) 4946 { 4947 btrfs_release_extent_buffer_pages(eb); 4948 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list); 4949 __free_extent_buffer(eb); 4950 } 4951 4952 static struct extent_buffer * 4953 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, 4954 unsigned long len) 4955 { 4956 struct extent_buffer *eb = NULL; 4957 4958 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); 4959 eb->start = start; 4960 eb->len = len; 4961 eb->fs_info = fs_info; 4962 eb->bflags = 0; 4963 rwlock_init(&eb->lock); 4964 atomic_set(&eb->blocking_readers, 0); 4965 eb->blocking_writers = 0; 4966 eb->lock_nested = false; 4967 init_waitqueue_head(&eb->write_lock_wq); 4968 init_waitqueue_head(&eb->read_lock_wq); 4969 4970 btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list, 4971 &fs_info->allocated_ebs); 4972 4973 spin_lock_init(&eb->refs_lock); 4974 atomic_set(&eb->refs, 1); 4975 atomic_set(&eb->io_pages, 0); 4976 4977 /* 4978 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages 4979 */ 4980 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE 4981 > MAX_INLINE_EXTENT_BUFFER_SIZE); 4982 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE); 4983 4984 #ifdef CONFIG_BTRFS_DEBUG 4985 eb->spinning_writers = 0; 4986 atomic_set(&eb->spinning_readers, 0); 4987 atomic_set(&eb->read_locks, 0); 4988 eb->write_locks = 0; 4989 #endif 4990 4991 return eb; 4992 } 4993 4994 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src) 4995 { 4996 int i; 4997 struct page *p; 4998 struct extent_buffer *new; 4999 int num_pages = num_extent_pages(src); 5000 5001 new = __alloc_extent_buffer(src->fs_info, src->start, src->len); 5002 if (new == NULL) 5003 return NULL; 5004 5005 for (i = 0; i < num_pages; i++) { 5006 p = alloc_page(GFP_NOFS); 5007 if (!p) { 5008 btrfs_release_extent_buffer(new); 5009 return NULL; 5010 } 5011 attach_extent_buffer_page(new, p); 5012 WARN_ON(PageDirty(p)); 5013 SetPageUptodate(p); 5014 new->pages[i] = p; 5015 copy_page(page_address(p), page_address(src->pages[i])); 5016 } 5017 5018 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags); 5019 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags); 5020 5021 return new; 5022 } 5023 5024 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, 5025 u64 start, unsigned long len) 5026 { 5027 struct extent_buffer *eb; 5028 int num_pages; 5029 int i; 5030 5031 eb = __alloc_extent_buffer(fs_info, start, len); 5032 if (!eb) 5033 return NULL; 5034 5035 num_pages = num_extent_pages(eb); 5036 for (i = 0; i < num_pages; i++) { 5037 eb->pages[i] = alloc_page(GFP_NOFS); 5038 if (!eb->pages[i]) 5039 goto err; 5040 } 5041 set_extent_buffer_uptodate(eb); 5042 btrfs_set_header_nritems(eb, 0); 5043 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); 5044 5045 return eb; 5046 err: 5047 for (; i > 0; i--) 5048 __free_page(eb->pages[i - 1]); 5049 __free_extent_buffer(eb); 5050 return NULL; 5051 } 5052 5053 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, 5054 u64 start) 5055 { 5056 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize); 5057 } 5058 5059 static void check_buffer_tree_ref(struct extent_buffer *eb) 5060 { 5061 int refs; 5062 /* 5063 * The TREE_REF bit is first set when the extent_buffer is added 5064 * to the radix tree. It is also reset, if unset, when a new reference 5065 * is created by find_extent_buffer. 
5066 * 5067 * It is only cleared in two cases: freeing the last non-tree 5068 * reference to the extent_buffer when its STALE bit is set or 5069 * calling releasepage when the tree reference is the only reference. 5070 * 5071 * In both cases, care is taken to ensure that the extent_buffer's 5072 * pages are not under io. However, releasepage can be concurrently 5073 * called with creating new references, which is prone to race 5074 * conditions between the calls to check_buffer_tree_ref in those 5075 * codepaths and clearing TREE_REF in try_release_extent_buffer. 5076 * 5077 * The actual lifetime of the extent_buffer in the radix tree is 5078 * adequately protected by the refcount, but the TREE_REF bit and 5079 * its corresponding reference are not. To protect against this 5080 * class of races, we call check_buffer_tree_ref from the codepaths 5081 * which trigger io after they set eb->io_pages. Note that once io is 5082 * initiated, TREE_REF can no longer be cleared, so that is the 5083 * moment at which any such race is best fixed. 5084 */ 5085 refs = atomic_read(&eb->refs); 5086 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) 5087 return; 5088 5089 spin_lock(&eb->refs_lock); 5090 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) 5091 atomic_inc(&eb->refs); 5092 spin_unlock(&eb->refs_lock); 5093 } 5094 5095 static void mark_extent_buffer_accessed(struct extent_buffer *eb, 5096 struct page *accessed) 5097 { 5098 int num_pages, i; 5099 5100 check_buffer_tree_ref(eb); 5101 5102 num_pages = num_extent_pages(eb); 5103 for (i = 0; i < num_pages; i++) { 5104 struct page *p = eb->pages[i]; 5105 5106 if (p != accessed) 5107 mark_page_accessed(p); 5108 } 5109 } 5110 5111 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, 5112 u64 start) 5113 { 5114 struct extent_buffer *eb; 5115 5116 rcu_read_lock(); 5117 eb = radix_tree_lookup(&fs_info->buffer_radix, 5118 start >> PAGE_SHIFT); 5119 if (eb && atomic_inc_not_zero(&eb->refs)) { 5120 rcu_read_unlock(); 5121 /* 5122 * Lock our eb's refs_lock to avoid races with 5123 * free_extent_buffer. When we get our eb it might be flagged 5124 * with EXTENT_BUFFER_STALE and another task running 5125 * free_extent_buffer might have seen that flag set, 5126 * eb->refs == 2, that the buffer isn't under IO (dirty and 5127 * writeback flags not set) and it's still in the tree (flag 5128 * EXTENT_BUFFER_TREE_REF set), therefore being in the process 5129 * of decrementing the extent buffer's reference count twice. 5130 * So here we could race and increment the eb's reference count, 5131 * clear its stale flag, mark it as dirty and drop our reference 5132 * before the other task finishes executing free_extent_buffer, 5133 * which would later result in an attempt to free an extent 5134 * buffer that is dirty. 
5135 */ 5136 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { 5137 spin_lock(&eb->refs_lock); 5138 spin_unlock(&eb->refs_lock); 5139 } 5140 mark_extent_buffer_accessed(eb, NULL); 5141 return eb; 5142 } 5143 rcu_read_unlock(); 5144 5145 return NULL; 5146 } 5147 5148 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 5149 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, 5150 u64 start) 5151 { 5152 struct extent_buffer *eb, *exists = NULL; 5153 int ret; 5154 5155 eb = find_extent_buffer(fs_info, start); 5156 if (eb) 5157 return eb; 5158 eb = alloc_dummy_extent_buffer(fs_info, start); 5159 if (!eb) 5160 return ERR_PTR(-ENOMEM); 5161 eb->fs_info = fs_info; 5162 again: 5163 ret = radix_tree_preload(GFP_NOFS); 5164 if (ret) { 5165 exists = ERR_PTR(ret); 5166 goto free_eb; 5167 } 5168 spin_lock(&fs_info->buffer_lock); 5169 ret = radix_tree_insert(&fs_info->buffer_radix, 5170 start >> PAGE_SHIFT, eb); 5171 spin_unlock(&fs_info->buffer_lock); 5172 radix_tree_preload_end(); 5173 if (ret == -EEXIST) { 5174 exists = find_extent_buffer(fs_info, start); 5175 if (exists) 5176 goto free_eb; 5177 else 5178 goto again; 5179 } 5180 check_buffer_tree_ref(eb); 5181 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); 5182 5183 return eb; 5184 free_eb: 5185 btrfs_release_extent_buffer(eb); 5186 return exists; 5187 } 5188 #endif 5189 5190 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, 5191 u64 start) 5192 { 5193 unsigned long len = fs_info->nodesize; 5194 int num_pages; 5195 int i; 5196 unsigned long index = start >> PAGE_SHIFT; 5197 struct extent_buffer *eb; 5198 struct extent_buffer *exists = NULL; 5199 struct page *p; 5200 struct address_space *mapping = fs_info->btree_inode->i_mapping; 5201 int uptodate = 1; 5202 int ret; 5203 5204 if (!IS_ALIGNED(start, fs_info->sectorsize)) { 5205 btrfs_err(fs_info, "bad tree block start %llu", start); 5206 return ERR_PTR(-EINVAL); 5207 } 5208 5209 eb = find_extent_buffer(fs_info, start); 5210 if (eb) 5211 return eb; 5212 5213 eb = __alloc_extent_buffer(fs_info, start, len); 5214 if (!eb) 5215 return ERR_PTR(-ENOMEM); 5216 5217 num_pages = num_extent_pages(eb); 5218 for (i = 0; i < num_pages; i++, index++) { 5219 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL); 5220 if (!p) { 5221 exists = ERR_PTR(-ENOMEM); 5222 goto free_eb; 5223 } 5224 5225 spin_lock(&mapping->private_lock); 5226 if (PagePrivate(p)) { 5227 /* 5228 * We could have already allocated an eb for this page 5229 * and attached one so lets see if we can get a ref on 5230 * the existing eb, and if we can we know it's good and 5231 * we can just return that one, else we know we can just 5232 * overwrite page->private. 5233 */ 5234 exists = (struct extent_buffer *)p->private; 5235 if (atomic_inc_not_zero(&exists->refs)) { 5236 spin_unlock(&mapping->private_lock); 5237 unlock_page(p); 5238 put_page(p); 5239 mark_extent_buffer_accessed(exists, p); 5240 goto free_eb; 5241 } 5242 exists = NULL; 5243 5244 /* 5245 * Do this so attach doesn't complain and we need to 5246 * drop the ref the old guy had. 
5247 */ 5248 ClearPagePrivate(p); 5249 WARN_ON(PageDirty(p)); 5250 put_page(p); 5251 } 5252 attach_extent_buffer_page(eb, p); 5253 spin_unlock(&mapping->private_lock); 5254 WARN_ON(PageDirty(p)); 5255 eb->pages[i] = p; 5256 if (!PageUptodate(p)) 5257 uptodate = 0; 5258 5259 /* 5260 * We can't unlock the pages just yet since the extent buffer 5261 * hasn't been properly inserted in the radix tree, this 5262 * opens a race with btree_releasepage which can free a page 5263 * while we are still filling in all pages for the buffer and 5264 * we could crash. 5265 */ 5266 } 5267 if (uptodate) 5268 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 5269 again: 5270 ret = radix_tree_preload(GFP_NOFS); 5271 if (ret) { 5272 exists = ERR_PTR(ret); 5273 goto free_eb; 5274 } 5275 5276 spin_lock(&fs_info->buffer_lock); 5277 ret = radix_tree_insert(&fs_info->buffer_radix, 5278 start >> PAGE_SHIFT, eb); 5279 spin_unlock(&fs_info->buffer_lock); 5280 radix_tree_preload_end(); 5281 if (ret == -EEXIST) { 5282 exists = find_extent_buffer(fs_info, start); 5283 if (exists) 5284 goto free_eb; 5285 else 5286 goto again; 5287 } 5288 /* add one reference for the tree */ 5289 check_buffer_tree_ref(eb); 5290 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); 5291 5292 /* 5293 * Now it's safe to unlock the pages because any calls to 5294 * btree_releasepage will correctly detect that a page belongs to a 5295 * live buffer and won't free them prematurely. 5296 */ 5297 for (i = 0; i < num_pages; i++) 5298 unlock_page(eb->pages[i]); 5299 return eb; 5300 5301 free_eb: 5302 WARN_ON(!atomic_dec_and_test(&eb->refs)); 5303 for (i = 0; i < num_pages; i++) { 5304 if (eb->pages[i]) 5305 unlock_page(eb->pages[i]); 5306 } 5307 5308 btrfs_release_extent_buffer(eb); 5309 return exists; 5310 } 5311 5312 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head) 5313 { 5314 struct extent_buffer *eb = 5315 container_of(head, struct extent_buffer, rcu_head); 5316 5317 __free_extent_buffer(eb); 5318 } 5319 5320 static int release_extent_buffer(struct extent_buffer *eb) 5321 __releases(&eb->refs_lock) 5322 { 5323 lockdep_assert_held(&eb->refs_lock); 5324 5325 WARN_ON(atomic_read(&eb->refs) == 0); 5326 if (atomic_dec_and_test(&eb->refs)) { 5327 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { 5328 struct btrfs_fs_info *fs_info = eb->fs_info; 5329 5330 spin_unlock(&eb->refs_lock); 5331 5332 spin_lock(&fs_info->buffer_lock); 5333 radix_tree_delete(&fs_info->buffer_radix, 5334 eb->start >> PAGE_SHIFT); 5335 spin_unlock(&fs_info->buffer_lock); 5336 } else { 5337 spin_unlock(&eb->refs_lock); 5338 } 5339 5340 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list); 5341 /* Should be safe to release our pages at this point */ 5342 btrfs_release_extent_buffer_pages(eb); 5343 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 5344 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { 5345 __free_extent_buffer(eb); 5346 return 1; 5347 } 5348 #endif 5349 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); 5350 return 1; 5351 } 5352 spin_unlock(&eb->refs_lock); 5353 5354 return 0; 5355 } 5356 5357 void free_extent_buffer(struct extent_buffer *eb) 5358 { 5359 int refs; 5360 int old; 5361 if (!eb) 5362 return; 5363 5364 while (1) { 5365 refs = atomic_read(&eb->refs); 5366 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) 5367 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && 5368 refs == 1)) 5369 break; 5370 old = atomic_cmpxchg(&eb->refs, refs, refs - 1); 5371 if (old == refs) 5372 return; 5373 } 5374 5375 
spin_lock(&eb->refs_lock); 5376 if (atomic_read(&eb->refs) == 2 && 5377 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && 5378 !extent_buffer_under_io(eb) && 5379 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) 5380 atomic_dec(&eb->refs); 5381 5382 /* 5383 * I know this is terrible, but it's temporary until we stop tracking 5384 * the uptodate bits and such for the extent buffers. 5385 */ 5386 release_extent_buffer(eb); 5387 } 5388 5389 void free_extent_buffer_stale(struct extent_buffer *eb) 5390 { 5391 if (!eb) 5392 return; 5393 5394 spin_lock(&eb->refs_lock); 5395 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); 5396 5397 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && 5398 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) 5399 atomic_dec(&eb->refs); 5400 release_extent_buffer(eb); 5401 } 5402 5403 void clear_extent_buffer_dirty(const struct extent_buffer *eb) 5404 { 5405 int i; 5406 int num_pages; 5407 struct page *page; 5408 5409 num_pages = num_extent_pages(eb); 5410 5411 for (i = 0; i < num_pages; i++) { 5412 page = eb->pages[i]; 5413 if (!PageDirty(page)) 5414 continue; 5415 5416 lock_page(page); 5417 WARN_ON(!PagePrivate(page)); 5418 5419 clear_page_dirty_for_io(page); 5420 xa_lock_irq(&page->mapping->i_pages); 5421 if (!PageDirty(page)) 5422 __xa_clear_mark(&page->mapping->i_pages, 5423 page_index(page), PAGECACHE_TAG_DIRTY); 5424 xa_unlock_irq(&page->mapping->i_pages); 5425 ClearPageError(page); 5426 unlock_page(page); 5427 } 5428 WARN_ON(atomic_read(&eb->refs) == 0); 5429 } 5430 5431 bool set_extent_buffer_dirty(struct extent_buffer *eb) 5432 { 5433 int i; 5434 int num_pages; 5435 bool was_dirty; 5436 5437 check_buffer_tree_ref(eb); 5438 5439 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); 5440 5441 num_pages = num_extent_pages(eb); 5442 WARN_ON(atomic_read(&eb->refs) == 0); 5443 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); 5444 5445 if (!was_dirty) 5446 for (i = 0; i < num_pages; i++) 5447 set_page_dirty(eb->pages[i]); 5448 5449 #ifdef CONFIG_BTRFS_DEBUG 5450 for (i = 0; i < num_pages; i++) 5451 ASSERT(PageDirty(eb->pages[i])); 5452 #endif 5453 5454 return was_dirty; 5455 } 5456 5457 void clear_extent_buffer_uptodate(struct extent_buffer *eb) 5458 { 5459 int i; 5460 struct page *page; 5461 int num_pages; 5462 5463 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 5464 num_pages = num_extent_pages(eb); 5465 for (i = 0; i < num_pages; i++) { 5466 page = eb->pages[i]; 5467 if (page) 5468 ClearPageUptodate(page); 5469 } 5470 } 5471 5472 void set_extent_buffer_uptodate(struct extent_buffer *eb) 5473 { 5474 int i; 5475 struct page *page; 5476 int num_pages; 5477 5478 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 5479 num_pages = num_extent_pages(eb); 5480 for (i = 0; i < num_pages; i++) { 5481 page = eb->pages[i]; 5482 SetPageUptodate(page); 5483 } 5484 } 5485 5486 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num) 5487 { 5488 int i; 5489 struct page *page; 5490 int err; 5491 int ret = 0; 5492 int locked_pages = 0; 5493 int all_uptodate = 1; 5494 int num_pages; 5495 unsigned long num_reads = 0; 5496 struct bio *bio = NULL; 5497 unsigned long bio_flags = 0; 5498 5499 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) 5500 return 0; 5501 5502 num_pages = num_extent_pages(eb); 5503 for (i = 0; i < num_pages; i++) { 5504 page = eb->pages[i]; 5505 if (wait == WAIT_NONE) { 5506 if (!trylock_page(page)) 5507 goto unlock_exit; 5508 } else { 5509 lock_page(page); 5510 } 5511 locked_pages++; 5512 } 5513 /* 5514 * 
We need to firstly lock all pages to make sure that 5515 * the uptodate bit of our pages won't be affected by 5516 * clear_extent_buffer_uptodate(). 5517 */ 5518 for (i = 0; i < num_pages; i++) { 5519 page = eb->pages[i]; 5520 if (!PageUptodate(page)) { 5521 num_reads++; 5522 all_uptodate = 0; 5523 } 5524 } 5525 5526 if (all_uptodate) { 5527 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 5528 goto unlock_exit; 5529 } 5530 5531 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); 5532 eb->read_mirror = 0; 5533 atomic_set(&eb->io_pages, num_reads); 5534 /* 5535 * It is possible for releasepage to clear the TREE_REF bit before we 5536 * set io_pages. See check_buffer_tree_ref for a more detailed comment. 5537 */ 5538 check_buffer_tree_ref(eb); 5539 for (i = 0; i < num_pages; i++) { 5540 page = eb->pages[i]; 5541 5542 if (!PageUptodate(page)) { 5543 if (ret) { 5544 atomic_dec(&eb->io_pages); 5545 unlock_page(page); 5546 continue; 5547 } 5548 5549 ClearPageError(page); 5550 err = __extent_read_full_page(page, 5551 btree_get_extent, &bio, 5552 mirror_num, &bio_flags, 5553 REQ_META); 5554 if (err) { 5555 ret = err; 5556 /* 5557 * We use &bio in above __extent_read_full_page, 5558 * so we ensure that if it returns error, the 5559 * current page fails to add itself to bio and 5560 * it's been unlocked. 5561 * 5562 * We must dec io_pages by ourselves. 5563 */ 5564 atomic_dec(&eb->io_pages); 5565 } 5566 } else { 5567 unlock_page(page); 5568 } 5569 } 5570 5571 if (bio) { 5572 err = submit_one_bio(bio, mirror_num, bio_flags); 5573 if (err) 5574 return err; 5575 } 5576 5577 if (ret || wait != WAIT_COMPLETE) 5578 return ret; 5579 5580 for (i = 0; i < num_pages; i++) { 5581 page = eb->pages[i]; 5582 wait_on_page_locked(page); 5583 if (!PageUptodate(page)) 5584 ret = -EIO; 5585 } 5586 5587 return ret; 5588 5589 unlock_exit: 5590 while (locked_pages > 0) { 5591 locked_pages--; 5592 page = eb->pages[locked_pages]; 5593 unlock_page(page); 5594 } 5595 return ret; 5596 } 5597 5598 void read_extent_buffer(const struct extent_buffer *eb, void *dstv, 5599 unsigned long start, unsigned long len) 5600 { 5601 size_t cur; 5602 size_t offset; 5603 struct page *page; 5604 char *kaddr; 5605 char *dst = (char *)dstv; 5606 unsigned long i = start >> PAGE_SHIFT; 5607 5608 if (start + len > eb->len) { 5609 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n", 5610 eb->start, eb->len, start, len); 5611 memset(dst, 0, len); 5612 return; 5613 } 5614 5615 offset = offset_in_page(start); 5616 5617 while (len > 0) { 5618 page = eb->pages[i]; 5619 5620 cur = min(len, (PAGE_SIZE - offset)); 5621 kaddr = page_address(page); 5622 memcpy(dst, kaddr + offset, cur); 5623 5624 dst += cur; 5625 len -= cur; 5626 offset = 0; 5627 i++; 5628 } 5629 } 5630 5631 int read_extent_buffer_to_user(const struct extent_buffer *eb, 5632 void __user *dstv, 5633 unsigned long start, unsigned long len) 5634 { 5635 size_t cur; 5636 size_t offset; 5637 struct page *page; 5638 char *kaddr; 5639 char __user *dst = (char __user *)dstv; 5640 unsigned long i = start >> PAGE_SHIFT; 5641 int ret = 0; 5642 5643 WARN_ON(start > eb->len); 5644 WARN_ON(start + len > eb->start + eb->len); 5645 5646 offset = offset_in_page(start); 5647 5648 while (len > 0) { 5649 page = eb->pages[i]; 5650 5651 cur = min(len, (PAGE_SIZE - offset)); 5652 kaddr = page_address(page); 5653 if (copy_to_user(dst, kaddr + offset, cur)) { 5654 ret = -EFAULT; 5655 break; 5656 } 5657 5658 dst += cur; 5659 len -= cur; 5660 offset = 0; 5661 i++; 5662 } 5663 5664 return ret; 5665 } 
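/*
 * Example (illustrative sketch only, not used by the code in this file):
 * a typical caller copies a small on-disk structure out of a tree block
 * with read_extent_buffer(); here 'eb' and 'slot' are assumed to come
 * from the caller's search path:
 *
 *	struct btrfs_disk_key disk_key;
 *
 *	read_extent_buffer(eb, &disk_key,
 *			   btrfs_node_key_ptr_offset(slot),
 *			   sizeof(disk_key));
 *
 * read_extent_buffer_to_user() is the __user variant of the same copy and
 * returns -EFAULT when the copy to userspace fails. Both helpers hide the
 * fact that the requested range may cross a page boundary inside the
 * extent buffer.
 */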
5666 5667 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, 5668 unsigned long start, unsigned long len) 5669 { 5670 size_t cur; 5671 size_t offset; 5672 struct page *page; 5673 char *kaddr; 5674 char *ptr = (char *)ptrv; 5675 unsigned long i = start >> PAGE_SHIFT; 5676 int ret = 0; 5677 5678 WARN_ON(start > eb->len); 5679 WARN_ON(start + len > eb->start + eb->len); 5680 5681 offset = offset_in_page(start); 5682 5683 while (len > 0) { 5684 page = eb->pages[i]; 5685 5686 cur = min(len, (PAGE_SIZE - offset)); 5687 5688 kaddr = page_address(page); 5689 ret = memcmp(ptr, kaddr + offset, cur); 5690 if (ret) 5691 break; 5692 5693 ptr += cur; 5694 len -= cur; 5695 offset = 0; 5696 i++; 5697 } 5698 return ret; 5699 } 5700 5701 void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb, 5702 const void *srcv) 5703 { 5704 char *kaddr; 5705 5706 WARN_ON(!PageUptodate(eb->pages[0])); 5707 kaddr = page_address(eb->pages[0]); 5708 memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv, 5709 BTRFS_FSID_SIZE); 5710 } 5711 5712 void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv) 5713 { 5714 char *kaddr; 5715 5716 WARN_ON(!PageUptodate(eb->pages[0])); 5717 kaddr = page_address(eb->pages[0]); 5718 memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv, 5719 BTRFS_FSID_SIZE); 5720 } 5721 5722 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, 5723 unsigned long start, unsigned long len) 5724 { 5725 size_t cur; 5726 size_t offset; 5727 struct page *page; 5728 char *kaddr; 5729 char *src = (char *)srcv; 5730 unsigned long i = start >> PAGE_SHIFT; 5731 5732 WARN_ON(start > eb->len); 5733 WARN_ON(start + len > eb->start + eb->len); 5734 5735 offset = offset_in_page(start); 5736 5737 while (len > 0) { 5738 page = eb->pages[i]; 5739 WARN_ON(!PageUptodate(page)); 5740 5741 cur = min(len, PAGE_SIZE - offset); 5742 kaddr = page_address(page); 5743 memcpy(kaddr + offset, src, cur); 5744 5745 src += cur; 5746 len -= cur; 5747 offset = 0; 5748 i++; 5749 } 5750 } 5751 5752 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, 5753 unsigned long len) 5754 { 5755 size_t cur; 5756 size_t offset; 5757 struct page *page; 5758 char *kaddr; 5759 unsigned long i = start >> PAGE_SHIFT; 5760 5761 WARN_ON(start > eb->len); 5762 WARN_ON(start + len > eb->start + eb->len); 5763 5764 offset = offset_in_page(start); 5765 5766 while (len > 0) { 5767 page = eb->pages[i]; 5768 WARN_ON(!PageUptodate(page)); 5769 5770 cur = min(len, PAGE_SIZE - offset); 5771 kaddr = page_address(page); 5772 memset(kaddr + offset, 0, cur); 5773 5774 len -= cur; 5775 offset = 0; 5776 i++; 5777 } 5778 } 5779 5780 void copy_extent_buffer_full(const struct extent_buffer *dst, 5781 const struct extent_buffer *src) 5782 { 5783 int i; 5784 int num_pages; 5785 5786 ASSERT(dst->len == src->len); 5787 5788 num_pages = num_extent_pages(dst); 5789 for (i = 0; i < num_pages; i++) 5790 copy_page(page_address(dst->pages[i]), 5791 page_address(src->pages[i])); 5792 } 5793 5794 void copy_extent_buffer(const struct extent_buffer *dst, 5795 const struct extent_buffer *src, 5796 unsigned long dst_offset, unsigned long src_offset, 5797 unsigned long len) 5798 { 5799 u64 dst_len = dst->len; 5800 size_t cur; 5801 size_t offset; 5802 struct page *page; 5803 char *kaddr; 5804 unsigned long i = dst_offset >> PAGE_SHIFT; 5805 5806 WARN_ON(src->len != dst_len); 5807 5808 offset = offset_in_page(dst_offset); 5809 5810 while (len > 0) { 5811 page = 
dst->pages[i]; 5812 WARN_ON(!PageUptodate(page)); 5813 5814 cur = min(len, (unsigned long)(PAGE_SIZE - offset)); 5815 5816 kaddr = page_address(page); 5817 read_extent_buffer(src, kaddr + offset, src_offset, cur); 5818 5819 src_offset += cur; 5820 len -= cur; 5821 offset = 0; 5822 i++; 5823 } 5824 } 5825 5826 /* 5827 * eb_bitmap_offset() - calculate the page and offset of the byte containing the 5828 * given bit number 5829 * @eb: the extent buffer 5830 * @start: offset of the bitmap item in the extent buffer 5831 * @nr: bit number 5832 * @page_index: return index of the page in the extent buffer that contains the 5833 * given bit number 5834 * @page_offset: return offset into the page given by page_index 5835 * 5836 * This helper hides the ugliness of finding the byte in an extent buffer which 5837 * contains a given bit. 5838 */ 5839 static inline void eb_bitmap_offset(const struct extent_buffer *eb, 5840 unsigned long start, unsigned long nr, 5841 unsigned long *page_index, 5842 size_t *page_offset) 5843 { 5844 size_t byte_offset = BIT_BYTE(nr); 5845 size_t offset; 5846 5847 /* 5848 * The byte we want is the offset of the extent buffer + the offset of 5849 * the bitmap item in the extent buffer + the offset of the byte in the 5850 * bitmap item. 5851 */ 5852 offset = start + byte_offset; 5853 5854 *page_index = offset >> PAGE_SHIFT; 5855 *page_offset = offset_in_page(offset); 5856 } 5857 5858 /** 5859 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set 5860 * @eb: the extent buffer 5861 * @start: offset of the bitmap item in the extent buffer 5862 * @nr: bit number to test 5863 */ 5864 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, 5865 unsigned long nr) 5866 { 5867 u8 *kaddr; 5868 struct page *page; 5869 unsigned long i; 5870 size_t offset; 5871 5872 eb_bitmap_offset(eb, start, nr, &i, &offset); 5873 page = eb->pages[i]; 5874 WARN_ON(!PageUptodate(page)); 5875 kaddr = page_address(page); 5876 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1))); 5877 } 5878 5879 /** 5880 * extent_buffer_bitmap_set - set an area of a bitmap 5881 * @eb: the extent buffer 5882 * @start: offset of the bitmap item in the extent buffer 5883 * @pos: bit number of the first bit 5884 * @len: number of bits to set 5885 */ 5886 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, 5887 unsigned long pos, unsigned long len) 5888 { 5889 u8 *kaddr; 5890 struct page *page; 5891 unsigned long i; 5892 size_t offset; 5893 const unsigned int size = pos + len; 5894 int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE); 5895 u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos); 5896 5897 eb_bitmap_offset(eb, start, pos, &i, &offset); 5898 page = eb->pages[i]; 5899 WARN_ON(!PageUptodate(page)); 5900 kaddr = page_address(page); 5901 5902 while (len >= bits_to_set) { 5903 kaddr[offset] |= mask_to_set; 5904 len -= bits_to_set; 5905 bits_to_set = BITS_PER_BYTE; 5906 mask_to_set = ~0; 5907 if (++offset >= PAGE_SIZE && len > 0) { 5908 offset = 0; 5909 page = eb->pages[++i]; 5910 WARN_ON(!PageUptodate(page)); 5911 kaddr = page_address(page); 5912 } 5913 } 5914 if (len) { 5915 mask_to_set &= BITMAP_LAST_BYTE_MASK(size); 5916 kaddr[offset] |= mask_to_set; 5917 } 5918 } 5919 5920 5921 /** 5922 * extent_buffer_bitmap_clear - clear an area of a bitmap 5923 * @eb: the extent buffer 5924 * @start: offset of the bitmap item in the extent buffer 5925 * @pos: bit number of the first bit 5926 * @len: number of bits to clear 5927 */ 5928 void 
extent_buffer_bitmap_clear(const struct extent_buffer *eb, 5929 unsigned long start, unsigned long pos, 5930 unsigned long len) 5931 { 5932 u8 *kaddr; 5933 struct page *page; 5934 unsigned long i; 5935 size_t offset; 5936 const unsigned int size = pos + len; 5937 int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE); 5938 u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos); 5939 5940 eb_bitmap_offset(eb, start, pos, &i, &offset); 5941 page = eb->pages[i]; 5942 WARN_ON(!PageUptodate(page)); 5943 kaddr = page_address(page); 5944 5945 while (len >= bits_to_clear) { 5946 kaddr[offset] &= ~mask_to_clear; 5947 len -= bits_to_clear; 5948 bits_to_clear = BITS_PER_BYTE; 5949 mask_to_clear = ~0; 5950 if (++offset >= PAGE_SIZE && len > 0) { 5951 offset = 0; 5952 page = eb->pages[++i]; 5953 WARN_ON(!PageUptodate(page)); 5954 kaddr = page_address(page); 5955 } 5956 } 5957 if (len) { 5958 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size); 5959 kaddr[offset] &= ~mask_to_clear; 5960 } 5961 } 5962 5963 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len) 5964 { 5965 unsigned long distance = (src > dst) ? src - dst : dst - src; 5966 return distance < len; 5967 } 5968 5969 static void copy_pages(struct page *dst_page, struct page *src_page, 5970 unsigned long dst_off, unsigned long src_off, 5971 unsigned long len) 5972 { 5973 char *dst_kaddr = page_address(dst_page); 5974 char *src_kaddr; 5975 int must_memmove = 0; 5976 5977 if (dst_page != src_page) { 5978 src_kaddr = page_address(src_page); 5979 } else { 5980 src_kaddr = dst_kaddr; 5981 if (areas_overlap(src_off, dst_off, len)) 5982 must_memmove = 1; 5983 } 5984 5985 if (must_memmove) 5986 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len); 5987 else 5988 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); 5989 } 5990 5991 void memcpy_extent_buffer(const struct extent_buffer *dst, 5992 unsigned long dst_offset, unsigned long src_offset, 5993 unsigned long len) 5994 { 5995 struct btrfs_fs_info *fs_info = dst->fs_info; 5996 size_t cur; 5997 size_t dst_off_in_page; 5998 size_t src_off_in_page; 5999 unsigned long dst_i; 6000 unsigned long src_i; 6001 6002 if (src_offset + len > dst->len) { 6003 btrfs_err(fs_info, 6004 "memmove bogus src_offset %lu move len %lu dst len %lu", 6005 src_offset, len, dst->len); 6006 BUG(); 6007 } 6008 if (dst_offset + len > dst->len) { 6009 btrfs_err(fs_info, 6010 "memmove bogus dst_offset %lu move len %lu dst len %lu", 6011 dst_offset, len, dst->len); 6012 BUG(); 6013 } 6014 6015 while (len > 0) { 6016 dst_off_in_page = offset_in_page(dst_offset); 6017 src_off_in_page = offset_in_page(src_offset); 6018 6019 dst_i = dst_offset >> PAGE_SHIFT; 6020 src_i = src_offset >> PAGE_SHIFT; 6021 6022 cur = min(len, (unsigned long)(PAGE_SIZE - 6023 src_off_in_page)); 6024 cur = min_t(unsigned long, cur, 6025 (unsigned long)(PAGE_SIZE - dst_off_in_page)); 6026 6027 copy_pages(dst->pages[dst_i], dst->pages[src_i], 6028 dst_off_in_page, src_off_in_page, cur); 6029 6030 src_offset += cur; 6031 dst_offset += cur; 6032 len -= cur; 6033 } 6034 } 6035 6036 void memmove_extent_buffer(const struct extent_buffer *dst, 6037 unsigned long dst_offset, unsigned long src_offset, 6038 unsigned long len) 6039 { 6040 struct btrfs_fs_info *fs_info = dst->fs_info; 6041 size_t cur; 6042 size_t dst_off_in_page; 6043 size_t src_off_in_page; 6044 unsigned long dst_end = dst_offset + len - 1; 6045 unsigned long src_end = src_offset + len - 1; 6046 unsigned long dst_i; 6047 unsigned long src_i; 6048 6049 if (src_offset + len > 
dst->len) { 6050 btrfs_err(fs_info, 6051 "memmove bogus src_offset %lu move len %lu len %lu", 6052 src_offset, len, dst->len); 6053 BUG(); 6054 } 6055 if (dst_offset + len > dst->len) { 6056 btrfs_err(fs_info, 6057 "memmove bogus dst_offset %lu move len %lu len %lu", 6058 dst_offset, len, dst->len); 6059 BUG(); 6060 } 6061 if (dst_offset < src_offset) { 6062 memcpy_extent_buffer(dst, dst_offset, src_offset, len); 6063 return; 6064 } 6065 while (len > 0) { 6066 dst_i = dst_end >> PAGE_SHIFT; 6067 src_i = src_end >> PAGE_SHIFT; 6068 6069 dst_off_in_page = offset_in_page(dst_end); 6070 src_off_in_page = offset_in_page(src_end); 6071 6072 cur = min_t(unsigned long, len, src_off_in_page + 1); 6073 cur = min(cur, dst_off_in_page + 1); 6074 copy_pages(dst->pages[dst_i], dst->pages[src_i], 6075 dst_off_in_page - cur + 1, 6076 src_off_in_page - cur + 1, cur); 6077 6078 dst_end -= cur; 6079 src_end -= cur; 6080 len -= cur; 6081 } 6082 } 6083 6084 int try_release_extent_buffer(struct page *page) 6085 { 6086 struct extent_buffer *eb; 6087 6088 /* 6089 * We need to make sure nobody is attaching this page to an eb right 6090 * now. 6091 */ 6092 spin_lock(&page->mapping->private_lock); 6093 if (!PagePrivate(page)) { 6094 spin_unlock(&page->mapping->private_lock); 6095 return 1; 6096 } 6097 6098 eb = (struct extent_buffer *)page->private; 6099 BUG_ON(!eb); 6100 6101 /* 6102 * This is a little awful but should be ok, we need to make sure that 6103 * the eb doesn't disappear out from under us while we're looking at 6104 * this page. 6105 */ 6106 spin_lock(&eb->refs_lock); 6107 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { 6108 spin_unlock(&eb->refs_lock); 6109 spin_unlock(&page->mapping->private_lock); 6110 return 0; 6111 } 6112 spin_unlock(&page->mapping->private_lock); 6113 6114 /* 6115 * If tree ref isn't set then we know the ref on this eb is a real ref, 6116 * so just return, this page will likely be freed soon anyway. 6117 */ 6118 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { 6119 spin_unlock(&eb->refs_lock); 6120 return 0; 6121 } 6122 6123 return release_extent_buffer(eb); 6124 } 6125
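/*
 * Illustrative sketch only (not part of this file): try_release_extent_buffer()
 * is intended to be called from the btree inode's releasepage path when the VM
 * wants to drop a page. Assuming the usual shape of that callback (see
 * btree_releasepage() in disk-io.c), a minimal caller looks roughly like:
 *
 *	static int example_btree_releasepage(struct page *page, gfp_t gfp_flags)
 *	{
 *		if (PageWriteback(page) || PageDirty(page))
 *			return 0;
 *		return try_release_extent_buffer(page);
 *	}
 *
 * Dirty and writeback pages are refused up front; try_release_extent_buffer()
 * then only lets the page go when the tree reference is the last reference and
 * the buffer is not under IO.
 */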