1 #include <linux/bitops.h> 2 #include <linux/slab.h> 3 #include <linux/bio.h> 4 #include <linux/mm.h> 5 #include <linux/pagemap.h> 6 #include <linux/page-flags.h> 7 #include <linux/spinlock.h> 8 #include <linux/blkdev.h> 9 #include <linux/swap.h> 10 #include <linux/writeback.h> 11 #include <linux/pagevec.h> 12 #include <linux/prefetch.h> 13 #include <linux/cleancache.h> 14 #include "extent_io.h" 15 #include "extent_map.h" 16 #include "ctree.h" 17 #include "btrfs_inode.h" 18 #include "volumes.h" 19 #include "check-integrity.h" 20 #include "locking.h" 21 #include "rcu-string.h" 22 #include "backref.h" 23 24 static struct kmem_cache *extent_state_cache; 25 static struct kmem_cache *extent_buffer_cache; 26 static struct bio_set *btrfs_bioset; 27 28 #ifdef CONFIG_BTRFS_DEBUG 29 static LIST_HEAD(buffers); 30 static LIST_HEAD(states); 31 32 static DEFINE_SPINLOCK(leak_lock); 33 34 static inline 35 void btrfs_leak_debug_add(struct list_head *new, struct list_head *head) 36 { 37 unsigned long flags; 38 39 spin_lock_irqsave(&leak_lock, flags); 40 list_add(new, head); 41 spin_unlock_irqrestore(&leak_lock, flags); 42 } 43 44 static inline 45 void btrfs_leak_debug_del(struct list_head *entry) 46 { 47 unsigned long flags; 48 49 spin_lock_irqsave(&leak_lock, flags); 50 list_del(entry); 51 spin_unlock_irqrestore(&leak_lock, flags); 52 } 53 54 static inline 55 void btrfs_leak_debug_check(void) 56 { 57 struct extent_state *state; 58 struct extent_buffer *eb; 59 60 while (!list_empty(&states)) { 61 state = list_entry(states.next, struct extent_state, leak_list); 62 printk(KERN_ERR "BTRFS: state leak: start %llu end %llu " 63 "state %lu in tree %p refs %d\n", 64 state->start, state->end, state->state, state->tree, 65 atomic_read(&state->refs)); 66 list_del(&state->leak_list); 67 kmem_cache_free(extent_state_cache, state); 68 } 69 70 while (!list_empty(&buffers)) { 71 eb = list_entry(buffers.next, struct extent_buffer, leak_list); 72 printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu " 73 "refs %d\n", 74 eb->start, eb->len, atomic_read(&eb->refs)); 75 list_del(&eb->leak_list); 76 kmem_cache_free(extent_buffer_cache, eb); 77 } 78 } 79 80 #define btrfs_debug_check_extent_io_range(tree, start, end) \ 81 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end)) 82 static inline void __btrfs_debug_check_extent_io_range(const char *caller, 83 struct extent_io_tree *tree, u64 start, u64 end) 84 { 85 struct inode *inode; 86 u64 isize; 87 88 if (!tree->mapping) 89 return; 90 91 inode = tree->mapping->host; 92 isize = i_size_read(inode); 93 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { 94 printk_ratelimited(KERN_DEBUG 95 "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n", 96 caller, btrfs_ino(inode), isize, start, end); 97 } 98 } 99 #else 100 #define btrfs_leak_debug_add(new, head) do {} while (0) 101 #define btrfs_leak_debug_del(entry) do {} while (0) 102 #define btrfs_leak_debug_check() do {} while (0) 103 #define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0) 104 #endif 105 106 #define BUFFER_LRU_MAX 64 107 108 struct tree_entry { 109 u64 start; 110 u64 end; 111 struct rb_node rb_node; 112 }; 113 114 struct extent_page_data { 115 struct bio *bio; 116 struct extent_io_tree *tree; 117 get_extent_t *get_extent; 118 unsigned long bio_flags; 119 120 /* tells writepage not to lock the state bits for this range 121 * it still does the unlocking 122 */ 123 unsigned int extent_locked:1; 124 125 /* tells the submit_bio code to use a WRITE_SYNC */ 126 unsigned int sync_io:1; 
127 }; 128 129 static noinline void flush_write_bio(void *data); 130 static inline struct btrfs_fs_info * 131 tree_fs_info(struct extent_io_tree *tree) 132 { 133 if (!tree->mapping) 134 return NULL; 135 return btrfs_sb(tree->mapping->host->i_sb); 136 } 137 138 int __init extent_io_init(void) 139 { 140 extent_state_cache = kmem_cache_create("btrfs_extent_state", 141 sizeof(struct extent_state), 0, 142 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 143 if (!extent_state_cache) 144 return -ENOMEM; 145 146 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer", 147 sizeof(struct extent_buffer), 0, 148 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 149 if (!extent_buffer_cache) 150 goto free_state_cache; 151 152 btrfs_bioset = bioset_create(BIO_POOL_SIZE, 153 offsetof(struct btrfs_io_bio, bio)); 154 if (!btrfs_bioset) 155 goto free_buffer_cache; 156 157 if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE)) 158 goto free_bioset; 159 160 return 0; 161 162 free_bioset: 163 bioset_free(btrfs_bioset); 164 btrfs_bioset = NULL; 165 166 free_buffer_cache: 167 kmem_cache_destroy(extent_buffer_cache); 168 extent_buffer_cache = NULL; 169 170 free_state_cache: 171 kmem_cache_destroy(extent_state_cache); 172 extent_state_cache = NULL; 173 return -ENOMEM; 174 } 175 176 void extent_io_exit(void) 177 { 178 btrfs_leak_debug_check(); 179 180 /* 181 * Make sure all delayed rcu free are flushed before we 182 * destroy caches. 183 */ 184 rcu_barrier(); 185 if (extent_state_cache) 186 kmem_cache_destroy(extent_state_cache); 187 if (extent_buffer_cache) 188 kmem_cache_destroy(extent_buffer_cache); 189 if (btrfs_bioset) 190 bioset_free(btrfs_bioset); 191 } 192 193 void extent_io_tree_init(struct extent_io_tree *tree, 194 struct address_space *mapping) 195 { 196 tree->state = RB_ROOT; 197 tree->ops = NULL; 198 tree->dirty_bytes = 0; 199 spin_lock_init(&tree->lock); 200 tree->mapping = mapping; 201 } 202 203 static struct extent_state *alloc_extent_state(gfp_t mask) 204 { 205 struct extent_state *state; 206 207 state = kmem_cache_alloc(extent_state_cache, mask); 208 if (!state) 209 return state; 210 state->state = 0; 211 state->private = 0; 212 state->tree = NULL; 213 btrfs_leak_debug_add(&state->leak_list, &states); 214 atomic_set(&state->refs, 1); 215 init_waitqueue_head(&state->wq); 216 trace_alloc_extent_state(state, mask, _RET_IP_); 217 return state; 218 } 219 220 void free_extent_state(struct extent_state *state) 221 { 222 if (!state) 223 return; 224 if (atomic_dec_and_test(&state->refs)) { 225 WARN_ON(state->tree); 226 btrfs_leak_debug_del(&state->leak_list); 227 trace_free_extent_state(state, _RET_IP_); 228 kmem_cache_free(extent_state_cache, state); 229 } 230 } 231 232 static struct rb_node *tree_insert(struct rb_root *root, 233 struct rb_node *search_start, 234 u64 offset, 235 struct rb_node *node, 236 struct rb_node ***p_in, 237 struct rb_node **parent_in) 238 { 239 struct rb_node **p; 240 struct rb_node *parent = NULL; 241 struct tree_entry *entry; 242 243 if (p_in && parent_in) { 244 p = *p_in; 245 parent = *parent_in; 246 goto do_insert; 247 } 248 249 p = search_start ? 
&search_start : &root->rb_node; 250 while (*p) { 251 parent = *p; 252 entry = rb_entry(parent, struct tree_entry, rb_node); 253 254 if (offset < entry->start) 255 p = &(*p)->rb_left; 256 else if (offset > entry->end) 257 p = &(*p)->rb_right; 258 else 259 return parent; 260 } 261 262 do_insert: 263 rb_link_node(node, parent, p); 264 rb_insert_color(node, root); 265 return NULL; 266 } 267 268 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, 269 struct rb_node **prev_ret, 270 struct rb_node **next_ret, 271 struct rb_node ***p_ret, 272 struct rb_node **parent_ret) 273 { 274 struct rb_root *root = &tree->state; 275 struct rb_node **n = &root->rb_node; 276 struct rb_node *prev = NULL; 277 struct rb_node *orig_prev = NULL; 278 struct tree_entry *entry; 279 struct tree_entry *prev_entry = NULL; 280 281 while (*n) { 282 prev = *n; 283 entry = rb_entry(prev, struct tree_entry, rb_node); 284 prev_entry = entry; 285 286 if (offset < entry->start) 287 n = &(*n)->rb_left; 288 else if (offset > entry->end) 289 n = &(*n)->rb_right; 290 else 291 return *n; 292 } 293 294 if (p_ret) 295 *p_ret = n; 296 if (parent_ret) 297 *parent_ret = prev; 298 299 if (prev_ret) { 300 orig_prev = prev; 301 while (prev && offset > prev_entry->end) { 302 prev = rb_next(prev); 303 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 304 } 305 *prev_ret = prev; 306 prev = orig_prev; 307 } 308 309 if (next_ret) { 310 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 311 while (prev && offset < prev_entry->start) { 312 prev = rb_prev(prev); 313 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 314 } 315 *next_ret = prev; 316 } 317 return NULL; 318 } 319 320 static inline struct rb_node * 321 tree_search_for_insert(struct extent_io_tree *tree, 322 u64 offset, 323 struct rb_node ***p_ret, 324 struct rb_node **parent_ret) 325 { 326 struct rb_node *prev = NULL; 327 struct rb_node *ret; 328 329 ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret); 330 if (!ret) 331 return prev; 332 return ret; 333 } 334 335 static inline struct rb_node *tree_search(struct extent_io_tree *tree, 336 u64 offset) 337 { 338 return tree_search_for_insert(tree, offset, NULL, NULL); 339 } 340 341 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new, 342 struct extent_state *other) 343 { 344 if (tree->ops && tree->ops->merge_extent_hook) 345 tree->ops->merge_extent_hook(tree->mapping->host, new, 346 other); 347 } 348 349 /* 350 * utility function to look for merge candidates inside a given range. 351 * Any extents with matching state are merged together into a single 352 * extent in the tree. Extents with EXTENT_IO in their state field 353 * are not merged because the end_io handlers need to be able to do 354 * operations on them without sleeping (or doing allocations/splits). 355 * 356 * This should be called with the tree lock held. 
357 */ 358 static void merge_state(struct extent_io_tree *tree, 359 struct extent_state *state) 360 { 361 struct extent_state *other; 362 struct rb_node *other_node; 363 364 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) 365 return; 366 367 other_node = rb_prev(&state->rb_node); 368 if (other_node) { 369 other = rb_entry(other_node, struct extent_state, rb_node); 370 if (other->end == state->start - 1 && 371 other->state == state->state) { 372 merge_cb(tree, state, other); 373 state->start = other->start; 374 other->tree = NULL; 375 rb_erase(&other->rb_node, &tree->state); 376 free_extent_state(other); 377 } 378 } 379 other_node = rb_next(&state->rb_node); 380 if (other_node) { 381 other = rb_entry(other_node, struct extent_state, rb_node); 382 if (other->start == state->end + 1 && 383 other->state == state->state) { 384 merge_cb(tree, state, other); 385 state->end = other->end; 386 other->tree = NULL; 387 rb_erase(&other->rb_node, &tree->state); 388 free_extent_state(other); 389 } 390 } 391 } 392 393 static void set_state_cb(struct extent_io_tree *tree, 394 struct extent_state *state, unsigned long *bits) 395 { 396 if (tree->ops && tree->ops->set_bit_hook) 397 tree->ops->set_bit_hook(tree->mapping->host, state, bits); 398 } 399 400 static void clear_state_cb(struct extent_io_tree *tree, 401 struct extent_state *state, unsigned long *bits) 402 { 403 if (tree->ops && tree->ops->clear_bit_hook) 404 tree->ops->clear_bit_hook(tree->mapping->host, state, bits); 405 } 406 407 static void set_state_bits(struct extent_io_tree *tree, 408 struct extent_state *state, unsigned long *bits); 409 410 /* 411 * insert an extent_state struct into the tree. 'bits' are set on the 412 * struct before it is inserted. 413 * 414 * This may return -EEXIST if the extent is already there, in which case the 415 * state struct is freed. 416 * 417 * The tree lock is not taken internally. This is a utility function and 418 * probably isn't what you want to call (see set/clear_extent_bit). 419 */ 420 static int insert_state(struct extent_io_tree *tree, 421 struct extent_state *state, u64 start, u64 end, 422 struct rb_node ***p, 423 struct rb_node **parent, 424 unsigned long *bits) 425 { 426 struct rb_node *node; 427 428 if (end < start) 429 WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n", 430 end, start); 431 state->start = start; 432 state->end = end; 433 434 set_state_bits(tree, state, bits); 435 436 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent); 437 if (node) { 438 struct extent_state *found; 439 found = rb_entry(node, struct extent_state, rb_node); 440 printk(KERN_ERR "BTRFS: found node %llu %llu on insert of " 441 "%llu %llu\n", 442 found->start, found->end, start, end); 443 return -EEXIST; 444 } 445 state->tree = tree; 446 merge_state(tree, state); 447 return 0; 448 } 449 450 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig, 451 u64 split) 452 { 453 if (tree->ops && tree->ops->split_extent_hook) 454 tree->ops->split_extent_hook(tree->mapping->host, orig, split); 455 } 456 457 /* 458 * split a given extent state struct in two, inserting the preallocated 459 * struct 'prealloc' as the newly created second half. 'split' indicates an 460 * offset inside 'orig' where it should be split. 461 * 462 * Before calling, 463 * the tree has 'orig' at [orig->start, orig->end]. 
After calling, there 464 * are two extent state structs in the tree: 465 * prealloc: [orig->start, split - 1] 466 * orig: [ split, orig->end ] 467 * 468 * The tree locks are not taken by this function. They need to be held 469 * by the caller. 470 */ 471 static int split_state(struct extent_io_tree *tree, struct extent_state *orig, 472 struct extent_state *prealloc, u64 split) 473 { 474 struct rb_node *node; 475 476 split_cb(tree, orig, split); 477 478 prealloc->start = orig->start; 479 prealloc->end = split - 1; 480 prealloc->state = orig->state; 481 orig->start = split; 482 483 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end, 484 &prealloc->rb_node, NULL, NULL); 485 if (node) { 486 free_extent_state(prealloc); 487 return -EEXIST; 488 } 489 prealloc->tree = tree; 490 return 0; 491 } 492 493 static struct extent_state *next_state(struct extent_state *state) 494 { 495 struct rb_node *next = rb_next(&state->rb_node); 496 if (next) 497 return rb_entry(next, struct extent_state, rb_node); 498 else 499 return NULL; 500 } 501 502 /* 503 * utility function to clear some bits in an extent state struct. 504 * it will optionally wake up any one waiting on this state (wake == 1). 505 * 506 * If no bits are set on the state struct after clearing things, the 507 * struct is freed and removed from the tree 508 */ 509 static struct extent_state *clear_state_bit(struct extent_io_tree *tree, 510 struct extent_state *state, 511 unsigned long *bits, int wake) 512 { 513 struct extent_state *next; 514 unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS; 515 516 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { 517 u64 range = state->end - state->start + 1; 518 WARN_ON(range > tree->dirty_bytes); 519 tree->dirty_bytes -= range; 520 } 521 clear_state_cb(tree, state, bits); 522 state->state &= ~bits_to_clear; 523 if (wake) 524 wake_up(&state->wq); 525 if (state->state == 0) { 526 next = next_state(state); 527 if (state->tree) { 528 rb_erase(&state->rb_node, &tree->state); 529 state->tree = NULL; 530 free_extent_state(state); 531 } else { 532 WARN_ON(1); 533 } 534 } else { 535 merge_state(tree, state); 536 next = next_state(state); 537 } 538 return next; 539 } 540 541 static struct extent_state * 542 alloc_extent_state_atomic(struct extent_state *prealloc) 543 { 544 if (!prealloc) 545 prealloc = alloc_extent_state(GFP_ATOMIC); 546 547 return prealloc; 548 } 549 550 static void extent_io_tree_panic(struct extent_io_tree *tree, int err) 551 { 552 btrfs_panic(tree_fs_info(tree), err, "Locking error: " 553 "Extent tree was modified by another " 554 "thread while locked."); 555 } 556 557 /* 558 * clear some bits on a range in the tree. This may require splitting 559 * or inserting elements in the tree, so the gfp mask is used to 560 * indicate which allocations or sleeping are allowed. 561 * 562 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove 563 * the given range from the tree regardless of state (ie for truncate). 564 * 565 * the range [start, end] is inclusive. 566 * 567 * This takes the tree lock, and returns 0 on success and < 0 on error. 
568 */ 569 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 570 unsigned long bits, int wake, int delete, 571 struct extent_state **cached_state, 572 gfp_t mask) 573 { 574 struct extent_state *state; 575 struct extent_state *cached; 576 struct extent_state *prealloc = NULL; 577 struct rb_node *node; 578 u64 last_end; 579 int err; 580 int clear = 0; 581 582 btrfs_debug_check_extent_io_range(tree, start, end); 583 584 if (bits & EXTENT_DELALLOC) 585 bits |= EXTENT_NORESERVE; 586 587 if (delete) 588 bits |= ~EXTENT_CTLBITS; 589 bits |= EXTENT_FIRST_DELALLOC; 590 591 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY)) 592 clear = 1; 593 again: 594 if (!prealloc && (mask & __GFP_WAIT)) { 595 prealloc = alloc_extent_state(mask); 596 if (!prealloc) 597 return -ENOMEM; 598 } 599 600 spin_lock(&tree->lock); 601 if (cached_state) { 602 cached = *cached_state; 603 604 if (clear) { 605 *cached_state = NULL; 606 cached_state = NULL; 607 } 608 609 if (cached && cached->tree && cached->start <= start && 610 cached->end > start) { 611 if (clear) 612 atomic_dec(&cached->refs); 613 state = cached; 614 goto hit_next; 615 } 616 if (clear) 617 free_extent_state(cached); 618 } 619 /* 620 * this search will find the extents that end after 621 * our range starts 622 */ 623 node = tree_search(tree, start); 624 if (!node) 625 goto out; 626 state = rb_entry(node, struct extent_state, rb_node); 627 hit_next: 628 if (state->start > end) 629 goto out; 630 WARN_ON(state->end < start); 631 last_end = state->end; 632 633 /* the state doesn't have the wanted bits, go ahead */ 634 if (!(state->state & bits)) { 635 state = next_state(state); 636 goto next; 637 } 638 639 /* 640 * | ---- desired range ---- | 641 * | state | or 642 * | ------------- state -------------- | 643 * 644 * We need to split the extent we found, and may flip 645 * bits on second half. 646 * 647 * If the extent we found extends past our range, we 648 * just split and search again. It'll get split again 649 * the next time though. 650 * 651 * If the extent we found is inside our range, we clear 652 * the desired bit on it. 
653 */ 654 655 if (state->start < start) { 656 prealloc = alloc_extent_state_atomic(prealloc); 657 BUG_ON(!prealloc); 658 err = split_state(tree, state, prealloc, start); 659 if (err) 660 extent_io_tree_panic(tree, err); 661 662 prealloc = NULL; 663 if (err) 664 goto out; 665 if (state->end <= end) { 666 state = clear_state_bit(tree, state, &bits, wake); 667 goto next; 668 } 669 goto search_again; 670 } 671 /* 672 * | ---- desired range ---- | 673 * | state | 674 * We need to split the extent, and clear the bit 675 * on the first half 676 */ 677 if (state->start <= end && state->end > end) { 678 prealloc = alloc_extent_state_atomic(prealloc); 679 BUG_ON(!prealloc); 680 err = split_state(tree, state, prealloc, end + 1); 681 if (err) 682 extent_io_tree_panic(tree, err); 683 684 if (wake) 685 wake_up(&state->wq); 686 687 clear_state_bit(tree, prealloc, &bits, wake); 688 689 prealloc = NULL; 690 goto out; 691 } 692 693 state = clear_state_bit(tree, state, &bits, wake); 694 next: 695 if (last_end == (u64)-1) 696 goto out; 697 start = last_end + 1; 698 if (start <= end && state && !need_resched()) 699 goto hit_next; 700 goto search_again; 701 702 out: 703 spin_unlock(&tree->lock); 704 if (prealloc) 705 free_extent_state(prealloc); 706 707 return 0; 708 709 search_again: 710 if (start > end) 711 goto out; 712 spin_unlock(&tree->lock); 713 if (mask & __GFP_WAIT) 714 cond_resched(); 715 goto again; 716 } 717 718 static void wait_on_state(struct extent_io_tree *tree, 719 struct extent_state *state) 720 __releases(tree->lock) 721 __acquires(tree->lock) 722 { 723 DEFINE_WAIT(wait); 724 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); 725 spin_unlock(&tree->lock); 726 schedule(); 727 spin_lock(&tree->lock); 728 finish_wait(&state->wq, &wait); 729 } 730 731 /* 732 * waits for one or more bits to clear on a range in the state tree. 733 * The range [start, end] is inclusive. 
734 * The tree lock is taken by this function 735 */ 736 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 737 unsigned long bits) 738 { 739 struct extent_state *state; 740 struct rb_node *node; 741 742 btrfs_debug_check_extent_io_range(tree, start, end); 743 744 spin_lock(&tree->lock); 745 again: 746 while (1) { 747 /* 748 * this search will find all the extents that end after 749 * our range starts 750 */ 751 node = tree_search(tree, start); 752 process_node: 753 if (!node) 754 break; 755 756 state = rb_entry(node, struct extent_state, rb_node); 757 758 if (state->start > end) 759 goto out; 760 761 if (state->state & bits) { 762 start = state->start; 763 atomic_inc(&state->refs); 764 wait_on_state(tree, state); 765 free_extent_state(state); 766 goto again; 767 } 768 start = state->end + 1; 769 770 if (start > end) 771 break; 772 773 if (!cond_resched_lock(&tree->lock)) { 774 node = rb_next(node); 775 goto process_node; 776 } 777 } 778 out: 779 spin_unlock(&tree->lock); 780 } 781 782 static void set_state_bits(struct extent_io_tree *tree, 783 struct extent_state *state, 784 unsigned long *bits) 785 { 786 unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS; 787 788 set_state_cb(tree, state, bits); 789 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { 790 u64 range = state->end - state->start + 1; 791 tree->dirty_bytes += range; 792 } 793 state->state |= bits_to_set; 794 } 795 796 static void cache_state(struct extent_state *state, 797 struct extent_state **cached_ptr) 798 { 799 if (cached_ptr && !(*cached_ptr)) { 800 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) { 801 *cached_ptr = state; 802 atomic_inc(&state->refs); 803 } 804 } 805 } 806 807 /* 808 * set some bits on a range in the tree. This may require allocations or 809 * sleeping, so the gfp mask is used to indicate what is allowed. 810 * 811 * If any of the exclusive bits are set, this will fail with -EEXIST if some 812 * part of the range already has the desired bits set. The start of the 813 * existing range is returned in failed_start in this case. 814 * 815 * [start, end] is inclusive This takes the tree lock. 816 */ 817 818 static int __must_check 819 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 820 unsigned long bits, unsigned long exclusive_bits, 821 u64 *failed_start, struct extent_state **cached_state, 822 gfp_t mask) 823 { 824 struct extent_state *state; 825 struct extent_state *prealloc = NULL; 826 struct rb_node *node; 827 struct rb_node **p; 828 struct rb_node *parent; 829 int err = 0; 830 u64 last_start; 831 u64 last_end; 832 833 btrfs_debug_check_extent_io_range(tree, start, end); 834 835 bits |= EXTENT_FIRST_DELALLOC; 836 again: 837 if (!prealloc && (mask & __GFP_WAIT)) { 838 prealloc = alloc_extent_state(mask); 839 BUG_ON(!prealloc); 840 } 841 842 spin_lock(&tree->lock); 843 if (cached_state && *cached_state) { 844 state = *cached_state; 845 if (state->start <= start && state->end > start && 846 state->tree) { 847 node = &state->rb_node; 848 goto hit_next; 849 } 850 } 851 /* 852 * this search will find all the extents that end after 853 * our range starts. 
854 */ 855 node = tree_search_for_insert(tree, start, &p, &parent); 856 if (!node) { 857 prealloc = alloc_extent_state_atomic(prealloc); 858 BUG_ON(!prealloc); 859 err = insert_state(tree, prealloc, start, end, 860 &p, &parent, &bits); 861 if (err) 862 extent_io_tree_panic(tree, err); 863 864 cache_state(prealloc, cached_state); 865 prealloc = NULL; 866 goto out; 867 } 868 state = rb_entry(node, struct extent_state, rb_node); 869 hit_next: 870 last_start = state->start; 871 last_end = state->end; 872 873 /* 874 * | ---- desired range ---- | 875 * | state | 876 * 877 * Just lock what we found and keep going 878 */ 879 if (state->start == start && state->end <= end) { 880 if (state->state & exclusive_bits) { 881 *failed_start = state->start; 882 err = -EEXIST; 883 goto out; 884 } 885 886 set_state_bits(tree, state, &bits); 887 cache_state(state, cached_state); 888 merge_state(tree, state); 889 if (last_end == (u64)-1) 890 goto out; 891 start = last_end + 1; 892 state = next_state(state); 893 if (start < end && state && state->start == start && 894 !need_resched()) 895 goto hit_next; 896 goto search_again; 897 } 898 899 /* 900 * | ---- desired range ---- | 901 * | state | 902 * or 903 * | ------------- state -------------- | 904 * 905 * We need to split the extent we found, and may flip bits on 906 * second half. 907 * 908 * If the extent we found extends past our 909 * range, we just split and search again. It'll get split 910 * again the next time though. 911 * 912 * If the extent we found is inside our range, we set the 913 * desired bit on it. 914 */ 915 if (state->start < start) { 916 if (state->state & exclusive_bits) { 917 *failed_start = start; 918 err = -EEXIST; 919 goto out; 920 } 921 922 prealloc = alloc_extent_state_atomic(prealloc); 923 BUG_ON(!prealloc); 924 err = split_state(tree, state, prealloc, start); 925 if (err) 926 extent_io_tree_panic(tree, err); 927 928 prealloc = NULL; 929 if (err) 930 goto out; 931 if (state->end <= end) { 932 set_state_bits(tree, state, &bits); 933 cache_state(state, cached_state); 934 merge_state(tree, state); 935 if (last_end == (u64)-1) 936 goto out; 937 start = last_end + 1; 938 state = next_state(state); 939 if (start < end && state && state->start == start && 940 !need_resched()) 941 goto hit_next; 942 } 943 goto search_again; 944 } 945 /* 946 * | ---- desired range ---- | 947 * | state | or | state | 948 * 949 * There's a hole, we need to insert something in it and 950 * ignore the extent we found. 951 */ 952 if (state->start > start) { 953 u64 this_end; 954 if (end < last_start) 955 this_end = end; 956 else 957 this_end = last_start - 1; 958 959 prealloc = alloc_extent_state_atomic(prealloc); 960 BUG_ON(!prealloc); 961 962 /* 963 * Avoid to free 'prealloc' if it can be merged with 964 * the later extent. 
965 */ 966 err = insert_state(tree, prealloc, start, this_end, 967 NULL, NULL, &bits); 968 if (err) 969 extent_io_tree_panic(tree, err); 970 971 cache_state(prealloc, cached_state); 972 prealloc = NULL; 973 start = this_end + 1; 974 goto search_again; 975 } 976 /* 977 * | ---- desired range ---- | 978 * | state | 979 * We need to split the extent, and set the bit 980 * on the first half 981 */ 982 if (state->start <= end && state->end > end) { 983 if (state->state & exclusive_bits) { 984 *failed_start = start; 985 err = -EEXIST; 986 goto out; 987 } 988 989 prealloc = alloc_extent_state_atomic(prealloc); 990 BUG_ON(!prealloc); 991 err = split_state(tree, state, prealloc, end + 1); 992 if (err) 993 extent_io_tree_panic(tree, err); 994 995 set_state_bits(tree, prealloc, &bits); 996 cache_state(prealloc, cached_state); 997 merge_state(tree, prealloc); 998 prealloc = NULL; 999 goto out; 1000 } 1001 1002 goto search_again; 1003 1004 out: 1005 spin_unlock(&tree->lock); 1006 if (prealloc) 1007 free_extent_state(prealloc); 1008 1009 return err; 1010 1011 search_again: 1012 if (start > end) 1013 goto out; 1014 spin_unlock(&tree->lock); 1015 if (mask & __GFP_WAIT) 1016 cond_resched(); 1017 goto again; 1018 } 1019 1020 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 1021 unsigned long bits, u64 * failed_start, 1022 struct extent_state **cached_state, gfp_t mask) 1023 { 1024 return __set_extent_bit(tree, start, end, bits, 0, failed_start, 1025 cached_state, mask); 1026 } 1027 1028 1029 /** 1030 * convert_extent_bit - convert all bits in a given range from one bit to 1031 * another 1032 * @tree: the io tree to search 1033 * @start: the start offset in bytes 1034 * @end: the end offset in bytes (inclusive) 1035 * @bits: the bits to set in this range 1036 * @clear_bits: the bits to clear in this range 1037 * @cached_state: state that we're going to cache 1038 * @mask: the allocation mask 1039 * 1040 * This will go through and set bits for the given range. If any states exist 1041 * already in this range they are set with the given bit and cleared of the 1042 * clear_bits. This is only meant to be used by things that are mergeable, ie 1043 * converting from say DELALLOC to DIRTY. This is not meant to be used with 1044 * boundary bits like LOCK. 1045 */ 1046 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 1047 unsigned long bits, unsigned long clear_bits, 1048 struct extent_state **cached_state, gfp_t mask) 1049 { 1050 struct extent_state *state; 1051 struct extent_state *prealloc = NULL; 1052 struct rb_node *node; 1053 struct rb_node **p; 1054 struct rb_node *parent; 1055 int err = 0; 1056 u64 last_start; 1057 u64 last_end; 1058 1059 btrfs_debug_check_extent_io_range(tree, start, end); 1060 1061 again: 1062 if (!prealloc && (mask & __GFP_WAIT)) { 1063 prealloc = alloc_extent_state(mask); 1064 if (!prealloc) 1065 return -ENOMEM; 1066 } 1067 1068 spin_lock(&tree->lock); 1069 if (cached_state && *cached_state) { 1070 state = *cached_state; 1071 if (state->start <= start && state->end > start && 1072 state->tree) { 1073 node = &state->rb_node; 1074 goto hit_next; 1075 } 1076 } 1077 1078 /* 1079 * this search will find all the extents that end after 1080 * our range starts. 
1081 */ 1082 node = tree_search_for_insert(tree, start, &p, &parent); 1083 if (!node) { 1084 prealloc = alloc_extent_state_atomic(prealloc); 1085 if (!prealloc) { 1086 err = -ENOMEM; 1087 goto out; 1088 } 1089 err = insert_state(tree, prealloc, start, end, 1090 &p, &parent, &bits); 1091 if (err) 1092 extent_io_tree_panic(tree, err); 1093 cache_state(prealloc, cached_state); 1094 prealloc = NULL; 1095 goto out; 1096 } 1097 state = rb_entry(node, struct extent_state, rb_node); 1098 hit_next: 1099 last_start = state->start; 1100 last_end = state->end; 1101 1102 /* 1103 * | ---- desired range ---- | 1104 * | state | 1105 * 1106 * Just lock what we found and keep going 1107 */ 1108 if (state->start == start && state->end <= end) { 1109 set_state_bits(tree, state, &bits); 1110 cache_state(state, cached_state); 1111 state = clear_state_bit(tree, state, &clear_bits, 0); 1112 if (last_end == (u64)-1) 1113 goto out; 1114 start = last_end + 1; 1115 if (start < end && state && state->start == start && 1116 !need_resched()) 1117 goto hit_next; 1118 goto search_again; 1119 } 1120 1121 /* 1122 * | ---- desired range ---- | 1123 * | state | 1124 * or 1125 * | ------------- state -------------- | 1126 * 1127 * We need to split the extent we found, and may flip bits on 1128 * second half. 1129 * 1130 * If the extent we found extends past our 1131 * range, we just split and search again. It'll get split 1132 * again the next time though. 1133 * 1134 * If the extent we found is inside our range, we set the 1135 * desired bit on it. 1136 */ 1137 if (state->start < start) { 1138 prealloc = alloc_extent_state_atomic(prealloc); 1139 if (!prealloc) { 1140 err = -ENOMEM; 1141 goto out; 1142 } 1143 err = split_state(tree, state, prealloc, start); 1144 if (err) 1145 extent_io_tree_panic(tree, err); 1146 prealloc = NULL; 1147 if (err) 1148 goto out; 1149 if (state->end <= end) { 1150 set_state_bits(tree, state, &bits); 1151 cache_state(state, cached_state); 1152 state = clear_state_bit(tree, state, &clear_bits, 0); 1153 if (last_end == (u64)-1) 1154 goto out; 1155 start = last_end + 1; 1156 if (start < end && state && state->start == start && 1157 !need_resched()) 1158 goto hit_next; 1159 } 1160 goto search_again; 1161 } 1162 /* 1163 * | ---- desired range ---- | 1164 * | state | or | state | 1165 * 1166 * There's a hole, we need to insert something in it and 1167 * ignore the extent we found. 1168 */ 1169 if (state->start > start) { 1170 u64 this_end; 1171 if (end < last_start) 1172 this_end = end; 1173 else 1174 this_end = last_start - 1; 1175 1176 prealloc = alloc_extent_state_atomic(prealloc); 1177 if (!prealloc) { 1178 err = -ENOMEM; 1179 goto out; 1180 } 1181 1182 /* 1183 * Avoid to free 'prealloc' if it can be merged with 1184 * the later extent. 
1185 */ 1186 err = insert_state(tree, prealloc, start, this_end, 1187 NULL, NULL, &bits); 1188 if (err) 1189 extent_io_tree_panic(tree, err); 1190 cache_state(prealloc, cached_state); 1191 prealloc = NULL; 1192 start = this_end + 1; 1193 goto search_again; 1194 } 1195 /* 1196 * | ---- desired range ---- | 1197 * | state | 1198 * We need to split the extent, and set the bit 1199 * on the first half 1200 */ 1201 if (state->start <= end && state->end > end) { 1202 prealloc = alloc_extent_state_atomic(prealloc); 1203 if (!prealloc) { 1204 err = -ENOMEM; 1205 goto out; 1206 } 1207 1208 err = split_state(tree, state, prealloc, end + 1); 1209 if (err) 1210 extent_io_tree_panic(tree, err); 1211 1212 set_state_bits(tree, prealloc, &bits); 1213 cache_state(prealloc, cached_state); 1214 clear_state_bit(tree, prealloc, &clear_bits, 0); 1215 prealloc = NULL; 1216 goto out; 1217 } 1218 1219 goto search_again; 1220 1221 out: 1222 spin_unlock(&tree->lock); 1223 if (prealloc) 1224 free_extent_state(prealloc); 1225 1226 return err; 1227 1228 search_again: 1229 if (start > end) 1230 goto out; 1231 spin_unlock(&tree->lock); 1232 if (mask & __GFP_WAIT) 1233 cond_resched(); 1234 goto again; 1235 } 1236 1237 /* wrappers around set/clear extent bit */ 1238 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 1239 gfp_t mask) 1240 { 1241 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, 1242 NULL, mask); 1243 } 1244 1245 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1246 unsigned long bits, gfp_t mask) 1247 { 1248 return set_extent_bit(tree, start, end, bits, NULL, 1249 NULL, mask); 1250 } 1251 1252 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1253 unsigned long bits, gfp_t mask) 1254 { 1255 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask); 1256 } 1257 1258 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, 1259 struct extent_state **cached_state, gfp_t mask) 1260 { 1261 return set_extent_bit(tree, start, end, 1262 EXTENT_DELALLOC | EXTENT_UPTODATE, 1263 NULL, cached_state, mask); 1264 } 1265 1266 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, 1267 struct extent_state **cached_state, gfp_t mask) 1268 { 1269 return set_extent_bit(tree, start, end, 1270 EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG, 1271 NULL, cached_state, mask); 1272 } 1273 1274 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 1275 gfp_t mask) 1276 { 1277 return clear_extent_bit(tree, start, end, 1278 EXTENT_DIRTY | EXTENT_DELALLOC | 1279 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask); 1280 } 1281 1282 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, 1283 gfp_t mask) 1284 { 1285 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, 1286 NULL, mask); 1287 } 1288 1289 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 1290 struct extent_state **cached_state, gfp_t mask) 1291 { 1292 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, 1293 cached_state, mask); 1294 } 1295 1296 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 1297 struct extent_state **cached_state, gfp_t mask) 1298 { 1299 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, 1300 cached_state, mask); 1301 } 1302 1303 /* 1304 * either insert or lock state struct between start and end use mask to tell 1305 * us if waiting is desired. 
1306 */ 1307 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1308 unsigned long bits, struct extent_state **cached_state) 1309 { 1310 int err; 1311 u64 failed_start; 1312 while (1) { 1313 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, 1314 EXTENT_LOCKED, &failed_start, 1315 cached_state, GFP_NOFS); 1316 if (err == -EEXIST) { 1317 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); 1318 start = failed_start; 1319 } else 1320 break; 1321 WARN_ON(start > end); 1322 } 1323 return err; 1324 } 1325 1326 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) 1327 { 1328 return lock_extent_bits(tree, start, end, 0, NULL); 1329 } 1330 1331 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) 1332 { 1333 int err; 1334 u64 failed_start; 1335 1336 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, 1337 &failed_start, NULL, GFP_NOFS); 1338 if (err == -EEXIST) { 1339 if (failed_start > start) 1340 clear_extent_bit(tree, start, failed_start - 1, 1341 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS); 1342 return 0; 1343 } 1344 return 1; 1345 } 1346 1347 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, 1348 struct extent_state **cached, gfp_t mask) 1349 { 1350 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, 1351 mask); 1352 } 1353 1354 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) 1355 { 1356 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, 1357 GFP_NOFS); 1358 } 1359 1360 int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) 1361 { 1362 unsigned long index = start >> PAGE_CACHE_SHIFT; 1363 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1364 struct page *page; 1365 1366 while (index <= end_index) { 1367 page = find_get_page(inode->i_mapping, index); 1368 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1369 clear_page_dirty_for_io(page); 1370 page_cache_release(page); 1371 index++; 1372 } 1373 return 0; 1374 } 1375 1376 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) 1377 { 1378 unsigned long index = start >> PAGE_CACHE_SHIFT; 1379 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1380 struct page *page; 1381 1382 while (index <= end_index) { 1383 page = find_get_page(inode->i_mapping, index); 1384 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1385 account_page_redirty(page); 1386 __set_page_dirty_nobuffers(page); 1387 page_cache_release(page); 1388 index++; 1389 } 1390 return 0; 1391 } 1392 1393 /* 1394 * helper function to set both pages and extents in the tree writeback 1395 */ 1396 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) 1397 { 1398 unsigned long index = start >> PAGE_CACHE_SHIFT; 1399 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1400 struct page *page; 1401 1402 while (index <= end_index) { 1403 page = find_get_page(tree->mapping, index); 1404 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1405 set_page_writeback(page); 1406 page_cache_release(page); 1407 index++; 1408 } 1409 return 0; 1410 } 1411 1412 /* find the first state struct with 'bits' set after 'start', and 1413 * return it. tree->lock must be held. 
NULL will returned if 1414 * nothing was found after 'start' 1415 */ 1416 static struct extent_state * 1417 find_first_extent_bit_state(struct extent_io_tree *tree, 1418 u64 start, unsigned long bits) 1419 { 1420 struct rb_node *node; 1421 struct extent_state *state; 1422 1423 /* 1424 * this search will find all the extents that end after 1425 * our range starts. 1426 */ 1427 node = tree_search(tree, start); 1428 if (!node) 1429 goto out; 1430 1431 while (1) { 1432 state = rb_entry(node, struct extent_state, rb_node); 1433 if (state->end >= start && (state->state & bits)) 1434 return state; 1435 1436 node = rb_next(node); 1437 if (!node) 1438 break; 1439 } 1440 out: 1441 return NULL; 1442 } 1443 1444 /* 1445 * find the first offset in the io tree with 'bits' set. zero is 1446 * returned if we find something, and *start_ret and *end_ret are 1447 * set to reflect the state struct that was found. 1448 * 1449 * If nothing was found, 1 is returned. If found something, return 0. 1450 */ 1451 int find_first_extent_bit(struct extent_io_tree *tree, u64 start, 1452 u64 *start_ret, u64 *end_ret, unsigned long bits, 1453 struct extent_state **cached_state) 1454 { 1455 struct extent_state *state; 1456 struct rb_node *n; 1457 int ret = 1; 1458 1459 spin_lock(&tree->lock); 1460 if (cached_state && *cached_state) { 1461 state = *cached_state; 1462 if (state->end == start - 1 && state->tree) { 1463 n = rb_next(&state->rb_node); 1464 while (n) { 1465 state = rb_entry(n, struct extent_state, 1466 rb_node); 1467 if (state->state & bits) 1468 goto got_it; 1469 n = rb_next(n); 1470 } 1471 free_extent_state(*cached_state); 1472 *cached_state = NULL; 1473 goto out; 1474 } 1475 free_extent_state(*cached_state); 1476 *cached_state = NULL; 1477 } 1478 1479 state = find_first_extent_bit_state(tree, start, bits); 1480 got_it: 1481 if (state) { 1482 cache_state(state, cached_state); 1483 *start_ret = state->start; 1484 *end_ret = state->end; 1485 ret = 0; 1486 } 1487 out: 1488 spin_unlock(&tree->lock); 1489 return ret; 1490 } 1491 1492 /* 1493 * find a contiguous range of bytes in the file marked as delalloc, not 1494 * more than 'max_bytes'. start and end are used to return the range, 1495 * 1496 * 1 is returned if we find something, 0 if nothing was in the tree 1497 */ 1498 static noinline u64 find_delalloc_range(struct extent_io_tree *tree, 1499 u64 *start, u64 *end, u64 max_bytes, 1500 struct extent_state **cached_state) 1501 { 1502 struct rb_node *node; 1503 struct extent_state *state; 1504 u64 cur_start = *start; 1505 u64 found = 0; 1506 u64 total_bytes = 0; 1507 1508 spin_lock(&tree->lock); 1509 1510 /* 1511 * this search will find all the extents that end after 1512 * our range starts. 
1513 */ 1514 node = tree_search(tree, cur_start); 1515 if (!node) { 1516 if (!found) 1517 *end = (u64)-1; 1518 goto out; 1519 } 1520 1521 while (1) { 1522 state = rb_entry(node, struct extent_state, rb_node); 1523 if (found && (state->start != cur_start || 1524 (state->state & EXTENT_BOUNDARY))) { 1525 goto out; 1526 } 1527 if (!(state->state & EXTENT_DELALLOC)) { 1528 if (!found) 1529 *end = state->end; 1530 goto out; 1531 } 1532 if (!found) { 1533 *start = state->start; 1534 *cached_state = state; 1535 atomic_inc(&state->refs); 1536 } 1537 found++; 1538 *end = state->end; 1539 cur_start = state->end + 1; 1540 node = rb_next(node); 1541 total_bytes += state->end - state->start + 1; 1542 if (total_bytes >= max_bytes) 1543 break; 1544 if (!node) 1545 break; 1546 } 1547 out: 1548 spin_unlock(&tree->lock); 1549 return found; 1550 } 1551 1552 static noinline void __unlock_for_delalloc(struct inode *inode, 1553 struct page *locked_page, 1554 u64 start, u64 end) 1555 { 1556 int ret; 1557 struct page *pages[16]; 1558 unsigned long index = start >> PAGE_CACHE_SHIFT; 1559 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1560 unsigned long nr_pages = end_index - index + 1; 1561 int i; 1562 1563 if (index == locked_page->index && end_index == index) 1564 return; 1565 1566 while (nr_pages > 0) { 1567 ret = find_get_pages_contig(inode->i_mapping, index, 1568 min_t(unsigned long, nr_pages, 1569 ARRAY_SIZE(pages)), pages); 1570 for (i = 0; i < ret; i++) { 1571 if (pages[i] != locked_page) 1572 unlock_page(pages[i]); 1573 page_cache_release(pages[i]); 1574 } 1575 nr_pages -= ret; 1576 index += ret; 1577 cond_resched(); 1578 } 1579 } 1580 1581 static noinline int lock_delalloc_pages(struct inode *inode, 1582 struct page *locked_page, 1583 u64 delalloc_start, 1584 u64 delalloc_end) 1585 { 1586 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT; 1587 unsigned long start_index = index; 1588 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT; 1589 unsigned long pages_locked = 0; 1590 struct page *pages[16]; 1591 unsigned long nrpages; 1592 int ret; 1593 int i; 1594 1595 /* the caller is responsible for locking the start index */ 1596 if (index == locked_page->index && index == end_index) 1597 return 0; 1598 1599 /* skip the page at the start index */ 1600 nrpages = end_index - index + 1; 1601 while (nrpages > 0) { 1602 ret = find_get_pages_contig(inode->i_mapping, index, 1603 min_t(unsigned long, 1604 nrpages, ARRAY_SIZE(pages)), pages); 1605 if (ret == 0) { 1606 ret = -EAGAIN; 1607 goto done; 1608 } 1609 /* now we have an array of pages, lock them all */ 1610 for (i = 0; i < ret; i++) { 1611 /* 1612 * the caller is taking responsibility for 1613 * locked_page 1614 */ 1615 if (pages[i] != locked_page) { 1616 lock_page(pages[i]); 1617 if (!PageDirty(pages[i]) || 1618 pages[i]->mapping != inode->i_mapping) { 1619 ret = -EAGAIN; 1620 unlock_page(pages[i]); 1621 page_cache_release(pages[i]); 1622 goto done; 1623 } 1624 } 1625 page_cache_release(pages[i]); 1626 pages_locked++; 1627 } 1628 nrpages -= ret; 1629 index += ret; 1630 cond_resched(); 1631 } 1632 ret = 0; 1633 done: 1634 if (ret && pages_locked) { 1635 __unlock_for_delalloc(inode, locked_page, 1636 delalloc_start, 1637 ((u64)(start_index + pages_locked - 1)) << 1638 PAGE_CACHE_SHIFT); 1639 } 1640 return ret; 1641 } 1642 1643 /* 1644 * find a contiguous range of bytes in the file marked as delalloc, not 1645 * more than 'max_bytes'. 
start and end are used to return the range, 1646 * 1647 * 1 is returned if we find something, 0 if nothing was in the tree 1648 */ 1649 STATIC u64 find_lock_delalloc_range(struct inode *inode, 1650 struct extent_io_tree *tree, 1651 struct page *locked_page, u64 *start, 1652 u64 *end, u64 max_bytes) 1653 { 1654 u64 delalloc_start; 1655 u64 delalloc_end; 1656 u64 found; 1657 struct extent_state *cached_state = NULL; 1658 int ret; 1659 int loops = 0; 1660 1661 again: 1662 /* step one, find a bunch of delalloc bytes starting at start */ 1663 delalloc_start = *start; 1664 delalloc_end = 0; 1665 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, 1666 max_bytes, &cached_state); 1667 if (!found || delalloc_end <= *start) { 1668 *start = delalloc_start; 1669 *end = delalloc_end; 1670 free_extent_state(cached_state); 1671 return 0; 1672 } 1673 1674 /* 1675 * start comes from the offset of locked_page. We have to lock 1676 * pages in order, so we can't process delalloc bytes before 1677 * locked_page 1678 */ 1679 if (delalloc_start < *start) 1680 delalloc_start = *start; 1681 1682 /* 1683 * make sure to limit the number of pages we try to lock down 1684 */ 1685 if (delalloc_end + 1 - delalloc_start > max_bytes) 1686 delalloc_end = delalloc_start + max_bytes - 1; 1687 1688 /* step two, lock all the pages after the page that has start */ 1689 ret = lock_delalloc_pages(inode, locked_page, 1690 delalloc_start, delalloc_end); 1691 if (ret == -EAGAIN) { 1692 /* some of the pages are gone, lets avoid looping by 1693 * shortening the size of the delalloc range we're searching 1694 */ 1695 free_extent_state(cached_state); 1696 cached_state = NULL; 1697 if (!loops) { 1698 max_bytes = PAGE_CACHE_SIZE; 1699 loops = 1; 1700 goto again; 1701 } else { 1702 found = 0; 1703 goto out_failed; 1704 } 1705 } 1706 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */ 1707 1708 /* step three, lock the state bits for the whole range */ 1709 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state); 1710 1711 /* then test to make sure it is all still delalloc */ 1712 ret = test_range_bit(tree, delalloc_start, delalloc_end, 1713 EXTENT_DELALLOC, 1, cached_state); 1714 if (!ret) { 1715 unlock_extent_cached(tree, delalloc_start, delalloc_end, 1716 &cached_state, GFP_NOFS); 1717 __unlock_for_delalloc(inode, locked_page, 1718 delalloc_start, delalloc_end); 1719 cond_resched(); 1720 goto again; 1721 } 1722 free_extent_state(cached_state); 1723 *start = delalloc_start; 1724 *end = delalloc_end; 1725 out_failed: 1726 return found; 1727 } 1728 1729 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, 1730 struct page *locked_page, 1731 unsigned long clear_bits, 1732 unsigned long page_ops) 1733 { 1734 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 1735 int ret; 1736 struct page *pages[16]; 1737 unsigned long index = start >> PAGE_CACHE_SHIFT; 1738 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1739 unsigned long nr_pages = end_index - index + 1; 1740 int i; 1741 1742 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); 1743 if (page_ops == 0) 1744 return 0; 1745 1746 while (nr_pages > 0) { 1747 ret = find_get_pages_contig(inode->i_mapping, index, 1748 min_t(unsigned long, 1749 nr_pages, ARRAY_SIZE(pages)), pages); 1750 for (i = 0; i < ret; i++) { 1751 1752 if (page_ops & PAGE_SET_PRIVATE2) 1753 SetPagePrivate2(pages[i]); 1754 1755 if (pages[i] == locked_page) { 1756 page_cache_release(pages[i]); 1757 continue; 1758 } 1759 if (page_ops & PAGE_CLEAR_DIRTY) 
1760 clear_page_dirty_for_io(pages[i]); 1761 if (page_ops & PAGE_SET_WRITEBACK) 1762 set_page_writeback(pages[i]); 1763 if (page_ops & PAGE_END_WRITEBACK) 1764 end_page_writeback(pages[i]); 1765 if (page_ops & PAGE_UNLOCK) 1766 unlock_page(pages[i]); 1767 page_cache_release(pages[i]); 1768 } 1769 nr_pages -= ret; 1770 index += ret; 1771 cond_resched(); 1772 } 1773 return 0; 1774 } 1775 1776 /* 1777 * count the number of bytes in the tree that have a given bit(s) 1778 * set. This can be fairly slow, except for EXTENT_DIRTY which is 1779 * cached. The total number found is returned. 1780 */ 1781 u64 count_range_bits(struct extent_io_tree *tree, 1782 u64 *start, u64 search_end, u64 max_bytes, 1783 unsigned long bits, int contig) 1784 { 1785 struct rb_node *node; 1786 struct extent_state *state; 1787 u64 cur_start = *start; 1788 u64 total_bytes = 0; 1789 u64 last = 0; 1790 int found = 0; 1791 1792 if (WARN_ON(search_end <= cur_start)) 1793 return 0; 1794 1795 spin_lock(&tree->lock); 1796 if (cur_start == 0 && bits == EXTENT_DIRTY) { 1797 total_bytes = tree->dirty_bytes; 1798 goto out; 1799 } 1800 /* 1801 * this search will find all the extents that end after 1802 * our range starts. 1803 */ 1804 node = tree_search(tree, cur_start); 1805 if (!node) 1806 goto out; 1807 1808 while (1) { 1809 state = rb_entry(node, struct extent_state, rb_node); 1810 if (state->start > search_end) 1811 break; 1812 if (contig && found && state->start > last + 1) 1813 break; 1814 if (state->end >= cur_start && (state->state & bits) == bits) { 1815 total_bytes += min(search_end, state->end) + 1 - 1816 max(cur_start, state->start); 1817 if (total_bytes >= max_bytes) 1818 break; 1819 if (!found) { 1820 *start = max(cur_start, state->start); 1821 found = 1; 1822 } 1823 last = state->end; 1824 } else if (contig && found) { 1825 break; 1826 } 1827 node = rb_next(node); 1828 if (!node) 1829 break; 1830 } 1831 out: 1832 spin_unlock(&tree->lock); 1833 return total_bytes; 1834 } 1835 1836 /* 1837 * set the private field for a given byte offset in the tree. If there isn't 1838 * an extent_state there already, this does nothing. 1839 */ 1840 static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) 1841 { 1842 struct rb_node *node; 1843 struct extent_state *state; 1844 int ret = 0; 1845 1846 spin_lock(&tree->lock); 1847 /* 1848 * this search will find all the extents that end after 1849 * our range starts. 1850 */ 1851 node = tree_search(tree, start); 1852 if (!node) { 1853 ret = -ENOENT; 1854 goto out; 1855 } 1856 state = rb_entry(node, struct extent_state, rb_node); 1857 if (state->start != start) { 1858 ret = -ENOENT; 1859 goto out; 1860 } 1861 state->private = private; 1862 out: 1863 spin_unlock(&tree->lock); 1864 return ret; 1865 } 1866 1867 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) 1868 { 1869 struct rb_node *node; 1870 struct extent_state *state; 1871 int ret = 0; 1872 1873 spin_lock(&tree->lock); 1874 /* 1875 * this search will find all the extents that end after 1876 * our range starts. 1877 */ 1878 node = tree_search(tree, start); 1879 if (!node) { 1880 ret = -ENOENT; 1881 goto out; 1882 } 1883 state = rb_entry(node, struct extent_state, rb_node); 1884 if (state->start != start) { 1885 ret = -ENOENT; 1886 goto out; 1887 } 1888 *private = state->private; 1889 out: 1890 spin_unlock(&tree->lock); 1891 return ret; 1892 } 1893 1894 /* 1895 * searches a range in the state tree for a given mask. 
1896 * If 'filled' == 1, this returns 1 only if every extent in the tree 1897 * has the bits set. Otherwise, 1 is returned if any bit in the 1898 * range is found set. 1899 */ 1900 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, 1901 unsigned long bits, int filled, struct extent_state *cached) 1902 { 1903 struct extent_state *state = NULL; 1904 struct rb_node *node; 1905 int bitset = 0; 1906 1907 spin_lock(&tree->lock); 1908 if (cached && cached->tree && cached->start <= start && 1909 cached->end > start) 1910 node = &cached->rb_node; 1911 else 1912 node = tree_search(tree, start); 1913 while (node && start <= end) { 1914 state = rb_entry(node, struct extent_state, rb_node); 1915 1916 if (filled && state->start > start) { 1917 bitset = 0; 1918 break; 1919 } 1920 1921 if (state->start > end) 1922 break; 1923 1924 if (state->state & bits) { 1925 bitset = 1; 1926 if (!filled) 1927 break; 1928 } else if (filled) { 1929 bitset = 0; 1930 break; 1931 } 1932 1933 if (state->end == (u64)-1) 1934 break; 1935 1936 start = state->end + 1; 1937 if (start > end) 1938 break; 1939 node = rb_next(node); 1940 if (!node) { 1941 if (filled) 1942 bitset = 0; 1943 break; 1944 } 1945 } 1946 spin_unlock(&tree->lock); 1947 return bitset; 1948 } 1949 1950 /* 1951 * helper function to set a given page up to date if all the 1952 * extents in the tree for that page are up to date 1953 */ 1954 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) 1955 { 1956 u64 start = page_offset(page); 1957 u64 end = start + PAGE_CACHE_SIZE - 1; 1958 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) 1959 SetPageUptodate(page); 1960 } 1961 1962 /* 1963 * When IO fails, either with EIO or csum verification fails, we 1964 * try other mirrors that might have a good copy of the data. This 1965 * io_failure_record is used to record state as we go through all the 1966 * mirrors. If another mirror has good data, the page is set up to date 1967 * and things continue. If a good mirror can't be found, the original 1968 * bio end_io callback is called to indicate things have failed. 1969 */ 1970 struct io_failure_record { 1971 struct page *page; 1972 u64 start; 1973 u64 len; 1974 u64 logical; 1975 unsigned long bio_flags; 1976 int this_mirror; 1977 int failed_mirror; 1978 int in_validation; 1979 }; 1980 1981 static int free_io_failure(struct inode *inode, struct io_failure_record *rec, 1982 int did_repair) 1983 { 1984 int ret; 1985 int err = 0; 1986 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 1987 1988 set_state_private(failure_tree, rec->start, 0); 1989 ret = clear_extent_bits(failure_tree, rec->start, 1990 rec->start + rec->len - 1, 1991 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); 1992 if (ret) 1993 err = ret; 1994 1995 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start, 1996 rec->start + rec->len - 1, 1997 EXTENT_DAMAGED, GFP_NOFS); 1998 if (ret && !err) 1999 err = ret; 2000 2001 kfree(rec); 2002 return err; 2003 } 2004 2005 /* 2006 * this bypasses the standard btrfs submit functions deliberately, as 2007 * the standard behavior is to write all copies in a raid setup. here we only 2008 * want to write the one bad copy. so we do the mapping for ourselves and issue 2009 * submit_bio directly. 2010 * to avoid any synchronization issues, wait for the data after writing, which 2011 * actually prevents the read that triggered the error from finishing. 2012 * currently, there can be no more than two copies of every data bit. 
thus, 2013 * exactly one rewrite is required. 2014 */ 2015 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, 2016 u64 length, u64 logical, struct page *page, 2017 int mirror_num) 2018 { 2019 struct bio *bio; 2020 struct btrfs_device *dev; 2021 u64 map_length = 0; 2022 u64 sector; 2023 struct btrfs_bio *bbio = NULL; 2024 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; 2025 int ret; 2026 2027 ASSERT(!(fs_info->sb->s_flags & MS_RDONLY)); 2028 BUG_ON(!mirror_num); 2029 2030 /* we can't repair anything in raid56 yet */ 2031 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num)) 2032 return 0; 2033 2034 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); 2035 if (!bio) 2036 return -EIO; 2037 bio->bi_iter.bi_size = 0; 2038 map_length = length; 2039 2040 ret = btrfs_map_block(fs_info, WRITE, logical, 2041 &map_length, &bbio, mirror_num); 2042 if (ret) { 2043 bio_put(bio); 2044 return -EIO; 2045 } 2046 BUG_ON(mirror_num != bbio->mirror_num); 2047 sector = bbio->stripes[mirror_num-1].physical >> 9; 2048 bio->bi_iter.bi_sector = sector; 2049 dev = bbio->stripes[mirror_num-1].dev; 2050 kfree(bbio); 2051 if (!dev || !dev->bdev || !dev->writeable) { 2052 bio_put(bio); 2053 return -EIO; 2054 } 2055 bio->bi_bdev = dev->bdev; 2056 bio_add_page(bio, page, length, start - page_offset(page)); 2057 2058 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) { 2059 /* try to remap that extent elsewhere? */ 2060 bio_put(bio); 2061 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 2062 return -EIO; 2063 } 2064 2065 printk_ratelimited_in_rcu(KERN_INFO 2066 "BTRFS: read error corrected: ino %lu off %llu " 2067 "(dev %s sector %llu)\n", page->mapping->host->i_ino, 2068 start, rcu_str_deref(dev->name), sector); 2069 2070 bio_put(bio); 2071 return 0; 2072 } 2073 2074 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb, 2075 int mirror_num) 2076 { 2077 u64 start = eb->start; 2078 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len); 2079 int ret = 0; 2080 2081 if (root->fs_info->sb->s_flags & MS_RDONLY) 2082 return -EROFS; 2083 2084 for (i = 0; i < num_pages; i++) { 2085 struct page *p = extent_buffer_page(eb, i); 2086 ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE, 2087 start, p, mirror_num); 2088 if (ret) 2089 break; 2090 start += PAGE_CACHE_SIZE; 2091 } 2092 2093 return ret; 2094 } 2095 2096 /* 2097 * each time an IO finishes, we do a fast check in the IO failure tree 2098 * to see if we need to process or clean up an io_failure_record 2099 */ 2100 static int clean_io_failure(u64 start, struct page *page) 2101 { 2102 u64 private; 2103 u64 private_failure; 2104 struct io_failure_record *failrec; 2105 struct inode *inode = page->mapping->host; 2106 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 2107 struct extent_state *state; 2108 int num_copies; 2109 int did_repair = 0; 2110 int ret; 2111 2112 private = 0; 2113 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, 2114 (u64)-1, 1, EXTENT_DIRTY, 0); 2115 if (!ret) 2116 return 0; 2117 2118 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start, 2119 &private_failure); 2120 if (ret) 2121 return 0; 2122 2123 failrec = (struct io_failure_record *)(unsigned long) private_failure; 2124 BUG_ON(!failrec->this_mirror); 2125 2126 if (failrec->in_validation) { 2127 /* there was no real error, just free the record */ 2128 pr_debug("clean_io_failure: freeing dummy error at %llu\n", 2129 failrec->start); 2130 did_repair = 1; 2131 goto out; 2132 } 2133 if 
(fs_info->sb->s_flags & MS_RDONLY) 2134 goto out; 2135 2136 spin_lock(&BTRFS_I(inode)->io_tree.lock); 2137 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, 2138 failrec->start, 2139 EXTENT_LOCKED); 2140 spin_unlock(&BTRFS_I(inode)->io_tree.lock); 2141 2142 if (state && state->start <= failrec->start && 2143 state->end >= failrec->start + failrec->len - 1) { 2144 num_copies = btrfs_num_copies(fs_info, failrec->logical, 2145 failrec->len); 2146 if (num_copies > 1) { 2147 ret = repair_io_failure(fs_info, start, failrec->len, 2148 failrec->logical, page, 2149 failrec->failed_mirror); 2150 did_repair = !ret; 2151 } 2152 ret = 0; 2153 } 2154 2155 out: 2156 if (!ret) 2157 ret = free_io_failure(inode, failrec, did_repair); 2158 2159 return ret; 2160 } 2161 2162 /* 2163 * this is a generic handler for readpage errors (default 2164 * readpage_io_failed_hook). if other copies exist, read those and write back 2165 * good data to the failed position. does not investigate in remapping the 2166 * failed extent elsewhere, hoping the device will be smart enough to do this as 2167 * needed 2168 */ 2169 2170 static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, 2171 struct page *page, u64 start, u64 end, 2172 int failed_mirror) 2173 { 2174 struct io_failure_record *failrec = NULL; 2175 u64 private; 2176 struct extent_map *em; 2177 struct inode *inode = page->mapping->host; 2178 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 2179 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 2180 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 2181 struct bio *bio; 2182 struct btrfs_io_bio *btrfs_failed_bio; 2183 struct btrfs_io_bio *btrfs_bio; 2184 int num_copies; 2185 int ret; 2186 int read_mode; 2187 u64 logical; 2188 2189 BUG_ON(failed_bio->bi_rw & REQ_WRITE); 2190 2191 ret = get_state_private(failure_tree, start, &private); 2192 if (ret) { 2193 failrec = kzalloc(sizeof(*failrec), GFP_NOFS); 2194 if (!failrec) 2195 return -ENOMEM; 2196 failrec->start = start; 2197 failrec->len = end - start + 1; 2198 failrec->this_mirror = 0; 2199 failrec->bio_flags = 0; 2200 failrec->in_validation = 0; 2201 2202 read_lock(&em_tree->lock); 2203 em = lookup_extent_mapping(em_tree, start, failrec->len); 2204 if (!em) { 2205 read_unlock(&em_tree->lock); 2206 kfree(failrec); 2207 return -EIO; 2208 } 2209 2210 if (em->start > start || em->start + em->len <= start) { 2211 free_extent_map(em); 2212 em = NULL; 2213 } 2214 read_unlock(&em_tree->lock); 2215 2216 if (!em) { 2217 kfree(failrec); 2218 return -EIO; 2219 } 2220 logical = start - em->start; 2221 logical = em->block_start + logical; 2222 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 2223 logical = em->block_start; 2224 failrec->bio_flags = EXTENT_BIO_COMPRESSED; 2225 extent_set_compress_type(&failrec->bio_flags, 2226 em->compress_type); 2227 } 2228 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, " 2229 "len=%llu\n", logical, start, failrec->len); 2230 failrec->logical = logical; 2231 free_extent_map(em); 2232 2233 /* set the bits in the private failure tree */ 2234 ret = set_extent_bits(failure_tree, start, end, 2235 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); 2236 if (ret >= 0) 2237 ret = set_state_private(failure_tree, start, 2238 (u64)(unsigned long)failrec); 2239 /* set the bits in the inode's tree */ 2240 if (ret >= 0) 2241 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED, 2242 GFP_NOFS); 2243 if (ret < 0) { 2244 kfree(failrec); 2245 return ret; 2246 } 2247 } else { 2248 failrec = 
(struct io_failure_record *)(unsigned long)private; 2249 pr_debug("bio_readpage_error: (found) logical=%llu, " 2250 "start=%llu, len=%llu, validation=%d\n", 2251 failrec->logical, failrec->start, failrec->len, 2252 failrec->in_validation); 2253 /* 2254 * when data can be on disk more than twice, add to failrec here 2255 * (e.g. with a list for failed_mirror) to make 2256 * clean_io_failure() clean all those errors at once. 2257 */ 2258 } 2259 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info, 2260 failrec->logical, failrec->len); 2261 if (num_copies == 1) { 2262 /* 2263 * we only have a single copy of the data, so don't bother with 2264 * all the retry and error correction code that follows. no 2265 * matter what the error is, it is very likely to persist. 2266 */ 2267 pr_debug("bio_readpage_error: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n", 2268 num_copies, failrec->this_mirror, failed_mirror); 2269 free_io_failure(inode, failrec, 0); 2270 return -EIO; 2271 } 2272 2273 /* 2274 * there are two premises: 2275 * a) deliver good data to the caller 2276 * b) correct the bad sectors on disk 2277 */ 2278 if (failed_bio->bi_vcnt > 1) { 2279 /* 2280 * to fulfill b), we need to know the exact failing sectors, as 2281 * we don't want to rewrite any more than the failed ones. thus, 2282 * we need separate read requests for the failed bio 2283 * 2284 * if the following BUG_ON triggers, our validation request got 2285 * merged. we need separate requests for our algorithm to work. 2286 */ 2287 BUG_ON(failrec->in_validation); 2288 failrec->in_validation = 1; 2289 failrec->this_mirror = failed_mirror; 2290 read_mode = READ_SYNC | REQ_FAILFAST_DEV; 2291 } else { 2292 /* 2293 * we're ready to fulfill a) and b) alongside. get a good copy 2294 * of the failed sector and if we succeed, we have setup 2295 * everything for repair_io_failure to do the rest for us. 
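 *
 * For example (illustrative numbers): with num_copies == 2 and
 * failed_mirror == 1, the code below advances failrec->this_mirror to 2
 * and resubmits the read against the second copy; if that read fails as
 * well, the next pass finds this_mirror > num_copies and gives up with
 * -EIO.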
2296 */ 2297 if (failrec->in_validation) { 2298 BUG_ON(failrec->this_mirror != failed_mirror); 2299 failrec->in_validation = 0; 2300 failrec->this_mirror = 0; 2301 } 2302 failrec->failed_mirror = failed_mirror; 2303 failrec->this_mirror++; 2304 if (failrec->this_mirror == failed_mirror) 2305 failrec->this_mirror++; 2306 read_mode = READ_SYNC; 2307 } 2308 2309 if (failrec->this_mirror > num_copies) { 2310 pr_debug("bio_readpage_error: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n", 2311 num_copies, failrec->this_mirror, failed_mirror); 2312 free_io_failure(inode, failrec, 0); 2313 return -EIO; 2314 } 2315 2316 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); 2317 if (!bio) { 2318 free_io_failure(inode, failrec, 0); 2319 return -EIO; 2320 } 2321 bio->bi_end_io = failed_bio->bi_end_io; 2322 bio->bi_iter.bi_sector = failrec->logical >> 9; 2323 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; 2324 bio->bi_iter.bi_size = 0; 2325 2326 btrfs_failed_bio = btrfs_io_bio(failed_bio); 2327 if (btrfs_failed_bio->csum) { 2328 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 2329 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); 2330 2331 btrfs_bio = btrfs_io_bio(bio); 2332 btrfs_bio->csum = btrfs_bio->csum_inline; 2333 phy_offset >>= inode->i_sb->s_blocksize_bits; 2334 phy_offset *= csum_size; 2335 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + phy_offset, 2336 csum_size); 2337 } 2338 2339 bio_add_page(bio, page, failrec->len, start - page_offset(page)); 2340 2341 pr_debug("bio_readpage_error: submitting new read[%#x] to " 2342 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode, 2343 failrec->this_mirror, num_copies, failrec->in_validation); 2344 2345 ret = tree->ops->submit_bio_hook(inode, read_mode, bio, 2346 failrec->this_mirror, 2347 failrec->bio_flags, 0); 2348 return ret; 2349 } 2350 2351 /* lots and lots of room for performance fixes in the end_bio funcs */ 2352 2353 int end_extent_writepage(struct page *page, int err, u64 start, u64 end) 2354 { 2355 int uptodate = (err == 0); 2356 struct extent_io_tree *tree; 2357 int ret = 0; 2358 2359 tree = &BTRFS_I(page->mapping->host)->io_tree; 2360 2361 if (tree->ops && tree->ops->writepage_end_io_hook) { 2362 ret = tree->ops->writepage_end_io_hook(page, start, 2363 end, NULL, uptodate); 2364 if (ret) 2365 uptodate = 0; 2366 } 2367 2368 if (!uptodate) { 2369 ClearPageUptodate(page); 2370 SetPageError(page); 2371 ret = ret < 0 ? ret : -EIO; 2372 mapping_set_error(page->mapping, ret); 2373 } 2374 return 0; 2375 } 2376 2377 /* 2378 * after a writepage IO is done, we need to: 2379 * clear the uptodate bits on error 2380 * clear the writeback bits in the extent tree for this IO 2381 * end_page_writeback if the page has no more pending IO 2382 * 2383 * Scheduling is not allowed, so the extent state tree is expected 2384 * to have one and only one object corresponding to this IO. 2385 */ 2386 static void end_bio_extent_writepage(struct bio *bio, int err) 2387 { 2388 struct bio_vec *bvec; 2389 u64 start; 2390 u64 end; 2391 int i; 2392 2393 bio_for_each_segment_all(bvec, bio, i) { 2394 struct page *page = bvec->bv_page; 2395 2396 /* We always issue full-page reads, but if some block 2397 * in a page fails to read, blk_update_request() will 2398 * advance bv_offset and adjust bv_len to compensate. 2399 * Print a warning for nonzero offsets, and an error 2400 * if they don't add up to a full page. 
*/ 2401 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { 2402 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) 2403 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, 2404 "partial page write in btrfs with offset %u and length %u", 2405 bvec->bv_offset, bvec->bv_len); 2406 else 2407 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info, 2408 "incomplete page write in btrfs with offset %u and " 2409 "length %u", 2410 bvec->bv_offset, bvec->bv_len); 2411 } 2412 2413 start = page_offset(page); 2414 end = start + bvec->bv_offset + bvec->bv_len - 1; 2415 2416 if (end_extent_writepage(page, err, start, end)) 2417 continue; 2418 2419 end_page_writeback(page); 2420 } 2421 2422 bio_put(bio); 2423 } 2424 2425 static void 2426 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len, 2427 int uptodate) 2428 { 2429 struct extent_state *cached = NULL; 2430 u64 end = start + len - 1; 2431 2432 if (uptodate && tree->track_uptodate) 2433 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); 2434 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); 2435 } 2436 2437 /* 2438 * after a readpage IO is done, we need to: 2439 * clear the uptodate bits on error 2440 * set the uptodate bits if things worked 2441 * set the page up to date if all extents in the tree are uptodate 2442 * clear the lock bit in the extent tree 2443 * unlock the page if there are no other extents locked for it 2444 * 2445 * Scheduling is not allowed, so the extent state tree is expected 2446 * to have one and only one object corresponding to this IO. 2447 */ 2448 static void end_bio_extent_readpage(struct bio *bio, int err) 2449 { 2450 struct bio_vec *bvec; 2451 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 2452 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 2453 struct extent_io_tree *tree; 2454 u64 offset = 0; 2455 u64 start; 2456 u64 end; 2457 u64 len; 2458 u64 extent_start = 0; 2459 u64 extent_len = 0; 2460 int mirror; 2461 int ret; 2462 int i; 2463 2464 if (err) 2465 uptodate = 0; 2466 2467 bio_for_each_segment_all(bvec, bio, i) { 2468 struct page *page = bvec->bv_page; 2469 struct inode *inode = page->mapping->host; 2470 2471 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " 2472 "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err, 2473 io_bio->mirror_num); 2474 tree = &BTRFS_I(inode)->io_tree; 2475 2476 /* We always issue full-page reads, but if some block 2477 * in a page fails to read, blk_update_request() will 2478 * advance bv_offset and adjust bv_len to compensate. 2479 * Print a warning for nonzero offsets, and an error 2480 * if they don't add up to a full page. 
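 * For example, assuming 4K pages: bv_offset 1024 with bv_len 3072 still
 * reaches the end of the page and only triggers the "incomplete page
 * read" message, while bv_offset 0 with bv_len 2048 does not add up to
 * PAGE_CACHE_SIZE and is reported as a "partial page read" error.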
*/ 2481 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { 2482 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) 2483 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, 2484 "partial page read in btrfs with offset %u and length %u", 2485 bvec->bv_offset, bvec->bv_len); 2486 else 2487 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info, 2488 "incomplete page read in btrfs with offset %u and " 2489 "length %u", 2490 bvec->bv_offset, bvec->bv_len); 2491 } 2492 2493 start = page_offset(page); 2494 end = start + bvec->bv_offset + bvec->bv_len - 1; 2495 len = bvec->bv_len; 2496 2497 mirror = io_bio->mirror_num; 2498 if (likely(uptodate && tree->ops && 2499 tree->ops->readpage_end_io_hook)) { 2500 ret = tree->ops->readpage_end_io_hook(io_bio, offset, 2501 page, start, end, 2502 mirror); 2503 if (ret) 2504 uptodate = 0; 2505 else 2506 clean_io_failure(start, page); 2507 } 2508 2509 if (likely(uptodate)) 2510 goto readpage_ok; 2511 2512 if (tree->ops && tree->ops->readpage_io_failed_hook) { 2513 ret = tree->ops->readpage_io_failed_hook(page, mirror); 2514 if (!ret && !err && 2515 test_bit(BIO_UPTODATE, &bio->bi_flags)) 2516 uptodate = 1; 2517 } else { 2518 /* 2519 * The generic bio_readpage_error handles errors the 2520 * following way: If possible, new read requests are 2521 * created and submitted and will end up in 2522 * end_bio_extent_readpage as well (if we're lucky, not 2523 * in the !uptodate case). In that case it returns 0 and 2524 * we just go on with the next page in our bio. If it 2525 * can't handle the error it will return -EIO and we 2526 * remain responsible for that page. 2527 */ 2528 ret = bio_readpage_error(bio, offset, page, start, end, 2529 mirror); 2530 if (ret == 0) { 2531 uptodate = 2532 test_bit(BIO_UPTODATE, &bio->bi_flags); 2533 if (err) 2534 uptodate = 0; 2535 continue; 2536 } 2537 } 2538 readpage_ok: 2539 if (likely(uptodate)) { 2540 loff_t i_size = i_size_read(inode); 2541 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 2542 unsigned offset; 2543 2544 /* Zero out the end if this page straddles i_size */ 2545 offset = i_size & (PAGE_CACHE_SIZE-1); 2546 if (page->index == end_index && offset) 2547 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 2548 SetPageUptodate(page); 2549 } else { 2550 ClearPageUptodate(page); 2551 SetPageError(page); 2552 } 2553 unlock_page(page); 2554 offset += len; 2555 2556 if (unlikely(!uptodate)) { 2557 if (extent_len) { 2558 endio_readpage_release_extent(tree, 2559 extent_start, 2560 extent_len, 1); 2561 extent_start = 0; 2562 extent_len = 0; 2563 } 2564 endio_readpage_release_extent(tree, start, 2565 end - start + 1, 0); 2566 } else if (!extent_len) { 2567 extent_start = start; 2568 extent_len = end + 1 - start; 2569 } else if (extent_start + extent_len == start) { 2570 extent_len += end + 1 - start; 2571 } else { 2572 endio_readpage_release_extent(tree, extent_start, 2573 extent_len, uptodate); 2574 extent_start = start; 2575 extent_len = end + 1 - start; 2576 } 2577 } 2578 2579 if (extent_len) 2580 endio_readpage_release_extent(tree, extent_start, extent_len, 2581 uptodate); 2582 if (io_bio->end_io) 2583 io_bio->end_io(io_bio, err); 2584 bio_put(bio); 2585 } 2586 2587 /* 2588 * this allocates from the btrfs_bioset. 
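 * The bioset was created with a front pad of offsetof(struct btrfs_io_bio,
 * bio), so every bio handed out here is embedded at the tail of a
 * struct btrfs_io_bio.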
We're returning a bio right now 2589 * but you can call btrfs_io_bio for the appropriate container_of magic 2590 */ 2591 struct bio * 2592 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, 2593 gfp_t gfp_flags) 2594 { 2595 struct btrfs_io_bio *btrfs_bio; 2596 struct bio *bio; 2597 2598 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset); 2599 2600 if (bio == NULL && (current->flags & PF_MEMALLOC)) { 2601 while (!bio && (nr_vecs /= 2)) { 2602 bio = bio_alloc_bioset(gfp_flags, 2603 nr_vecs, btrfs_bioset); 2604 } 2605 } 2606 2607 if (bio) { 2608 bio->bi_bdev = bdev; 2609 bio->bi_iter.bi_sector = first_sector; 2610 btrfs_bio = btrfs_io_bio(bio); 2611 btrfs_bio->csum = NULL; 2612 btrfs_bio->csum_allocated = NULL; 2613 btrfs_bio->end_io = NULL; 2614 } 2615 return bio; 2616 } 2617 2618 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask) 2619 { 2620 return bio_clone_bioset(bio, gfp_mask, btrfs_bioset); 2621 } 2622 2623 2624 /* this also allocates from the btrfs_bioset */ 2625 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) 2626 { 2627 struct btrfs_io_bio *btrfs_bio; 2628 struct bio *bio; 2629 2630 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset); 2631 if (bio) { 2632 btrfs_bio = btrfs_io_bio(bio); 2633 btrfs_bio->csum = NULL; 2634 btrfs_bio->csum_allocated = NULL; 2635 btrfs_bio->end_io = NULL; 2636 } 2637 return bio; 2638 } 2639 2640 2641 static int __must_check submit_one_bio(int rw, struct bio *bio, 2642 int mirror_num, unsigned long bio_flags) 2643 { 2644 int ret = 0; 2645 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 2646 struct page *page = bvec->bv_page; 2647 struct extent_io_tree *tree = bio->bi_private; 2648 u64 start; 2649 2650 start = page_offset(page) + bvec->bv_offset; 2651 2652 bio->bi_private = NULL; 2653 2654 bio_get(bio); 2655 2656 if (tree->ops && tree->ops->submit_bio_hook) 2657 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, 2658 mirror_num, bio_flags, start); 2659 else 2660 btrfsic_submit_bio(rw, bio); 2661 2662 if (bio_flagged(bio, BIO_EOPNOTSUPP)) 2663 ret = -EOPNOTSUPP; 2664 bio_put(bio); 2665 return ret; 2666 } 2667 2668 static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page, 2669 unsigned long offset, size_t size, struct bio *bio, 2670 unsigned long bio_flags) 2671 { 2672 int ret = 0; 2673 if (tree->ops && tree->ops->merge_bio_hook) 2674 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio, 2675 bio_flags); 2676 BUG_ON(ret < 0); 2677 return ret; 2678 2679 } 2680 2681 static int submit_extent_page(int rw, struct extent_io_tree *tree, 2682 struct page *page, sector_t sector, 2683 size_t size, unsigned long offset, 2684 struct block_device *bdev, 2685 struct bio **bio_ret, 2686 unsigned long max_pages, 2687 bio_end_io_t end_io_func, 2688 int mirror_num, 2689 unsigned long prev_bio_flags, 2690 unsigned long bio_flags) 2691 { 2692 int ret = 0; 2693 struct bio *bio; 2694 int nr; 2695 int contig = 0; 2696 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED; 2697 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; 2698 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE); 2699 2700 if (bio_ret && *bio_ret) { 2701 bio = *bio_ret; 2702 if (old_compressed) 2703 contig = bio->bi_iter.bi_sector == sector; 2704 else 2705 contig = bio_end_sector(bio) == sector; 2706 2707 if (prev_bio_flags != bio_flags || !contig || 2708 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || 2709 bio_add_page(bio, page, page_size, offset) < page_size) { 
2710 ret = submit_one_bio(rw, bio, mirror_num, 2711 prev_bio_flags); 2712 if (ret < 0) 2713 return ret; 2714 bio = NULL; 2715 } else { 2716 return 0; 2717 } 2718 } 2719 if (this_compressed) 2720 nr = BIO_MAX_PAGES; 2721 else 2722 nr = bio_get_nr_vecs(bdev); 2723 2724 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); 2725 if (!bio) 2726 return -ENOMEM; 2727 2728 bio_add_page(bio, page, page_size, offset); 2729 bio->bi_end_io = end_io_func; 2730 bio->bi_private = tree; 2731 2732 if (bio_ret) 2733 *bio_ret = bio; 2734 else 2735 ret = submit_one_bio(rw, bio, mirror_num, bio_flags); 2736 2737 return ret; 2738 } 2739 2740 static void attach_extent_buffer_page(struct extent_buffer *eb, 2741 struct page *page) 2742 { 2743 if (!PagePrivate(page)) { 2744 SetPagePrivate(page); 2745 page_cache_get(page); 2746 set_page_private(page, (unsigned long)eb); 2747 } else { 2748 WARN_ON(page->private != (unsigned long)eb); 2749 } 2750 } 2751 2752 void set_page_extent_mapped(struct page *page) 2753 { 2754 if (!PagePrivate(page)) { 2755 SetPagePrivate(page); 2756 page_cache_get(page); 2757 set_page_private(page, EXTENT_PAGE_PRIVATE); 2758 } 2759 } 2760 2761 static struct extent_map * 2762 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, 2763 u64 start, u64 len, get_extent_t *get_extent, 2764 struct extent_map **em_cached) 2765 { 2766 struct extent_map *em; 2767 2768 if (em_cached && *em_cached) { 2769 em = *em_cached; 2770 if (extent_map_in_tree(em) && start >= em->start && 2771 start < extent_map_end(em)) { 2772 atomic_inc(&em->refs); 2773 return em; 2774 } 2775 2776 free_extent_map(em); 2777 *em_cached = NULL; 2778 } 2779 2780 em = get_extent(inode, page, pg_offset, start, len, 0); 2781 if (em_cached && !IS_ERR_OR_NULL(em)) { 2782 BUG_ON(*em_cached); 2783 atomic_inc(&em->refs); 2784 *em_cached = em; 2785 } 2786 return em; 2787 } 2788 /* 2789 * basic readpage implementation. 
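 * The page is walked in extent-map sized chunks: holes are zeroed in
 * place, ranges the io tree already marks EXTENT_UPTODATE are skipped,
 * and ordinary data extents are read via submit_extent_page().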
Locked extent state structs are inserted 2790 * into the tree that are removed when the IO is done (by the end_io 2791 * handlers) 2792 * XXX JDM: This needs looking at to ensure proper page locking 2793 */ 2794 static int __do_readpage(struct extent_io_tree *tree, 2795 struct page *page, 2796 get_extent_t *get_extent, 2797 struct extent_map **em_cached, 2798 struct bio **bio, int mirror_num, 2799 unsigned long *bio_flags, int rw) 2800 { 2801 struct inode *inode = page->mapping->host; 2802 u64 start = page_offset(page); 2803 u64 page_end = start + PAGE_CACHE_SIZE - 1; 2804 u64 end; 2805 u64 cur = start; 2806 u64 extent_offset; 2807 u64 last_byte = i_size_read(inode); 2808 u64 block_start; 2809 u64 cur_end; 2810 sector_t sector; 2811 struct extent_map *em; 2812 struct block_device *bdev; 2813 int ret; 2814 int nr = 0; 2815 int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED; 2816 size_t pg_offset = 0; 2817 size_t iosize; 2818 size_t disk_io_size; 2819 size_t blocksize = inode->i_sb->s_blocksize; 2820 unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED; 2821 2822 set_page_extent_mapped(page); 2823 2824 end = page_end; 2825 if (!PageUptodate(page)) { 2826 if (cleancache_get_page(page) == 0) { 2827 BUG_ON(blocksize != PAGE_SIZE); 2828 unlock_extent(tree, start, end); 2829 goto out; 2830 } 2831 } 2832 2833 if (page->index == last_byte >> PAGE_CACHE_SHIFT) { 2834 char *userpage; 2835 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1); 2836 2837 if (zero_offset) { 2838 iosize = PAGE_CACHE_SIZE - zero_offset; 2839 userpage = kmap_atomic(page); 2840 memset(userpage + zero_offset, 0, iosize); 2841 flush_dcache_page(page); 2842 kunmap_atomic(userpage); 2843 } 2844 } 2845 while (cur <= end) { 2846 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; 2847 2848 if (cur >= last_byte) { 2849 char *userpage; 2850 struct extent_state *cached = NULL; 2851 2852 iosize = PAGE_CACHE_SIZE - pg_offset; 2853 userpage = kmap_atomic(page); 2854 memset(userpage + pg_offset, 0, iosize); 2855 flush_dcache_page(page); 2856 kunmap_atomic(userpage); 2857 set_extent_uptodate(tree, cur, cur + iosize - 1, 2858 &cached, GFP_NOFS); 2859 if (!parent_locked) 2860 unlock_extent_cached(tree, cur, 2861 cur + iosize - 1, 2862 &cached, GFP_NOFS); 2863 break; 2864 } 2865 em = __get_extent_map(inode, page, pg_offset, cur, 2866 end - cur + 1, get_extent, em_cached); 2867 if (IS_ERR_OR_NULL(em)) { 2868 SetPageError(page); 2869 if (!parent_locked) 2870 unlock_extent(tree, cur, end); 2871 break; 2872 } 2873 extent_offset = cur - em->start; 2874 BUG_ON(extent_map_end(em) <= cur); 2875 BUG_ON(end < cur); 2876 2877 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 2878 this_bio_flag |= EXTENT_BIO_COMPRESSED; 2879 extent_set_compress_type(&this_bio_flag, 2880 em->compress_type); 2881 } 2882 2883 iosize = min(extent_map_end(em) - cur, end - cur + 1); 2884 cur_end = min(extent_map_end(em) - 1, end); 2885 iosize = ALIGN(iosize, blocksize); 2886 if (this_bio_flag & EXTENT_BIO_COMPRESSED) { 2887 disk_io_size = em->block_len; 2888 sector = em->block_start >> 9; 2889 } else { 2890 sector = (em->block_start + extent_offset) >> 9; 2891 disk_io_size = iosize; 2892 } 2893 bdev = em->bdev; 2894 block_start = em->block_start; 2895 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 2896 block_start = EXTENT_MAP_HOLE; 2897 free_extent_map(em); 2898 em = NULL; 2899 2900 /* we've found a hole, just zero and go on */ 2901 if (block_start == EXTENT_MAP_HOLE) { 2902 char *userpage; 2903 struct extent_state *cached = NULL; 2904 2905 
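/* note that preallocated (unwritten) extents were folded into the hole case above, so they read back as zeros here as well */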
userpage = kmap_atomic(page); 2906 memset(userpage + pg_offset, 0, iosize); 2907 flush_dcache_page(page); 2908 kunmap_atomic(userpage); 2909 2910 set_extent_uptodate(tree, cur, cur + iosize - 1, 2911 &cached, GFP_NOFS); 2912 unlock_extent_cached(tree, cur, cur + iosize - 1, 2913 &cached, GFP_NOFS); 2914 cur = cur + iosize; 2915 pg_offset += iosize; 2916 continue; 2917 } 2918 /* the get_extent function already copied into the page */ 2919 if (test_range_bit(tree, cur, cur_end, 2920 EXTENT_UPTODATE, 1, NULL)) { 2921 check_page_uptodate(tree, page); 2922 if (!parent_locked) 2923 unlock_extent(tree, cur, cur + iosize - 1); 2924 cur = cur + iosize; 2925 pg_offset += iosize; 2926 continue; 2927 } 2928 /* we have an inline extent but it didn't get marked up 2929 * to date. Error out 2930 */ 2931 if (block_start == EXTENT_MAP_INLINE) { 2932 SetPageError(page); 2933 if (!parent_locked) 2934 unlock_extent(tree, cur, cur + iosize - 1); 2935 cur = cur + iosize; 2936 pg_offset += iosize; 2937 continue; 2938 } 2939 2940 pnr -= page->index; 2941 ret = submit_extent_page(rw, tree, page, 2942 sector, disk_io_size, pg_offset, 2943 bdev, bio, pnr, 2944 end_bio_extent_readpage, mirror_num, 2945 *bio_flags, 2946 this_bio_flag); 2947 if (!ret) { 2948 nr++; 2949 *bio_flags = this_bio_flag; 2950 } else { 2951 SetPageError(page); 2952 if (!parent_locked) 2953 unlock_extent(tree, cur, cur + iosize - 1); 2954 } 2955 cur = cur + iosize; 2956 pg_offset += iosize; 2957 } 2958 out: 2959 if (!nr) { 2960 if (!PageError(page)) 2961 SetPageUptodate(page); 2962 unlock_page(page); 2963 } 2964 return 0; 2965 } 2966 2967 static inline void __do_contiguous_readpages(struct extent_io_tree *tree, 2968 struct page *pages[], int nr_pages, 2969 u64 start, u64 end, 2970 get_extent_t *get_extent, 2971 struct extent_map **em_cached, 2972 struct bio **bio, int mirror_num, 2973 unsigned long *bio_flags, int rw) 2974 { 2975 struct inode *inode; 2976 struct btrfs_ordered_extent *ordered; 2977 int index; 2978 2979 inode = pages[0]->mapping->host; 2980 while (1) { 2981 lock_extent(tree, start, end); 2982 ordered = btrfs_lookup_ordered_range(inode, start, 2983 end - start + 1); 2984 if (!ordered) 2985 break; 2986 unlock_extent(tree, start, end); 2987 btrfs_start_ordered_extent(inode, ordered, 1); 2988 btrfs_put_ordered_extent(ordered); 2989 } 2990 2991 for (index = 0; index < nr_pages; index++) { 2992 __do_readpage(tree, pages[index], get_extent, em_cached, bio, 2993 mirror_num, bio_flags, rw); 2994 page_cache_release(pages[index]); 2995 } 2996 } 2997 2998 static void __extent_readpages(struct extent_io_tree *tree, 2999 struct page *pages[], 3000 int nr_pages, get_extent_t *get_extent, 3001 struct extent_map **em_cached, 3002 struct bio **bio, int mirror_num, 3003 unsigned long *bio_flags, int rw) 3004 { 3005 u64 start = 0; 3006 u64 end = 0; 3007 u64 page_start; 3008 int index; 3009 int first_index = 0; 3010 3011 for (index = 0; index < nr_pages; index++) { 3012 page_start = page_offset(pages[index]); 3013 if (!end) { 3014 start = page_start; 3015 end = start + PAGE_CACHE_SIZE - 1; 3016 first_index = index; 3017 } else if (end + 1 == page_start) { 3018 end += PAGE_CACHE_SIZE; 3019 } else { 3020 __do_contiguous_readpages(tree, &pages[first_index], 3021 index - first_index, start, 3022 end, get_extent, em_cached, 3023 bio, mirror_num, bio_flags, 3024 rw); 3025 start = page_start; 3026 end = start + PAGE_CACHE_SIZE - 1; 3027 first_index = index; 3028 } 3029 } 3030 3031 if (end) 3032 __do_contiguous_readpages(tree, &pages[first_index], 3033 index 
- first_index, start, 3034 end, get_extent, em_cached, bio, 3035 mirror_num, bio_flags, rw); 3036 } 3037 3038 static int __extent_read_full_page(struct extent_io_tree *tree, 3039 struct page *page, 3040 get_extent_t *get_extent, 3041 struct bio **bio, int mirror_num, 3042 unsigned long *bio_flags, int rw) 3043 { 3044 struct inode *inode = page->mapping->host; 3045 struct btrfs_ordered_extent *ordered; 3046 u64 start = page_offset(page); 3047 u64 end = start + PAGE_CACHE_SIZE - 1; 3048 int ret; 3049 3050 while (1) { 3051 lock_extent(tree, start, end); 3052 ordered = btrfs_lookup_ordered_extent(inode, start); 3053 if (!ordered) 3054 break; 3055 unlock_extent(tree, start, end); 3056 btrfs_start_ordered_extent(inode, ordered, 1); 3057 btrfs_put_ordered_extent(ordered); 3058 } 3059 3060 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, 3061 bio_flags, rw); 3062 return ret; 3063 } 3064 3065 int extent_read_full_page(struct extent_io_tree *tree, struct page *page, 3066 get_extent_t *get_extent, int mirror_num) 3067 { 3068 struct bio *bio = NULL; 3069 unsigned long bio_flags = 0; 3070 int ret; 3071 3072 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num, 3073 &bio_flags, READ); 3074 if (bio) 3075 ret = submit_one_bio(READ, bio, mirror_num, bio_flags); 3076 return ret; 3077 } 3078 3079 int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page, 3080 get_extent_t *get_extent, int mirror_num) 3081 { 3082 struct bio *bio = NULL; 3083 unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED; 3084 int ret; 3085 3086 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, 3087 &bio_flags, READ); 3088 if (bio) 3089 ret = submit_one_bio(READ, bio, mirror_num, bio_flags); 3090 return ret; 3091 } 3092 3093 static noinline void update_nr_written(struct page *page, 3094 struct writeback_control *wbc, 3095 unsigned long nr_written) 3096 { 3097 wbc->nr_to_write -= nr_written; 3098 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && 3099 wbc->range_start == 0 && wbc->range_end == LLONG_MAX)) 3100 page->mapping->writeback_index = page->index + nr_written; 3101 } 3102 3103 /* 3104 * helper for __extent_writepage, doing all of the delayed allocation setup. 3105 * 3106 * This returns 1 if our fill_delalloc function did all the work required 3107 * to write the page (copy into inline extent). In this case the IO has 3108 * been started and the page is already unlocked. 
3109 * 3110 * This returns 0 if all went well (page still locked) 3111 * This returns < 0 if there were errors (page still locked) 3112 */ 3113 static noinline_for_stack int writepage_delalloc(struct inode *inode, 3114 struct page *page, struct writeback_control *wbc, 3115 struct extent_page_data *epd, 3116 u64 delalloc_start, 3117 unsigned long *nr_written) 3118 { 3119 struct extent_io_tree *tree = epd->tree; 3120 u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1; 3121 u64 nr_delalloc; 3122 u64 delalloc_to_write = 0; 3123 u64 delalloc_end = 0; 3124 int ret; 3125 int page_started = 0; 3126 3127 if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc) 3128 return 0; 3129 3130 while (delalloc_end < page_end) { 3131 nr_delalloc = find_lock_delalloc_range(inode, tree, 3132 page, 3133 &delalloc_start, 3134 &delalloc_end, 3135 128 * 1024 * 1024); 3136 if (nr_delalloc == 0) { 3137 delalloc_start = delalloc_end + 1; 3138 continue; 3139 } 3140 ret = tree->ops->fill_delalloc(inode, page, 3141 delalloc_start, 3142 delalloc_end, 3143 &page_started, 3144 nr_written); 3145 /* File system has been set read-only */ 3146 if (ret) { 3147 SetPageError(page); 3148 /* fill_delalloc should be return < 0 for error 3149 * but just in case, we use > 0 here meaning the 3150 * IO is started, so we don't want to return > 0 3151 * unless things are going well. 3152 */ 3153 ret = ret < 0 ? ret : -EIO; 3154 goto done; 3155 } 3156 /* 3157 * delalloc_end is already one less than the total 3158 * length, so we don't subtract one from 3159 * PAGE_CACHE_SIZE 3160 */ 3161 delalloc_to_write += (delalloc_end - delalloc_start + 3162 PAGE_CACHE_SIZE) >> 3163 PAGE_CACHE_SHIFT; 3164 delalloc_start = delalloc_end + 1; 3165 } 3166 if (wbc->nr_to_write < delalloc_to_write) { 3167 int thresh = 8192; 3168 3169 if (delalloc_to_write < thresh * 2) 3170 thresh = delalloc_to_write; 3171 wbc->nr_to_write = min_t(u64, delalloc_to_write, 3172 thresh); 3173 } 3174 3175 /* did the fill delalloc function already unlock and start 3176 * the IO? 3177 */ 3178 if (page_started) { 3179 /* 3180 * we've unlocked the page, so we can't update 3181 * the mapping's writeback index, just update 3182 * nr_to_write. 3183 */ 3184 wbc->nr_to_write -= *nr_written; 3185 return 1; 3186 } 3187 3188 ret = 0; 3189 3190 done: 3191 return ret; 3192 } 3193 3194 /* 3195 * helper for __extent_writepage. This calls the writepage start hooks, 3196 * and does the loop to map the page into extents and bios. 
3197 * 3198 * We return 1 if the IO is started and the page is unlocked, 3199 * 0 if all went well (page still locked) 3200 * < 0 if there were errors (page still locked) 3201 */ 3202 static noinline_for_stack int __extent_writepage_io(struct inode *inode, 3203 struct page *page, 3204 struct writeback_control *wbc, 3205 struct extent_page_data *epd, 3206 loff_t i_size, 3207 unsigned long nr_written, 3208 int write_flags, int *nr_ret) 3209 { 3210 struct extent_io_tree *tree = epd->tree; 3211 u64 start = page_offset(page); 3212 u64 page_end = start + PAGE_CACHE_SIZE - 1; 3213 u64 end; 3214 u64 cur = start; 3215 u64 extent_offset; 3216 u64 block_start; 3217 u64 iosize; 3218 sector_t sector; 3219 struct extent_state *cached_state = NULL; 3220 struct extent_map *em; 3221 struct block_device *bdev; 3222 size_t pg_offset = 0; 3223 size_t blocksize; 3224 int ret = 0; 3225 int nr = 0; 3226 bool compressed; 3227 3228 if (tree->ops && tree->ops->writepage_start_hook) { 3229 ret = tree->ops->writepage_start_hook(page, start, 3230 page_end); 3231 if (ret) { 3232 /* Fixup worker will requeue */ 3233 if (ret == -EBUSY) 3234 wbc->pages_skipped++; 3235 else 3236 redirty_page_for_writepage(wbc, page); 3237 3238 update_nr_written(page, wbc, nr_written); 3239 unlock_page(page); 3240 ret = 1; 3241 goto done_unlocked; 3242 } 3243 } 3244 3245 /* 3246 * we don't want to touch the inode after unlocking the page, 3247 * so we update the mapping writeback index now 3248 */ 3249 update_nr_written(page, wbc, nr_written + 1); 3250 3251 end = page_end; 3252 if (i_size <= start) { 3253 if (tree->ops && tree->ops->writepage_end_io_hook) 3254 tree->ops->writepage_end_io_hook(page, start, 3255 page_end, NULL, 1); 3256 goto done; 3257 } 3258 3259 blocksize = inode->i_sb->s_blocksize; 3260 3261 while (cur <= end) { 3262 u64 em_end; 3263 if (cur >= i_size) { 3264 if (tree->ops && tree->ops->writepage_end_io_hook) 3265 tree->ops->writepage_end_io_hook(page, cur, 3266 page_end, NULL, 1); 3267 break; 3268 } 3269 em = epd->get_extent(inode, page, pg_offset, cur, 3270 end - cur + 1, 1); 3271 if (IS_ERR_OR_NULL(em)) { 3272 SetPageError(page); 3273 ret = PTR_ERR_OR_ZERO(em); 3274 break; 3275 } 3276 3277 extent_offset = cur - em->start; 3278 em_end = extent_map_end(em); 3279 BUG_ON(em_end <= cur); 3280 BUG_ON(end < cur); 3281 iosize = min(em_end - cur, end - cur + 1); 3282 iosize = ALIGN(iosize, blocksize); 3283 sector = (em->block_start + extent_offset) >> 9; 3284 bdev = em->bdev; 3285 block_start = em->block_start; 3286 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 3287 free_extent_map(em); 3288 em = NULL; 3289 3290 /* 3291 * compressed and inline extents are written through other 3292 * paths in the FS 3293 */ 3294 if (compressed || block_start == EXTENT_MAP_HOLE || 3295 block_start == EXTENT_MAP_INLINE) { 3296 /* 3297 * end_io notification does not happen here for 3298 * compressed extents 3299 */ 3300 if (!compressed && tree->ops && 3301 tree->ops->writepage_end_io_hook) 3302 tree->ops->writepage_end_io_hook(page, cur, 3303 cur + iosize - 1, 3304 NULL, 1); 3305 else if (compressed) { 3306 /* we don't want to end_page_writeback on 3307 * a compressed extent. 
this happens 3308 * elsewhere 3309 */ 3310 nr++; 3311 } 3312 3313 cur += iosize; 3314 pg_offset += iosize; 3315 continue; 3316 } 3317 3318 if (tree->ops && tree->ops->writepage_io_hook) { 3319 ret = tree->ops->writepage_io_hook(page, cur, 3320 cur + iosize - 1); 3321 } else { 3322 ret = 0; 3323 } 3324 if (ret) { 3325 SetPageError(page); 3326 } else { 3327 unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1; 3328 3329 set_range_writeback(tree, cur, cur + iosize - 1); 3330 if (!PageWriteback(page)) { 3331 btrfs_err(BTRFS_I(inode)->root->fs_info, 3332 "page %lu not writeback, cur %llu end %llu", 3333 page->index, cur, end); 3334 } 3335 3336 ret = submit_extent_page(write_flags, tree, page, 3337 sector, iosize, pg_offset, 3338 bdev, &epd->bio, max_nr, 3339 end_bio_extent_writepage, 3340 0, 0, 0); 3341 if (ret) 3342 SetPageError(page); 3343 } 3344 cur = cur + iosize; 3345 pg_offset += iosize; 3346 nr++; 3347 } 3348 done: 3349 *nr_ret = nr; 3350 3351 done_unlocked: 3352 3353 /* drop our reference on any cached states */ 3354 free_extent_state(cached_state); 3355 return ret; 3356 } 3357 3358 /* 3359 * the writepage semantics are similar to regular writepage. extent 3360 * records are inserted to lock ranges in the tree, and as dirty areas 3361 * are found, they are marked writeback. Then the lock bits are removed 3362 * and the end_io handler clears the writeback ranges 3363 */ 3364 static int __extent_writepage(struct page *page, struct writeback_control *wbc, 3365 void *data) 3366 { 3367 struct inode *inode = page->mapping->host; 3368 struct extent_page_data *epd = data; 3369 u64 start = page_offset(page); 3370 u64 page_end = start + PAGE_CACHE_SIZE - 1; 3371 int ret; 3372 int nr = 0; 3373 size_t pg_offset = 0; 3374 loff_t i_size = i_size_read(inode); 3375 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; 3376 int write_flags; 3377 unsigned long nr_written = 0; 3378 3379 if (wbc->sync_mode == WB_SYNC_ALL) 3380 write_flags = WRITE_SYNC; 3381 else 3382 write_flags = WRITE; 3383 3384 trace___extent_writepage(page, inode, wbc); 3385 3386 WARN_ON(!PageLocked(page)); 3387 3388 ClearPageError(page); 3389 3390 pg_offset = i_size & (PAGE_CACHE_SIZE - 1); 3391 if (page->index > end_index || 3392 (page->index == end_index && !pg_offset)) { 3393 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); 3394 unlock_page(page); 3395 return 0; 3396 } 3397 3398 if (page->index == end_index) { 3399 char *userpage; 3400 3401 userpage = kmap_atomic(page); 3402 memset(userpage + pg_offset, 0, 3403 PAGE_CACHE_SIZE - pg_offset); 3404 kunmap_atomic(userpage); 3405 flush_dcache_page(page); 3406 } 3407 3408 pg_offset = 0; 3409 3410 set_page_extent_mapped(page); 3411 3412 ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written); 3413 if (ret == 1) 3414 goto done_unlocked; 3415 if (ret) 3416 goto done; 3417 3418 ret = __extent_writepage_io(inode, page, wbc, epd, 3419 i_size, nr_written, write_flags, &nr); 3420 if (ret == 1) 3421 goto done_unlocked; 3422 3423 done: 3424 if (nr == 0) { 3425 /* make sure the mapping tag for page dirty gets cleared */ 3426 set_page_writeback(page); 3427 end_page_writeback(page); 3428 } 3429 if (PageError(page)) { 3430 ret = ret < 0 ? 
ret : -EIO; 3431 end_extent_writepage(page, ret, start, page_end); 3432 } 3433 unlock_page(page); 3434 return ret; 3435 3436 done_unlocked: 3437 return 0; 3438 } 3439 3440 static int eb_wait(void *word) 3441 { 3442 io_schedule(); 3443 return 0; 3444 } 3445 3446 void wait_on_extent_buffer_writeback(struct extent_buffer *eb) 3447 { 3448 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait, 3449 TASK_UNINTERRUPTIBLE); 3450 } 3451 3452 static noinline_for_stack int 3453 lock_extent_buffer_for_io(struct extent_buffer *eb, 3454 struct btrfs_fs_info *fs_info, 3455 struct extent_page_data *epd) 3456 { 3457 unsigned long i, num_pages; 3458 int flush = 0; 3459 int ret = 0; 3460 3461 if (!btrfs_try_tree_write_lock(eb)) { 3462 flush = 1; 3463 flush_write_bio(epd); 3464 btrfs_tree_lock(eb); 3465 } 3466 3467 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { 3468 btrfs_tree_unlock(eb); 3469 if (!epd->sync_io) 3470 return 0; 3471 if (!flush) { 3472 flush_write_bio(epd); 3473 flush = 1; 3474 } 3475 while (1) { 3476 wait_on_extent_buffer_writeback(eb); 3477 btrfs_tree_lock(eb); 3478 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) 3479 break; 3480 btrfs_tree_unlock(eb); 3481 } 3482 } 3483 3484 /* 3485 * We need to do this to prevent races in people who check if the eb is 3486 * under IO since we can end up having no IO bits set for a short period 3487 * of time. 3488 */ 3489 spin_lock(&eb->refs_lock); 3490 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { 3491 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); 3492 spin_unlock(&eb->refs_lock); 3493 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); 3494 __percpu_counter_add(&fs_info->dirty_metadata_bytes, 3495 -eb->len, 3496 fs_info->dirty_metadata_batch); 3497 ret = 1; 3498 } else { 3499 spin_unlock(&eb->refs_lock); 3500 } 3501 3502 btrfs_tree_unlock(eb); 3503 3504 if (!ret) 3505 return ret; 3506 3507 num_pages = num_extent_pages(eb->start, eb->len); 3508 for (i = 0; i < num_pages; i++) { 3509 struct page *p = extent_buffer_page(eb, i); 3510 3511 if (!trylock_page(p)) { 3512 if (!flush) { 3513 flush_write_bio(epd); 3514 flush = 1; 3515 } 3516 lock_page(p); 3517 } 3518 } 3519 3520 return ret; 3521 } 3522 3523 static void end_extent_buffer_writeback(struct extent_buffer *eb) 3524 { 3525 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); 3526 smp_mb__after_atomic(); 3527 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); 3528 } 3529 3530 static void end_bio_extent_buffer_writepage(struct bio *bio, int err) 3531 { 3532 struct bio_vec *bvec; 3533 struct extent_buffer *eb; 3534 int i, done; 3535 3536 bio_for_each_segment_all(bvec, bio, i) { 3537 struct page *page = bvec->bv_page; 3538 3539 eb = (struct extent_buffer *)page->private; 3540 BUG_ON(!eb); 3541 done = atomic_dec_and_test(&eb->io_pages); 3542 3543 if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) { 3544 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); 3545 ClearPageUptodate(page); 3546 SetPageError(page); 3547 } 3548 3549 end_page_writeback(page); 3550 3551 if (!done) 3552 continue; 3553 3554 end_extent_buffer_writeback(eb); 3555 } 3556 3557 bio_put(bio); 3558 } 3559 3560 static noinline_for_stack int write_one_eb(struct extent_buffer *eb, 3561 struct btrfs_fs_info *fs_info, 3562 struct writeback_control *wbc, 3563 struct extent_page_data *epd) 3564 { 3565 struct block_device *bdev = fs_info->fs_devices->latest_bdev; 3566 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree; 3567 u64 offset = eb->start; 3568 unsigned long i, num_pages; 3569 unsigned long bio_flags = 
0; 3570 int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META; 3571 int ret = 0; 3572 3573 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags); 3574 num_pages = num_extent_pages(eb->start, eb->len); 3575 atomic_set(&eb->io_pages, num_pages); 3576 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID) 3577 bio_flags = EXTENT_BIO_TREE_LOG; 3578 3579 for (i = 0; i < num_pages; i++) { 3580 struct page *p = extent_buffer_page(eb, i); 3581 3582 clear_page_dirty_for_io(p); 3583 set_page_writeback(p); 3584 ret = submit_extent_page(rw, tree, p, offset >> 9, 3585 PAGE_CACHE_SIZE, 0, bdev, &epd->bio, 3586 -1, end_bio_extent_buffer_writepage, 3587 0, epd->bio_flags, bio_flags); 3588 epd->bio_flags = bio_flags; 3589 if (ret) { 3590 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); 3591 SetPageError(p); 3592 if (atomic_sub_and_test(num_pages - i, &eb->io_pages)) 3593 end_extent_buffer_writeback(eb); 3594 ret = -EIO; 3595 break; 3596 } 3597 offset += PAGE_CACHE_SIZE; 3598 update_nr_written(p, wbc, 1); 3599 unlock_page(p); 3600 } 3601 3602 if (unlikely(ret)) { 3603 for (; i < num_pages; i++) { 3604 struct page *p = extent_buffer_page(eb, i); 3605 unlock_page(p); 3606 } 3607 } 3608 3609 return ret; 3610 } 3611 3612 int btree_write_cache_pages(struct address_space *mapping, 3613 struct writeback_control *wbc) 3614 { 3615 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree; 3616 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info; 3617 struct extent_buffer *eb, *prev_eb = NULL; 3618 struct extent_page_data epd = { 3619 .bio = NULL, 3620 .tree = tree, 3621 .extent_locked = 0, 3622 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 3623 .bio_flags = 0, 3624 }; 3625 int ret = 0; 3626 int done = 0; 3627 int nr_to_write_done = 0; 3628 struct pagevec pvec; 3629 int nr_pages; 3630 pgoff_t index; 3631 pgoff_t end; /* Inclusive */ 3632 int scanned = 0; 3633 int tag; 3634 3635 pagevec_init(&pvec, 0); 3636 if (wbc->range_cyclic) { 3637 index = mapping->writeback_index; /* Start from prev offset */ 3638 end = -1; 3639 } else { 3640 index = wbc->range_start >> PAGE_CACHE_SHIFT; 3641 end = wbc->range_end >> PAGE_CACHE_SHIFT; 3642 scanned = 1; 3643 } 3644 if (wbc->sync_mode == WB_SYNC_ALL) 3645 tag = PAGECACHE_TAG_TOWRITE; 3646 else 3647 tag = PAGECACHE_TAG_DIRTY; 3648 retry: 3649 if (wbc->sync_mode == WB_SYNC_ALL) 3650 tag_pages_for_writeback(mapping, index, end); 3651 while (!done && !nr_to_write_done && (index <= end) && 3652 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 3653 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { 3654 unsigned i; 3655 3656 scanned = 1; 3657 for (i = 0; i < nr_pages; i++) { 3658 struct page *page = pvec.pages[i]; 3659 3660 if (!PagePrivate(page)) 3661 continue; 3662 3663 if (!wbc->range_cyclic && page->index > end) { 3664 done = 1; 3665 break; 3666 } 3667 3668 spin_lock(&mapping->private_lock); 3669 if (!PagePrivate(page)) { 3670 spin_unlock(&mapping->private_lock); 3671 continue; 3672 } 3673 3674 eb = (struct extent_buffer *)page->private; 3675 3676 /* 3677 * Shouldn't happen and normally this would be a BUG_ON 3678 * but no sense in crashing the users box for something 3679 * we can survive anyway. 
3680 */ 3681 if (WARN_ON(!eb)) { 3682 spin_unlock(&mapping->private_lock); 3683 continue; 3684 } 3685 3686 if (eb == prev_eb) { 3687 spin_unlock(&mapping->private_lock); 3688 continue; 3689 } 3690 3691 ret = atomic_inc_not_zero(&eb->refs); 3692 spin_unlock(&mapping->private_lock); 3693 if (!ret) 3694 continue; 3695 3696 prev_eb = eb; 3697 ret = lock_extent_buffer_for_io(eb, fs_info, &epd); 3698 if (!ret) { 3699 free_extent_buffer(eb); 3700 continue; 3701 } 3702 3703 ret = write_one_eb(eb, fs_info, wbc, &epd); 3704 if (ret) { 3705 done = 1; 3706 free_extent_buffer(eb); 3707 break; 3708 } 3709 free_extent_buffer(eb); 3710 3711 /* 3712 * the filesystem may choose to bump up nr_to_write. 3713 * We have to make sure to honor the new nr_to_write 3714 * at any time 3715 */ 3716 nr_to_write_done = wbc->nr_to_write <= 0; 3717 } 3718 pagevec_release(&pvec); 3719 cond_resched(); 3720 } 3721 if (!scanned && !done) { 3722 /* 3723 * We hit the last page and there is more work to be done: wrap 3724 * back to the start of the file 3725 */ 3726 scanned = 1; 3727 index = 0; 3728 goto retry; 3729 } 3730 flush_write_bio(&epd); 3731 return ret; 3732 } 3733 3734 /** 3735 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. 3736 * @mapping: address space structure to write 3737 * @wbc: subtract the number of written pages from *@wbc->nr_to_write 3738 * @writepage: function called for each page 3739 * @data: data passed to writepage function 3740 * 3741 * If a page is already under I/O, write_cache_pages() skips it, even 3742 * if it's dirty. This is desirable behaviour for memory-cleaning writeback, 3743 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() 3744 * and msync() need to guarantee that all the data which was dirty at the time 3745 * the call was made get new I/O started against them. If wbc->sync_mode is 3746 * WB_SYNC_ALL then we were called for data integrity and we must wait for 3747 * existing IO to complete. 3748 */ 3749 static int extent_write_cache_pages(struct extent_io_tree *tree, 3750 struct address_space *mapping, 3751 struct writeback_control *wbc, 3752 writepage_t writepage, void *data, 3753 void (*flush_fn)(void *)) 3754 { 3755 struct inode *inode = mapping->host; 3756 int ret = 0; 3757 int done = 0; 3758 int err = 0; 3759 int nr_to_write_done = 0; 3760 struct pagevec pvec; 3761 int nr_pages; 3762 pgoff_t index; 3763 pgoff_t end; /* Inclusive */ 3764 int scanned = 0; 3765 int tag; 3766 3767 /* 3768 * We have to hold onto the inode so that ordered extents can do their 3769 * work when the IO finishes. The alternative to this is failing to add 3770 * an ordered extent if the igrab() fails there and that is a huge pain 3771 * to deal with, so instead just hold onto the inode throughout the 3772 * writepages operation. If it fails here we are freeing up the inode 3773 * anyway and we'd rather not waste our time writing out stuff that is 3774 * going to be truncated anyway. 
3775 */ 3776 if (!igrab(inode)) 3777 return 0; 3778 3779 pagevec_init(&pvec, 0); 3780 if (wbc->range_cyclic) { 3781 index = mapping->writeback_index; /* Start from prev offset */ 3782 end = -1; 3783 } else { 3784 index = wbc->range_start >> PAGE_CACHE_SHIFT; 3785 end = wbc->range_end >> PAGE_CACHE_SHIFT; 3786 scanned = 1; 3787 } 3788 if (wbc->sync_mode == WB_SYNC_ALL) 3789 tag = PAGECACHE_TAG_TOWRITE; 3790 else 3791 tag = PAGECACHE_TAG_DIRTY; 3792 retry: 3793 if (wbc->sync_mode == WB_SYNC_ALL) 3794 tag_pages_for_writeback(mapping, index, end); 3795 while (!done && !nr_to_write_done && (index <= end) && 3796 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 3797 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { 3798 unsigned i; 3799 3800 scanned = 1; 3801 for (i = 0; i < nr_pages; i++) { 3802 struct page *page = pvec.pages[i]; 3803 3804 /* 3805 * At this point we hold neither mapping->tree_lock nor 3806 * lock on the page itself: the page may be truncated or 3807 * invalidated (changing page->mapping to NULL), or even 3808 * swizzled back from swapper_space to tmpfs file 3809 * mapping 3810 */ 3811 if (!trylock_page(page)) { 3812 flush_fn(data); 3813 lock_page(page); 3814 } 3815 3816 if (unlikely(page->mapping != mapping)) { 3817 unlock_page(page); 3818 continue; 3819 } 3820 3821 if (!wbc->range_cyclic && page->index > end) { 3822 done = 1; 3823 unlock_page(page); 3824 continue; 3825 } 3826 3827 if (wbc->sync_mode != WB_SYNC_NONE) { 3828 if (PageWriteback(page)) 3829 flush_fn(data); 3830 wait_on_page_writeback(page); 3831 } 3832 3833 if (PageWriteback(page) || 3834 !clear_page_dirty_for_io(page)) { 3835 unlock_page(page); 3836 continue; 3837 } 3838 3839 ret = (*writepage)(page, wbc, data); 3840 3841 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) { 3842 unlock_page(page); 3843 ret = 0; 3844 } 3845 if (!err && ret < 0) 3846 err = ret; 3847 3848 /* 3849 * the filesystem may choose to bump up nr_to_write. 
3850 * We have to make sure to honor the new nr_to_write 3851 * at any time 3852 */ 3853 nr_to_write_done = wbc->nr_to_write <= 0; 3854 } 3855 pagevec_release(&pvec); 3856 cond_resched(); 3857 } 3858 if (!scanned && !done && !err) { 3859 /* 3860 * We hit the last page and there is more work to be done: wrap 3861 * back to the start of the file 3862 */ 3863 scanned = 1; 3864 index = 0; 3865 goto retry; 3866 } 3867 btrfs_add_delayed_iput(inode); 3868 return err; 3869 } 3870 3871 static void flush_epd_write_bio(struct extent_page_data *epd) 3872 { 3873 if (epd->bio) { 3874 int rw = WRITE; 3875 int ret; 3876 3877 if (epd->sync_io) 3878 rw = WRITE_SYNC; 3879 3880 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags); 3881 BUG_ON(ret < 0); /* -ENOMEM */ 3882 epd->bio = NULL; 3883 } 3884 } 3885 3886 static noinline void flush_write_bio(void *data) 3887 { 3888 struct extent_page_data *epd = data; 3889 flush_epd_write_bio(epd); 3890 } 3891 3892 int extent_write_full_page(struct extent_io_tree *tree, struct page *page, 3893 get_extent_t *get_extent, 3894 struct writeback_control *wbc) 3895 { 3896 int ret; 3897 struct extent_page_data epd = { 3898 .bio = NULL, 3899 .tree = tree, 3900 .get_extent = get_extent, 3901 .extent_locked = 0, 3902 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 3903 .bio_flags = 0, 3904 }; 3905 3906 ret = __extent_writepage(page, wbc, &epd); 3907 3908 flush_epd_write_bio(&epd); 3909 return ret; 3910 } 3911 3912 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, 3913 u64 start, u64 end, get_extent_t *get_extent, 3914 int mode) 3915 { 3916 int ret = 0; 3917 struct address_space *mapping = inode->i_mapping; 3918 struct page *page; 3919 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> 3920 PAGE_CACHE_SHIFT; 3921 3922 struct extent_page_data epd = { 3923 .bio = NULL, 3924 .tree = tree, 3925 .get_extent = get_extent, 3926 .extent_locked = 1, 3927 .sync_io = mode == WB_SYNC_ALL, 3928 .bio_flags = 0, 3929 }; 3930 struct writeback_control wbc_writepages = { 3931 .sync_mode = mode, 3932 .nr_to_write = nr_pages * 2, 3933 .range_start = start, 3934 .range_end = end + 1, 3935 }; 3936 3937 while (start <= end) { 3938 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 3939 if (clear_page_dirty_for_io(page)) 3940 ret = __extent_writepage(page, &wbc_writepages, &epd); 3941 else { 3942 if (tree->ops && tree->ops->writepage_end_io_hook) 3943 tree->ops->writepage_end_io_hook(page, start, 3944 start + PAGE_CACHE_SIZE - 1, 3945 NULL, 1); 3946 unlock_page(page); 3947 } 3948 page_cache_release(page); 3949 start += PAGE_CACHE_SIZE; 3950 } 3951 3952 flush_epd_write_bio(&epd); 3953 return ret; 3954 } 3955 3956 int extent_writepages(struct extent_io_tree *tree, 3957 struct address_space *mapping, 3958 get_extent_t *get_extent, 3959 struct writeback_control *wbc) 3960 { 3961 int ret = 0; 3962 struct extent_page_data epd = { 3963 .bio = NULL, 3964 .tree = tree, 3965 .get_extent = get_extent, 3966 .extent_locked = 0, 3967 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 3968 .bio_flags = 0, 3969 }; 3970 3971 ret = extent_write_cache_pages(tree, mapping, wbc, 3972 __extent_writepage, &epd, 3973 flush_write_bio); 3974 flush_epd_write_bio(&epd); 3975 return ret; 3976 } 3977 3978 int extent_readpages(struct extent_io_tree *tree, 3979 struct address_space *mapping, 3980 struct list_head *pages, unsigned nr_pages, 3981 get_extent_t get_extent) 3982 { 3983 struct bio *bio = NULL; 3984 unsigned page_idx; 3985 unsigned long bio_flags = 0; 3986 struct page *pagepool[16]; 3987 struct 
page *page; 3988 struct extent_map *em_cached = NULL; 3989 int nr = 0; 3990 3991 for (page_idx = 0; page_idx < nr_pages; page_idx++) { 3992 page = list_entry(pages->prev, struct page, lru); 3993 3994 prefetchw(&page->flags); 3995 list_del(&page->lru); 3996 if (add_to_page_cache_lru(page, mapping, 3997 page->index, GFP_NOFS)) { 3998 page_cache_release(page); 3999 continue; 4000 } 4001 4002 pagepool[nr++] = page; 4003 if (nr < ARRAY_SIZE(pagepool)) 4004 continue; 4005 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, 4006 &bio, 0, &bio_flags, READ); 4007 nr = 0; 4008 } 4009 if (nr) 4010 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, 4011 &bio, 0, &bio_flags, READ); 4012 4013 if (em_cached) 4014 free_extent_map(em_cached); 4015 4016 BUG_ON(!list_empty(pages)); 4017 if (bio) 4018 return submit_one_bio(READ, bio, 0, bio_flags); 4019 return 0; 4020 } 4021 4022 /* 4023 * basic invalidatepage code, this waits on any locked or writeback 4024 * ranges corresponding to the page, and then deletes any extent state 4025 * records from the tree 4026 */ 4027 int extent_invalidatepage(struct extent_io_tree *tree, 4028 struct page *page, unsigned long offset) 4029 { 4030 struct extent_state *cached_state = NULL; 4031 u64 start = page_offset(page); 4032 u64 end = start + PAGE_CACHE_SIZE - 1; 4033 size_t blocksize = page->mapping->host->i_sb->s_blocksize; 4034 4035 start += ALIGN(offset, blocksize); 4036 if (start > end) 4037 return 0; 4038 4039 lock_extent_bits(tree, start, end, 0, &cached_state); 4040 wait_on_page_writeback(page); 4041 clear_extent_bit(tree, start, end, 4042 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 4043 EXTENT_DO_ACCOUNTING, 4044 1, 1, &cached_state, GFP_NOFS); 4045 return 0; 4046 } 4047 4048 /* 4049 * a helper for releasepage, this tests for areas of the page that 4050 * are locked or under IO and drops the related state bits if it is safe 4051 * to drop the page. 4052 */ 4053 static int try_release_extent_state(struct extent_map_tree *map, 4054 struct extent_io_tree *tree, 4055 struct page *page, gfp_t mask) 4056 { 4057 u64 start = page_offset(page); 4058 u64 end = start + PAGE_CACHE_SIZE - 1; 4059 int ret = 1; 4060 4061 if (test_range_bit(tree, start, end, 4062 EXTENT_IOBITS, 0, NULL)) 4063 ret = 0; 4064 else { 4065 if ((mask & GFP_NOFS) == GFP_NOFS) 4066 mask = GFP_NOFS; 4067 /* 4068 * at this point we can safely clear everything except the 4069 * locked bit and the nodatasum bit 4070 */ 4071 ret = clear_extent_bit(tree, start, end, 4072 ~(EXTENT_LOCKED | EXTENT_NODATASUM), 4073 0, 0, NULL, mask); 4074 4075 /* if clear_extent_bit failed for enomem reasons, 4076 * we can't allow the release to continue. 4077 */ 4078 if (ret < 0) 4079 ret = 0; 4080 else 4081 ret = 1; 4082 } 4083 return ret; 4084 } 4085 4086 /* 4087 * a helper for releasepage. 
As long as there are no locked extents 4088 * in the range corresponding to the page, both state records and extent 4089 * map records are removed 4090 */ 4091 int try_release_extent_mapping(struct extent_map_tree *map, 4092 struct extent_io_tree *tree, struct page *page, 4093 gfp_t mask) 4094 { 4095 struct extent_map *em; 4096 u64 start = page_offset(page); 4097 u64 end = start + PAGE_CACHE_SIZE - 1; 4098 4099 if ((mask & __GFP_WAIT) && 4100 page->mapping->host->i_size > 16 * 1024 * 1024) { 4101 u64 len; 4102 while (start <= end) { 4103 len = end - start + 1; 4104 write_lock(&map->lock); 4105 em = lookup_extent_mapping(map, start, len); 4106 if (!em) { 4107 write_unlock(&map->lock); 4108 break; 4109 } 4110 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) || 4111 em->start != start) { 4112 write_unlock(&map->lock); 4113 free_extent_map(em); 4114 break; 4115 } 4116 if (!test_range_bit(tree, em->start, 4117 extent_map_end(em) - 1, 4118 EXTENT_LOCKED | EXTENT_WRITEBACK, 4119 0, NULL)) { 4120 remove_extent_mapping(map, em); 4121 /* once for the rb tree */ 4122 free_extent_map(em); 4123 } 4124 start = extent_map_end(em); 4125 write_unlock(&map->lock); 4126 4127 /* once for us */ 4128 free_extent_map(em); 4129 } 4130 } 4131 return try_release_extent_state(map, tree, page, mask); 4132 } 4133 4134 /* 4135 * helper function for fiemap, which doesn't want to see any holes. 4136 * This maps until we find something past 'last' 4137 */ 4138 static struct extent_map *get_extent_skip_holes(struct inode *inode, 4139 u64 offset, 4140 u64 last, 4141 get_extent_t *get_extent) 4142 { 4143 u64 sectorsize = BTRFS_I(inode)->root->sectorsize; 4144 struct extent_map *em; 4145 u64 len; 4146 4147 if (offset >= last) 4148 return NULL; 4149 4150 while (1) { 4151 len = last - offset; 4152 if (len == 0) 4153 break; 4154 len = ALIGN(len, sectorsize); 4155 em = get_extent(inode, NULL, 0, offset, len, 0); 4156 if (IS_ERR_OR_NULL(em)) 4157 return em; 4158 4159 /* if this isn't a hole return it */ 4160 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) && 4161 em->block_start != EXTENT_MAP_HOLE) { 4162 return em; 4163 } 4164 4165 /* this is a hole, advance to the next extent */ 4166 offset = extent_map_end(em); 4167 free_extent_map(em); 4168 if (offset >= last) 4169 break; 4170 } 4171 return NULL; 4172 } 4173 4174 static noinline int count_ext_ref(u64 inum, u64 offset, u64 root_id, void *ctx) 4175 { 4176 unsigned long cnt = *((unsigned long *)ctx); 4177 4178 cnt++; 4179 *((unsigned long *)ctx) = cnt; 4180 4181 /* Now we're sure that the extent is shared. */ 4182 if (cnt > 1) 4183 return 1; 4184 return 0; 4185 } 4186 4187 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4188 __u64 start, __u64 len, get_extent_t *get_extent) 4189 { 4190 int ret = 0; 4191 u64 off = start; 4192 u64 max = start + len; 4193 u32 flags = 0; 4194 u32 found_type; 4195 u64 last; 4196 u64 last_for_get_extent = 0; 4197 u64 disko = 0; 4198 u64 isize = i_size_read(inode); 4199 struct btrfs_key found_key; 4200 struct extent_map *em = NULL; 4201 struct extent_state *cached_state = NULL; 4202 struct btrfs_path *path; 4203 int end = 0; 4204 u64 em_start = 0; 4205 u64 em_len = 0; 4206 u64 em_end = 0; 4207 4208 if (len == 0) 4209 return -EINVAL; 4210 4211 path = btrfs_alloc_path(); 4212 if (!path) 4213 return -ENOMEM; 4214 path->leave_spinning = 1; 4215 4216 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize); 4217 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize); 4218 4219 /* 4220 * lookup the last file extent. 
We're not using i_size here 4221 * because there might be preallocation past i_size 4222 */ 4223 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, 4224 path, btrfs_ino(inode), -1, 0); 4225 if (ret < 0) { 4226 btrfs_free_path(path); 4227 return ret; 4228 } 4229 WARN_ON(!ret); 4230 path->slots[0]--; 4231 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); 4232 found_type = btrfs_key_type(&found_key); 4233 4234 /* No extents, but there might be delalloc bits */ 4235 if (found_key.objectid != btrfs_ino(inode) || 4236 found_type != BTRFS_EXTENT_DATA_KEY) { 4237 /* have to trust i_size as the end */ 4238 last = (u64)-1; 4239 last_for_get_extent = isize; 4240 } else { 4241 /* 4242 * remember the start of the last extent. There are a 4243 * bunch of different factors that go into the length of the 4244 * extent, so its much less complex to remember where it started 4245 */ 4246 last = found_key.offset; 4247 last_for_get_extent = last + 1; 4248 } 4249 btrfs_release_path(path); 4250 4251 /* 4252 * we might have some extents allocated but more delalloc past those 4253 * extents. so, we trust isize unless the start of the last extent is 4254 * beyond isize 4255 */ 4256 if (last < isize) { 4257 last = (u64)-1; 4258 last_for_get_extent = isize; 4259 } 4260 4261 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0, 4262 &cached_state); 4263 4264 em = get_extent_skip_holes(inode, start, last_for_get_extent, 4265 get_extent); 4266 if (!em) 4267 goto out; 4268 if (IS_ERR(em)) { 4269 ret = PTR_ERR(em); 4270 goto out; 4271 } 4272 4273 while (!end) { 4274 u64 offset_in_extent = 0; 4275 4276 /* break if the extent we found is outside the range */ 4277 if (em->start >= max || extent_map_end(em) < off) 4278 break; 4279 4280 /* 4281 * get_extent may return an extent that starts before our 4282 * requested range. We have to make sure the ranges 4283 * we return to fiemap always move forward and don't 4284 * overlap, so adjust the offsets here 4285 */ 4286 em_start = max(em->start, off); 4287 4288 /* 4289 * record the offset from the start of the extent 4290 * for adjusting the disk offset below. Only do this if the 4291 * extent isn't compressed since our in ram offset may be past 4292 * what we have actually allocated on disk. 4293 */ 4294 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) 4295 offset_in_extent = em_start - em->start; 4296 em_end = extent_map_end(em); 4297 em_len = em_end - em_start; 4298 disko = 0; 4299 flags = 0; 4300 4301 /* 4302 * bump off for our next call to get_extent 4303 */ 4304 off = extent_map_end(em); 4305 if (off >= max) 4306 end = 1; 4307 4308 if (em->block_start == EXTENT_MAP_LAST_BYTE) { 4309 end = 1; 4310 flags |= FIEMAP_EXTENT_LAST; 4311 } else if (em->block_start == EXTENT_MAP_INLINE) { 4312 flags |= (FIEMAP_EXTENT_DATA_INLINE | 4313 FIEMAP_EXTENT_NOT_ALIGNED); 4314 } else if (em->block_start == EXTENT_MAP_DELALLOC) { 4315 flags |= (FIEMAP_EXTENT_DELALLOC | 4316 FIEMAP_EXTENT_UNKNOWN); 4317 } else { 4318 unsigned long ref_cnt = 0; 4319 4320 disko = em->block_start + offset_in_extent; 4321 4322 /* 4323 * As btrfs supports shared space, this information 4324 * can be exported to userspace tools via 4325 * flag FIEMAP_EXTENT_SHARED. 
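 * The backref walk below (iterate_inodes_from_logical() with the * count_ext_ref callback) counts the references held on this extent; * if more than one is found, the extent is reported as shared.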
4326 */ 4327 ret = iterate_inodes_from_logical( 4328 em->block_start, 4329 BTRFS_I(inode)->root->fs_info, 4330 path, count_ext_ref, &ref_cnt); 4331 if (ret < 0 && ret != -ENOENT) 4332 goto out_free; 4333 4334 if (ref_cnt > 1) 4335 flags |= FIEMAP_EXTENT_SHARED; 4336 } 4337 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) 4338 flags |= FIEMAP_EXTENT_ENCODED; 4339 4340 free_extent_map(em); 4341 em = NULL; 4342 if ((em_start >= last) || em_len == (u64)-1 || 4343 (last == (u64)-1 && isize <= em_end)) { 4344 flags |= FIEMAP_EXTENT_LAST; 4345 end = 1; 4346 } 4347 4348 /* now scan forward to see if this is really the last extent. */ 4349 em = get_extent_skip_holes(inode, off, last_for_get_extent, 4350 get_extent); 4351 if (IS_ERR(em)) { 4352 ret = PTR_ERR(em); 4353 goto out; 4354 } 4355 if (!em) { 4356 flags |= FIEMAP_EXTENT_LAST; 4357 end = 1; 4358 } 4359 ret = fiemap_fill_next_extent(fieinfo, em_start, disko, 4360 em_len, flags); 4361 if (ret) 4362 goto out_free; 4363 } 4364 out_free: 4365 free_extent_map(em); 4366 out: 4367 btrfs_free_path(path); 4368 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1, 4369 &cached_state, GFP_NOFS); 4370 return ret; 4371 } 4372 4373 static void __free_extent_buffer(struct extent_buffer *eb) 4374 { 4375 btrfs_leak_debug_del(&eb->leak_list); 4376 kmem_cache_free(extent_buffer_cache, eb); 4377 } 4378 4379 int extent_buffer_under_io(struct extent_buffer *eb) 4380 { 4381 return (atomic_read(&eb->io_pages) || 4382 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || 4383 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); 4384 } 4385 4386 /* 4387 * Helper for releasing extent buffer page. 4388 */ 4389 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb, 4390 unsigned long start_idx) 4391 { 4392 unsigned long index; 4393 unsigned long num_pages; 4394 struct page *page; 4395 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags); 4396 4397 BUG_ON(extent_buffer_under_io(eb)); 4398 4399 num_pages = num_extent_pages(eb->start, eb->len); 4400 index = start_idx + num_pages; 4401 if (start_idx >= index) 4402 return; 4403 4404 do { 4405 index--; 4406 page = extent_buffer_page(eb, index); 4407 if (page && mapped) { 4408 spin_lock(&page->mapping->private_lock); 4409 /* 4410 * We do this since we'll remove the pages after we've 4411 * removed the eb from the radix tree, so we could race 4412 * and have this page now attached to the new eb. So 4413 * only clear page_private if it's still connected to 4414 * this eb. 4415 */ 4416 if (PagePrivate(page) && 4417 page->private == (unsigned long)eb) { 4418 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); 4419 BUG_ON(PageDirty(page)); 4420 BUG_ON(PageWriteback(page)); 4421 /* 4422 * We need to make sure we haven't be attached 4423 * to a new eb. 4424 */ 4425 ClearPagePrivate(page); 4426 set_page_private(page, 0); 4427 /* One for the page private */ 4428 page_cache_release(page); 4429 } 4430 spin_unlock(&page->mapping->private_lock); 4431 4432 } 4433 if (page) { 4434 /* One for when we alloced the page */ 4435 page_cache_release(page); 4436 } 4437 } while (index != start_idx); 4438 } 4439 4440 /* 4441 * Helper for releasing the extent buffer. 
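 * It drops every page reference the buffer still holds and then frees * the extent_buffer structure itself.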
4442 */ 4443 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) 4444 { 4445 btrfs_release_extent_buffer_page(eb, 0); 4446 __free_extent_buffer(eb); 4447 } 4448 4449 static struct extent_buffer * 4450 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, 4451 unsigned long len, gfp_t mask) 4452 { 4453 struct extent_buffer *eb = NULL; 4454 4455 eb = kmem_cache_zalloc(extent_buffer_cache, mask); 4456 if (eb == NULL) 4457 return NULL; 4458 eb->start = start; 4459 eb->len = len; 4460 eb->fs_info = fs_info; 4461 eb->bflags = 0; 4462 rwlock_init(&eb->lock); 4463 atomic_set(&eb->write_locks, 0); 4464 atomic_set(&eb->read_locks, 0); 4465 atomic_set(&eb->blocking_readers, 0); 4466 atomic_set(&eb->blocking_writers, 0); 4467 atomic_set(&eb->spinning_readers, 0); 4468 atomic_set(&eb->spinning_writers, 0); 4469 eb->lock_nested = 0; 4470 init_waitqueue_head(&eb->write_lock_wq); 4471 init_waitqueue_head(&eb->read_lock_wq); 4472 4473 btrfs_leak_debug_add(&eb->leak_list, &buffers); 4474 4475 spin_lock_init(&eb->refs_lock); 4476 atomic_set(&eb->refs, 1); 4477 atomic_set(&eb->io_pages, 0); 4478 4479 /* 4480 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages 4481 */ 4482 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE 4483 > MAX_INLINE_EXTENT_BUFFER_SIZE); 4484 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE); 4485 4486 return eb; 4487 } 4488 4489 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) 4490 { 4491 unsigned long i; 4492 struct page *p; 4493 struct extent_buffer *new; 4494 unsigned long num_pages = num_extent_pages(src->start, src->len); 4495 4496 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS); 4497 if (new == NULL) 4498 return NULL; 4499 4500 for (i = 0; i < num_pages; i++) { 4501 p = alloc_page(GFP_NOFS); 4502 if (!p) { 4503 btrfs_release_extent_buffer(new); 4504 return NULL; 4505 } 4506 attach_extent_buffer_page(new, p); 4507 WARN_ON(PageDirty(p)); 4508 SetPageUptodate(p); 4509 new->pages[i] = p; 4510 } 4511 4512 copy_extent_buffer(new, src, 0, 0, src->len); 4513 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags); 4514 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags); 4515 4516 return new; 4517 } 4518 4519 struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len) 4520 { 4521 struct extent_buffer *eb; 4522 unsigned long num_pages = num_extent_pages(0, len); 4523 unsigned long i; 4524 4525 eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS); 4526 if (!eb) 4527 return NULL; 4528 4529 for (i = 0; i < num_pages; i++) { 4530 eb->pages[i] = alloc_page(GFP_NOFS); 4531 if (!eb->pages[i]) 4532 goto err; 4533 } 4534 set_extent_buffer_uptodate(eb); 4535 btrfs_set_header_nritems(eb, 0); 4536 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags); 4537 4538 return eb; 4539 err: 4540 for (; i > 0; i--) 4541 __free_page(eb->pages[i - 1]); 4542 __free_extent_buffer(eb); 4543 return NULL; 4544 } 4545 4546 static void check_buffer_tree_ref(struct extent_buffer *eb) 4547 { 4548 int refs; 4549 /* the ref bit is tricky. We have to make sure it is set 4550 * if we have the buffer dirty. Otherwise the 4551 * code to free a buffer can end up dropping a dirty 4552 * page 4553 * 4554 * Once the ref bit is set, it won't go away while the 4555 * buffer is dirty or in writeback, and it also won't 4556 * go away while we have the reference count on the 4557 * eb bumped. 4558 * 4559 * We can't just set the ref bit without bumping the 4560 * ref on the eb because free_extent_buffer might 4561 * see the ref bit and try to clear it. 
If this happens 4562 * free_extent_buffer might end up dropping our original 4563 * ref by mistake and freeing the page before we are able 4564 * to add one more ref. 4565 * 4566 * So bump the ref count first, then set the bit. If someone 4567 * beat us to it, drop the ref we added. 4568 */ 4569 refs = atomic_read(&eb->refs); 4570 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) 4571 return; 4572 4573 spin_lock(&eb->refs_lock); 4574 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) 4575 atomic_inc(&eb->refs); 4576 spin_unlock(&eb->refs_lock); 4577 } 4578 4579 static void mark_extent_buffer_accessed(struct extent_buffer *eb, 4580 struct page *accessed) 4581 { 4582 unsigned long num_pages, i; 4583 4584 check_buffer_tree_ref(eb); 4585 4586 num_pages = num_extent_pages(eb->start, eb->len); 4587 for (i = 0; i < num_pages; i++) { 4588 struct page *p = extent_buffer_page(eb, i); 4589 if (p != accessed) 4590 mark_page_accessed(p); 4591 } 4592 } 4593 4594 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, 4595 u64 start) 4596 { 4597 struct extent_buffer *eb; 4598 4599 rcu_read_lock(); 4600 eb = radix_tree_lookup(&fs_info->buffer_radix, 4601 start >> PAGE_CACHE_SHIFT); 4602 if (eb && atomic_inc_not_zero(&eb->refs)) { 4603 rcu_read_unlock(); 4604 mark_extent_buffer_accessed(eb, NULL); 4605 return eb; 4606 } 4607 rcu_read_unlock(); 4608 4609 return NULL; 4610 } 4611 4612 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4613 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, 4614 u64 start, unsigned long len) 4615 { 4616 struct extent_buffer *eb, *exists = NULL; 4617 int ret; 4618 4619 eb = find_extent_buffer(fs_info, start); 4620 if (eb) 4621 return eb; 4622 eb = alloc_dummy_extent_buffer(start, len); 4623 if (!eb) 4624 return NULL; 4625 eb->fs_info = fs_info; 4626 again: 4627 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); 4628 if (ret) 4629 goto free_eb; 4630 spin_lock(&fs_info->buffer_lock); 4631 ret = radix_tree_insert(&fs_info->buffer_radix, 4632 start >> PAGE_CACHE_SHIFT, eb); 4633 spin_unlock(&fs_info->buffer_lock); 4634 radix_tree_preload_end(); 4635 if (ret == -EEXIST) { 4636 exists = find_extent_buffer(fs_info, start); 4637 if (exists) 4638 goto free_eb; 4639 else 4640 goto again; 4641 } 4642 check_buffer_tree_ref(eb); 4643 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); 4644 4645 /* 4646 * We will free dummy extent buffer's if they come into 4647 * free_extent_buffer with a ref count of 2, but if we are using this we 4648 * want the buffers to stay in memory until we're done with them, so 4649 * bump the ref count again. 
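 * The caller is then responsible for dropping that extra reference * once it is finished with the buffer.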
4650 */ 4651 atomic_inc(&eb->refs); 4652 return eb; 4653 free_eb: 4654 btrfs_release_extent_buffer(eb); 4655 return exists; 4656 } 4657 #endif 4658 4659 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, 4660 u64 start, unsigned long len) 4661 { 4662 unsigned long num_pages = num_extent_pages(start, len); 4663 unsigned long i; 4664 unsigned long index = start >> PAGE_CACHE_SHIFT; 4665 struct extent_buffer *eb; 4666 struct extent_buffer *exists = NULL; 4667 struct page *p; 4668 struct address_space *mapping = fs_info->btree_inode->i_mapping; 4669 int uptodate = 1; 4670 int ret; 4671 4672 eb = find_extent_buffer(fs_info, start); 4673 if (eb) 4674 return eb; 4675 4676 eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS); 4677 if (!eb) 4678 return NULL; 4679 4680 for (i = 0; i < num_pages; i++, index++) { 4681 p = find_or_create_page(mapping, index, GFP_NOFS); 4682 if (!p) 4683 goto free_eb; 4684 4685 spin_lock(&mapping->private_lock); 4686 if (PagePrivate(p)) { 4687 /* 4688 * We could have already allocated an eb for this page 4689 * and attached one so let's see if we can get a ref on 4690 * the existing eb, and if we can we know it's good and 4691 * we can just return that one, else we know we can just 4692 * overwrite page->private. 4693 */ 4694 exists = (struct extent_buffer *)p->private; 4695 if (atomic_inc_not_zero(&exists->refs)) { 4696 spin_unlock(&mapping->private_lock); 4697 unlock_page(p); 4698 page_cache_release(p); 4699 mark_extent_buffer_accessed(exists, p); 4700 goto free_eb; 4701 } 4702 4703 /* 4704 * Do this so attach doesn't complain and we need to 4705 * drop the ref the old guy had. 4706 */ 4707 ClearPagePrivate(p); 4708 WARN_ON(PageDirty(p)); 4709 page_cache_release(p); 4710 } 4711 attach_extent_buffer_page(eb, p); 4712 spin_unlock(&mapping->private_lock); 4713 WARN_ON(PageDirty(p)); 4714 eb->pages[i] = p; 4715 if (!PageUptodate(p)) 4716 uptodate = 0; 4717 4718 /* 4719 * see below about how we avoid a nasty race with release page 4720 * and why we unlock later 4721 */ 4722 } 4723 if (uptodate) 4724 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 4725 again: 4726 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); 4727 if (ret) 4728 goto free_eb; 4729 4730 spin_lock(&fs_info->buffer_lock); 4731 ret = radix_tree_insert(&fs_info->buffer_radix, 4732 start >> PAGE_CACHE_SHIFT, eb); 4733 spin_unlock(&fs_info->buffer_lock); 4734 radix_tree_preload_end(); 4735 if (ret == -EEXIST) { 4736 exists = find_extent_buffer(fs_info, start); 4737 if (exists) 4738 goto free_eb; 4739 else 4740 goto again; 4741 } 4742 /* add one reference for the tree */ 4743 check_buffer_tree_ref(eb); 4744 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); 4745 4746 /* 4747 * there is a race where release page may have 4748 * tried to find this extent buffer in the radix 4749 * but failed. It will tell the VM it is safe to 4750 * reclaim the page, and it will clear the page private bit.
4751 * We must make sure to set the page private bit properly 4752 * after the extent buffer is in the radix tree so 4753 * it doesn't get lost 4754 */ 4755 SetPageChecked(eb->pages[0]); 4756 for (i = 1; i < num_pages; i++) { 4757 p = extent_buffer_page(eb, i); 4758 ClearPageChecked(p); 4759 unlock_page(p); 4760 } 4761 unlock_page(eb->pages[0]); 4762 return eb; 4763 4764 free_eb: 4765 for (i = 0; i < num_pages; i++) { 4766 if (eb->pages[i]) 4767 unlock_page(eb->pages[i]); 4768 } 4769 4770 WARN_ON(!atomic_dec_and_test(&eb->refs)); 4771 btrfs_release_extent_buffer(eb); 4772 return exists; 4773 } 4774 4775 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head) 4776 { 4777 struct extent_buffer *eb = 4778 container_of(head, struct extent_buffer, rcu_head); 4779 4780 __free_extent_buffer(eb); 4781 } 4782 4783 /* Expects to have eb->eb_lock already held */ 4784 static int release_extent_buffer(struct extent_buffer *eb) 4785 { 4786 WARN_ON(atomic_read(&eb->refs) == 0); 4787 if (atomic_dec_and_test(&eb->refs)) { 4788 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { 4789 struct btrfs_fs_info *fs_info = eb->fs_info; 4790 4791 spin_unlock(&eb->refs_lock); 4792 4793 spin_lock(&fs_info->buffer_lock); 4794 radix_tree_delete(&fs_info->buffer_radix, 4795 eb->start >> PAGE_CACHE_SHIFT); 4796 spin_unlock(&fs_info->buffer_lock); 4797 } else { 4798 spin_unlock(&eb->refs_lock); 4799 } 4800 4801 /* Should be safe to release our pages at this point */ 4802 btrfs_release_extent_buffer_page(eb, 0); 4803 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); 4804 return 1; 4805 } 4806 spin_unlock(&eb->refs_lock); 4807 4808 return 0; 4809 } 4810 4811 void free_extent_buffer(struct extent_buffer *eb) 4812 { 4813 int refs; 4814 int old; 4815 if (!eb) 4816 return; 4817 4818 while (1) { 4819 refs = atomic_read(&eb->refs); 4820 if (refs <= 3) 4821 break; 4822 old = atomic_cmpxchg(&eb->refs, refs, refs - 1); 4823 if (old == refs) 4824 return; 4825 } 4826 4827 spin_lock(&eb->refs_lock); 4828 if (atomic_read(&eb->refs) == 2 && 4829 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) 4830 atomic_dec(&eb->refs); 4831 4832 if (atomic_read(&eb->refs) == 2 && 4833 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && 4834 !extent_buffer_under_io(eb) && 4835 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) 4836 atomic_dec(&eb->refs); 4837 4838 /* 4839 * I know this is terrible, but it's temporary until we stop tracking 4840 * the uptodate bits and such for the extent buffers. 
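 * release_extent_buffer() below drops the reference we are still * holding and tears the buffer down once the count reaches zero.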
4841 */ 4842 release_extent_buffer(eb); 4843 } 4844 4845 void free_extent_buffer_stale(struct extent_buffer *eb) 4846 { 4847 if (!eb) 4848 return; 4849 4850 spin_lock(&eb->refs_lock); 4851 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); 4852 4853 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && 4854 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) 4855 atomic_dec(&eb->refs); 4856 release_extent_buffer(eb); 4857 } 4858 4859 void clear_extent_buffer_dirty(struct extent_buffer *eb) 4860 { 4861 unsigned long i; 4862 unsigned long num_pages; 4863 struct page *page; 4864 4865 num_pages = num_extent_pages(eb->start, eb->len); 4866 4867 for (i = 0; i < num_pages; i++) { 4868 page = extent_buffer_page(eb, i); 4869 if (!PageDirty(page)) 4870 continue; 4871 4872 lock_page(page); 4873 WARN_ON(!PagePrivate(page)); 4874 4875 clear_page_dirty_for_io(page); 4876 spin_lock_irq(&page->mapping->tree_lock); 4877 if (!PageDirty(page)) { 4878 radix_tree_tag_clear(&page->mapping->page_tree, 4879 page_index(page), 4880 PAGECACHE_TAG_DIRTY); 4881 } 4882 spin_unlock_irq(&page->mapping->tree_lock); 4883 ClearPageError(page); 4884 unlock_page(page); 4885 } 4886 WARN_ON(atomic_read(&eb->refs) == 0); 4887 } 4888 4889 int set_extent_buffer_dirty(struct extent_buffer *eb) 4890 { 4891 unsigned long i; 4892 unsigned long num_pages; 4893 int was_dirty = 0; 4894 4895 check_buffer_tree_ref(eb); 4896 4897 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); 4898 4899 num_pages = num_extent_pages(eb->start, eb->len); 4900 WARN_ON(atomic_read(&eb->refs) == 0); 4901 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); 4902 4903 for (i = 0; i < num_pages; i++) 4904 set_page_dirty(extent_buffer_page(eb, i)); 4905 return was_dirty; 4906 } 4907 4908 int clear_extent_buffer_uptodate(struct extent_buffer *eb) 4909 { 4910 unsigned long i; 4911 struct page *page; 4912 unsigned long num_pages; 4913 4914 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 4915 num_pages = num_extent_pages(eb->start, eb->len); 4916 for (i = 0; i < num_pages; i++) { 4917 page = extent_buffer_page(eb, i); 4918 if (page) 4919 ClearPageUptodate(page); 4920 } 4921 return 0; 4922 } 4923 4924 int set_extent_buffer_uptodate(struct extent_buffer *eb) 4925 { 4926 unsigned long i; 4927 struct page *page; 4928 unsigned long num_pages; 4929 4930 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 4931 num_pages = num_extent_pages(eb->start, eb->len); 4932 for (i = 0; i < num_pages; i++) { 4933 page = extent_buffer_page(eb, i); 4934 SetPageUptodate(page); 4935 } 4936 return 0; 4937 } 4938 4939 int extent_buffer_uptodate(struct extent_buffer *eb) 4940 { 4941 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 4942 } 4943 4944 int read_extent_buffer_pages(struct extent_io_tree *tree, 4945 struct extent_buffer *eb, u64 start, int wait, 4946 get_extent_t *get_extent, int mirror_num) 4947 { 4948 unsigned long i; 4949 unsigned long start_i; 4950 struct page *page; 4951 int err; 4952 int ret = 0; 4953 int locked_pages = 0; 4954 int all_uptodate = 1; 4955 unsigned long num_pages; 4956 unsigned long num_reads = 0; 4957 struct bio *bio = NULL; 4958 unsigned long bio_flags = 0; 4959 4960 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) 4961 return 0; 4962 4963 if (start) { 4964 WARN_ON(start < eb->start); 4965 start_i = (start >> PAGE_CACHE_SHIFT) - 4966 (eb->start >> PAGE_CACHE_SHIFT); 4967 } else { 4968 start_i = 0; 4969 } 4970 4971 num_pages = num_extent_pages(eb->start, eb->len); 4972 for (i = start_i; i < num_pages; i++) { 4973 page = 
extent_buffer_page(eb, i); 4974 if (wait == WAIT_NONE) { 4975 if (!trylock_page(page)) 4976 goto unlock_exit; 4977 } else { 4978 lock_page(page); 4979 } 4980 locked_pages++; 4981 if (!PageUptodate(page)) { 4982 num_reads++; 4983 all_uptodate = 0; 4984 } 4985 } 4986 if (all_uptodate) { 4987 if (start_i == 0) 4988 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 4989 goto unlock_exit; 4990 } 4991 4992 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags); 4993 eb->read_mirror = 0; 4994 atomic_set(&eb->io_pages, num_reads); 4995 for (i = start_i; i < num_pages; i++) { 4996 page = extent_buffer_page(eb, i); 4997 if (!PageUptodate(page)) { 4998 ClearPageError(page); 4999 err = __extent_read_full_page(tree, page, 5000 get_extent, &bio, 5001 mirror_num, &bio_flags, 5002 READ | REQ_META); 5003 if (err) 5004 ret = err; 5005 } else { 5006 unlock_page(page); 5007 } 5008 } 5009 5010 if (bio) { 5011 err = submit_one_bio(READ | REQ_META, bio, mirror_num, 5012 bio_flags); 5013 if (err) 5014 return err; 5015 } 5016 5017 if (ret || wait != WAIT_COMPLETE) 5018 return ret; 5019 5020 for (i = start_i; i < num_pages; i++) { 5021 page = extent_buffer_page(eb, i); 5022 wait_on_page_locked(page); 5023 if (!PageUptodate(page)) 5024 ret = -EIO; 5025 } 5026 5027 return ret; 5028 5029 unlock_exit: 5030 i = start_i; 5031 while (locked_pages > 0) { 5032 page = extent_buffer_page(eb, i); 5033 i++; 5034 unlock_page(page); 5035 locked_pages--; 5036 } 5037 return ret; 5038 } 5039 5040 void read_extent_buffer(struct extent_buffer *eb, void *dstv, 5041 unsigned long start, 5042 unsigned long len) 5043 { 5044 size_t cur; 5045 size_t offset; 5046 struct page *page; 5047 char *kaddr; 5048 char *dst = (char *)dstv; 5049 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5050 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5051 5052 WARN_ON(start > eb->len); 5053 WARN_ON(start + len > eb->start + eb->len); 5054 5055 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5056 5057 while (len > 0) { 5058 page = extent_buffer_page(eb, i); 5059 5060 cur = min(len, (PAGE_CACHE_SIZE - offset)); 5061 kaddr = page_address(page); 5062 memcpy(dst, kaddr + offset, cur); 5063 5064 dst += cur; 5065 len -= cur; 5066 offset = 0; 5067 i++; 5068 } 5069 } 5070 5071 int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv, 5072 unsigned long start, 5073 unsigned long len) 5074 { 5075 size_t cur; 5076 size_t offset; 5077 struct page *page; 5078 char *kaddr; 5079 char __user *dst = (char __user *)dstv; 5080 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5081 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5082 int ret = 0; 5083 5084 WARN_ON(start > eb->len); 5085 WARN_ON(start + len > eb->start + eb->len); 5086 5087 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5088 5089 while (len > 0) { 5090 page = extent_buffer_page(eb, i); 5091 5092 cur = min(len, (PAGE_CACHE_SIZE - offset)); 5093 kaddr = page_address(page); 5094 if (copy_to_user(dst, kaddr + offset, cur)) { 5095 ret = -EFAULT; 5096 break; 5097 } 5098 5099 dst += cur; 5100 len -= cur; 5101 offset = 0; 5102 i++; 5103 } 5104 5105 return ret; 5106 } 5107 5108 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, 5109 unsigned long min_len, char **map, 5110 unsigned long *map_start, 5111 unsigned long *map_len) 5112 { 5113 size_t offset = start & (PAGE_CACHE_SIZE - 1); 5114 char *kaddr; 5115 struct page *p; 5116 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5117 unsigned long i = 
(start_offset + start) >> PAGE_CACHE_SHIFT; 5118 unsigned long end_i = (start_offset + start + min_len - 1) >> 5119 PAGE_CACHE_SHIFT; 5120 5121 if (i != end_i) 5122 return -EINVAL; 5123 5124 if (i == 0) { 5125 offset = start_offset; 5126 *map_start = 0; 5127 } else { 5128 offset = 0; 5129 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset; 5130 } 5131 5132 if (start + min_len > eb->len) { 5133 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, " 5134 "wanted %lu %lu\n", 5135 eb->start, eb->len, start, min_len); 5136 return -EINVAL; 5137 } 5138 5139 p = extent_buffer_page(eb, i); 5140 kaddr = page_address(p); 5141 *map = kaddr + offset; 5142 *map_len = PAGE_CACHE_SIZE - offset; 5143 return 0; 5144 } 5145 5146 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, 5147 unsigned long start, 5148 unsigned long len) 5149 { 5150 size_t cur; 5151 size_t offset; 5152 struct page *page; 5153 char *kaddr; 5154 char *ptr = (char *)ptrv; 5155 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5156 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5157 int ret = 0; 5158 5159 WARN_ON(start > eb->len); 5160 WARN_ON(start + len > eb->start + eb->len); 5161 5162 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5163 5164 while (len > 0) { 5165 page = extent_buffer_page(eb, i); 5166 5167 cur = min(len, (PAGE_CACHE_SIZE - offset)); 5168 5169 kaddr = page_address(page); 5170 ret = memcmp(ptr, kaddr + offset, cur); 5171 if (ret) 5172 break; 5173 5174 ptr += cur; 5175 len -= cur; 5176 offset = 0; 5177 i++; 5178 } 5179 return ret; 5180 } 5181 5182 void write_extent_buffer(struct extent_buffer *eb, const void *srcv, 5183 unsigned long start, unsigned long len) 5184 { 5185 size_t cur; 5186 size_t offset; 5187 struct page *page; 5188 char *kaddr; 5189 char *src = (char *)srcv; 5190 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5191 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5192 5193 WARN_ON(start > eb->len); 5194 WARN_ON(start + len > eb->start + eb->len); 5195 5196 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5197 5198 while (len > 0) { 5199 page = extent_buffer_page(eb, i); 5200 WARN_ON(!PageUptodate(page)); 5201 5202 cur = min(len, PAGE_CACHE_SIZE - offset); 5203 kaddr = page_address(page); 5204 memcpy(kaddr + offset, src, cur); 5205 5206 src += cur; 5207 len -= cur; 5208 offset = 0; 5209 i++; 5210 } 5211 } 5212 5213 void memset_extent_buffer(struct extent_buffer *eb, char c, 5214 unsigned long start, unsigned long len) 5215 { 5216 size_t cur; 5217 size_t offset; 5218 struct page *page; 5219 char *kaddr; 5220 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5221 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5222 5223 WARN_ON(start > eb->len); 5224 WARN_ON(start + len > eb->start + eb->len); 5225 5226 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5227 5228 while (len > 0) { 5229 page = extent_buffer_page(eb, i); 5230 WARN_ON(!PageUptodate(page)); 5231 5232 cur = min(len, PAGE_CACHE_SIZE - offset); 5233 kaddr = page_address(page); 5234 memset(kaddr + offset, c, cur); 5235 5236 len -= cur; 5237 offset = 0; 5238 i++; 5239 } 5240 } 5241 5242 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, 5243 unsigned long dst_offset, unsigned long src_offset, 5244 unsigned long len) 5245 { 5246 u64 dst_len = dst->len; 5247 size_t cur; 5248 size_t offset; 5249 struct page *page; 5250 char *kaddr; 5251 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 
1); 5252 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; 5253 5254 WARN_ON(src->len != dst_len); 5255 5256 offset = (start_offset + dst_offset) & 5257 (PAGE_CACHE_SIZE - 1); 5258 5259 while (len > 0) { 5260 page = extent_buffer_page(dst, i); 5261 WARN_ON(!PageUptodate(page)); 5262 5263 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset)); 5264 5265 kaddr = page_address(page); 5266 read_extent_buffer(src, kaddr + offset, src_offset, cur); 5267 5268 src_offset += cur; 5269 len -= cur; 5270 offset = 0; 5271 i++; 5272 } 5273 } 5274 5275 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len) 5276 { 5277 unsigned long distance = (src > dst) ? src - dst : dst - src; 5278 return distance < len; 5279 } 5280 5281 static void copy_pages(struct page *dst_page, struct page *src_page, 5282 unsigned long dst_off, unsigned long src_off, 5283 unsigned long len) 5284 { 5285 char *dst_kaddr = page_address(dst_page); 5286 char *src_kaddr; 5287 int must_memmove = 0; 5288 5289 if (dst_page != src_page) { 5290 src_kaddr = page_address(src_page); 5291 } else { 5292 src_kaddr = dst_kaddr; 5293 if (areas_overlap(src_off, dst_off, len)) 5294 must_memmove = 1; 5295 } 5296 5297 if (must_memmove) 5298 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len); 5299 else 5300 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); 5301 } 5302 5303 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, 5304 unsigned long src_offset, unsigned long len) 5305 { 5306 size_t cur; 5307 size_t dst_off_in_page; 5308 size_t src_off_in_page; 5309 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); 5310 unsigned long dst_i; 5311 unsigned long src_i; 5312 5313 if (src_offset + len > dst->len) { 5314 printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move " 5315 "len %lu dst len %lu\n", src_offset, len, dst->len); 5316 BUG_ON(1); 5317 } 5318 if (dst_offset + len > dst->len) { 5319 printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move " 5320 "len %lu dst len %lu\n", dst_offset, len, dst->len); 5321 BUG_ON(1); 5322 } 5323 5324 while (len > 0) { 5325 dst_off_in_page = (start_offset + dst_offset) & 5326 (PAGE_CACHE_SIZE - 1); 5327 src_off_in_page = (start_offset + src_offset) & 5328 (PAGE_CACHE_SIZE - 1); 5329 5330 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; 5331 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT; 5332 5333 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - 5334 src_off_in_page)); 5335 cur = min_t(unsigned long, cur, 5336 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page)); 5337 5338 copy_pages(extent_buffer_page(dst, dst_i), 5339 extent_buffer_page(dst, src_i), 5340 dst_off_in_page, src_off_in_page, cur); 5341 5342 src_offset += cur; 5343 dst_offset += cur; 5344 len -= cur; 5345 } 5346 } 5347 5348 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, 5349 unsigned long src_offset, unsigned long len) 5350 { 5351 size_t cur; 5352 size_t dst_off_in_page; 5353 size_t src_off_in_page; 5354 unsigned long dst_end = dst_offset + len - 1; 5355 unsigned long src_end = src_offset + len - 1; 5356 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); 5357 unsigned long dst_i; 5358 unsigned long src_i; 5359 5360 if (src_offset + len > dst->len) { 5361 printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move " 5362 "len %lu len %lu\n", src_offset, len, dst->len); 5363 BUG_ON(1); 5364 } 5365 if (dst_offset + len > dst->len) { 5366 printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move " 
5367 "len %lu len %lu\n", dst_offset, len, dst->len); 5368 BUG_ON(1); 5369 } 5370 if (dst_offset < src_offset) { 5371 memcpy_extent_buffer(dst, dst_offset, src_offset, len); 5372 return; 5373 } 5374 while (len > 0) { 5375 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT; 5376 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT; 5377 5378 dst_off_in_page = (start_offset + dst_end) & 5379 (PAGE_CACHE_SIZE - 1); 5380 src_off_in_page = (start_offset + src_end) & 5381 (PAGE_CACHE_SIZE - 1); 5382 5383 cur = min_t(unsigned long, len, src_off_in_page + 1); 5384 cur = min(cur, dst_off_in_page + 1); 5385 copy_pages(extent_buffer_page(dst, dst_i), 5386 extent_buffer_page(dst, src_i), 5387 dst_off_in_page - cur + 1, 5388 src_off_in_page - cur + 1, cur); 5389 5390 dst_end -= cur; 5391 src_end -= cur; 5392 len -= cur; 5393 } 5394 } 5395 5396 int try_release_extent_buffer(struct page *page) 5397 { 5398 struct extent_buffer *eb; 5399 5400 /* 5401 * We need to make sure nobody is attaching this page to an eb right 5402 * now. 5403 */ 5404 spin_lock(&page->mapping->private_lock); 5405 if (!PagePrivate(page)) { 5406 spin_unlock(&page->mapping->private_lock); 5407 return 1; 5408 } 5409 5410 eb = (struct extent_buffer *)page->private; 5411 BUG_ON(!eb); 5412 5413 /* 5414 * This is a little awful but should be ok, we need to make sure that 5415 * the eb doesn't disappear out from under us while we're looking at 5416 * this page. 5417 */ 5418 spin_lock(&eb->refs_lock); 5419 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { 5420 spin_unlock(&eb->refs_lock); 5421 spin_unlock(&page->mapping->private_lock); 5422 return 0; 5423 } 5424 spin_unlock(&page->mapping->private_lock); 5425 5426 /* 5427 * If tree ref isn't set then we know the ref on this eb is a real ref, 5428 * so just return, this page will likely be freed soon anyway. 5429 */ 5430 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { 5431 spin_unlock(&eb->refs_lock); 5432 return 0; 5433 } 5434 5435 return release_extent_buffer(eb); 5436 } 5437
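/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the release helpers above are normally driven from an address_space
 * ->releasepage callback.  The function below only shows the general
 * shape of such a callback for data pages; its name is made up for the
 * example and the real btrfs callbacks live in other files.
 */
static int __maybe_unused example_releasepage(struct page *page,
					      gfp_t gfp_flags)
{
	struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
	struct extent_map_tree *map = &BTRFS_I(page->mapping->host)->extent_tree;
	int ret;

	/* never drop a page that is still dirty or under writeback */
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	/*
	 * try_release_extent_mapping() drops extent_map and extent_state
	 * records for the page when nothing in the range is locked or
	 * under IO; it returns 1 when the page is safe to release.
	 */
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}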