1 #include <linux/bitops.h> 2 #include <linux/slab.h> 3 #include <linux/bio.h> 4 #include <linux/mm.h> 5 #include <linux/pagemap.h> 6 #include <linux/page-flags.h> 7 #include <linux/module.h> 8 #include <linux/spinlock.h> 9 #include <linux/blkdev.h> 10 #include <linux/swap.h> 11 #include <linux/writeback.h> 12 #include <linux/pagevec.h> 13 #include <linux/prefetch.h> 14 #include <linux/cleancache.h> 15 #include "extent_io.h" 16 #include "extent_map.h" 17 #include "compat.h" 18 #include "ctree.h" 19 #include "btrfs_inode.h" 20 #include "volumes.h" 21 22 static struct kmem_cache *extent_state_cache; 23 static struct kmem_cache *extent_buffer_cache; 24 25 static LIST_HEAD(buffers); 26 static LIST_HEAD(states); 27 28 #define LEAK_DEBUG 0 29 #if LEAK_DEBUG 30 static DEFINE_SPINLOCK(leak_lock); 31 #endif 32 33 #define BUFFER_LRU_MAX 64 34 35 struct tree_entry { 36 u64 start; 37 u64 end; 38 struct rb_node rb_node; 39 }; 40 41 struct extent_page_data { 42 struct bio *bio; 43 struct extent_io_tree *tree; 44 get_extent_t *get_extent; 45 46 /* tells writepage not to lock the state bits for this range 47 * it still does the unlocking 48 */ 49 unsigned int extent_locked:1; 50 51 /* tells the submit_bio code to use a WRITE_SYNC */ 52 unsigned int sync_io:1; 53 }; 54 55 int __init extent_io_init(void) 56 { 57 extent_state_cache = kmem_cache_create("extent_state", 58 sizeof(struct extent_state), 0, 59 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 60 if (!extent_state_cache) 61 return -ENOMEM; 62 63 extent_buffer_cache = kmem_cache_create("extent_buffers", 64 sizeof(struct extent_buffer), 0, 65 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 66 if (!extent_buffer_cache) 67 goto free_state_cache; 68 return 0; 69 70 free_state_cache: 71 kmem_cache_destroy(extent_state_cache); 72 return -ENOMEM; 73 } 74 75 void extent_io_exit(void) 76 { 77 struct extent_state *state; 78 struct extent_buffer *eb; 79 80 while (!list_empty(&states)) { 81 state = list_entry(states.next, struct extent_state, leak_list); 82 printk(KERN_ERR "btrfs state leak: start %llu end %llu " 83 "state %lu in tree %p refs %d\n", 84 (unsigned long long)state->start, 85 (unsigned long long)state->end, 86 state->state, state->tree, atomic_read(&state->refs)); 87 list_del(&state->leak_list); 88 kmem_cache_free(extent_state_cache, state); 89 90 } 91 92 while (!list_empty(&buffers)) { 93 eb = list_entry(buffers.next, struct extent_buffer, leak_list); 94 printk(KERN_ERR "btrfs buffer leak start %llu len %lu " 95 "refs %d\n", (unsigned long long)eb->start, 96 eb->len, atomic_read(&eb->refs)); 97 list_del(&eb->leak_list); 98 kmem_cache_free(extent_buffer_cache, eb); 99 } 100 if (extent_state_cache) 101 kmem_cache_destroy(extent_state_cache); 102 if (extent_buffer_cache) 103 kmem_cache_destroy(extent_buffer_cache); 104 } 105 106 void extent_io_tree_init(struct extent_io_tree *tree, 107 struct address_space *mapping) 108 { 109 tree->state = RB_ROOT; 110 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC); 111 tree->ops = NULL; 112 tree->dirty_bytes = 0; 113 spin_lock_init(&tree->lock); 114 spin_lock_init(&tree->buffer_lock); 115 tree->mapping = mapping; 116 } 117 118 static struct extent_state *alloc_extent_state(gfp_t mask) 119 { 120 struct extent_state *state; 121 #if LEAK_DEBUG 122 unsigned long flags; 123 #endif 124 125 state = kmem_cache_alloc(extent_state_cache, mask); 126 if (!state) 127 return state; 128 state->state = 0; 129 state->private = 0; 130 state->tree = NULL; 131 #if LEAK_DEBUG 132 spin_lock_irqsave(&leak_lock, flags); 133 
list_add(&state->leak_list, &states); 134 spin_unlock_irqrestore(&leak_lock, flags); 135 #endif 136 atomic_set(&state->refs, 1); 137 init_waitqueue_head(&state->wq); 138 return state; 139 } 140 141 void free_extent_state(struct extent_state *state) 142 { 143 if (!state) 144 return; 145 if (atomic_dec_and_test(&state->refs)) { 146 #if LEAK_DEBUG 147 unsigned long flags; 148 #endif 149 WARN_ON(state->tree); 150 #if LEAK_DEBUG 151 spin_lock_irqsave(&leak_lock, flags); 152 list_del(&state->leak_list); 153 spin_unlock_irqrestore(&leak_lock, flags); 154 #endif 155 kmem_cache_free(extent_state_cache, state); 156 } 157 } 158 159 static struct rb_node *tree_insert(struct rb_root *root, u64 offset, 160 struct rb_node *node) 161 { 162 struct rb_node **p = &root->rb_node; 163 struct rb_node *parent = NULL; 164 struct tree_entry *entry; 165 166 while (*p) { 167 parent = *p; 168 entry = rb_entry(parent, struct tree_entry, rb_node); 169 170 if (offset < entry->start) 171 p = &(*p)->rb_left; 172 else if (offset > entry->end) 173 p = &(*p)->rb_right; 174 else 175 return parent; 176 } 177 178 entry = rb_entry(node, struct tree_entry, rb_node); 179 rb_link_node(node, parent, p); 180 rb_insert_color(node, root); 181 return NULL; 182 } 183 184 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, 185 struct rb_node **prev_ret, 186 struct rb_node **next_ret) 187 { 188 struct rb_root *root = &tree->state; 189 struct rb_node *n = root->rb_node; 190 struct rb_node *prev = NULL; 191 struct rb_node *orig_prev = NULL; 192 struct tree_entry *entry; 193 struct tree_entry *prev_entry = NULL; 194 195 while (n) { 196 entry = rb_entry(n, struct tree_entry, rb_node); 197 prev = n; 198 prev_entry = entry; 199 200 if (offset < entry->start) 201 n = n->rb_left; 202 else if (offset > entry->end) 203 n = n->rb_right; 204 else 205 return n; 206 } 207 208 if (prev_ret) { 209 orig_prev = prev; 210 while (prev && offset > prev_entry->end) { 211 prev = rb_next(prev); 212 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 213 } 214 *prev_ret = prev; 215 prev = orig_prev; 216 } 217 218 if (next_ret) { 219 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 220 while (prev && offset < prev_entry->start) { 221 prev = rb_prev(prev); 222 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 223 } 224 *next_ret = prev; 225 } 226 return NULL; 227 } 228 229 static inline struct rb_node *tree_search(struct extent_io_tree *tree, 230 u64 offset) 231 { 232 struct rb_node *prev = NULL; 233 struct rb_node *ret; 234 235 ret = __etree_search(tree, offset, &prev, NULL); 236 if (!ret) 237 return prev; 238 return ret; 239 } 240 241 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new, 242 struct extent_state *other) 243 { 244 if (tree->ops && tree->ops->merge_extent_hook) 245 tree->ops->merge_extent_hook(tree->mapping->host, new, 246 other); 247 } 248 249 /* 250 * utility function to look for merge candidates inside a given range. 251 * Any extents with matching state are merged together into a single 252 * extent in the tree. Extents with EXTENT_IO in their state field 253 * are not merged because the end_io handlers need to be able to do 254 * operations on them without sleeping (or doing allocations/splits). 255 * 256 * This should be called with the tree lock held. 
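 *
 * Illustrative example (editor's addition, not part of the original
 * comment): two adjacent states [0, 4095] and [4096, 8191] whose state
 * fields are both exactly EXTENT_DIRTY are collapsed into a single
 * [0, 8191] state; if either state also carried a bit from EXTENT_IOBITS
 * or EXTENT_BOUNDARY, the pair would be left as is.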
257 */ 258 static void merge_state(struct extent_io_tree *tree, 259 struct extent_state *state) 260 { 261 struct extent_state *other; 262 struct rb_node *other_node; 263 264 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) 265 return; 266 267 other_node = rb_prev(&state->rb_node); 268 if (other_node) { 269 other = rb_entry(other_node, struct extent_state, rb_node); 270 if (other->end == state->start - 1 && 271 other->state == state->state) { 272 merge_cb(tree, state, other); 273 state->start = other->start; 274 other->tree = NULL; 275 rb_erase(&other->rb_node, &tree->state); 276 free_extent_state(other); 277 } 278 } 279 other_node = rb_next(&state->rb_node); 280 if (other_node) { 281 other = rb_entry(other_node, struct extent_state, rb_node); 282 if (other->start == state->end + 1 && 283 other->state == state->state) { 284 merge_cb(tree, state, other); 285 state->end = other->end; 286 other->tree = NULL; 287 rb_erase(&other->rb_node, &tree->state); 288 free_extent_state(other); 289 } 290 } 291 } 292 293 static void set_state_cb(struct extent_io_tree *tree, 294 struct extent_state *state, int *bits) 295 { 296 if (tree->ops && tree->ops->set_bit_hook) 297 tree->ops->set_bit_hook(tree->mapping->host, state, bits); 298 } 299 300 static void clear_state_cb(struct extent_io_tree *tree, 301 struct extent_state *state, int *bits) 302 { 303 if (tree->ops && tree->ops->clear_bit_hook) 304 tree->ops->clear_bit_hook(tree->mapping->host, state, bits); 305 } 306 307 static void set_state_bits(struct extent_io_tree *tree, 308 struct extent_state *state, int *bits); 309 310 /* 311 * insert an extent_state struct into the tree. 'bits' are set on the 312 * struct before it is inserted. 313 * 314 * This may return -EEXIST if the extent is already there, in which case the 315 * state struct is freed. 316 * 317 * The tree lock is not taken internally. This is a utility function and 318 * probably isn't what you want to call (see set/clear_extent_bit). 319 */ 320 static int insert_state(struct extent_io_tree *tree, 321 struct extent_state *state, u64 start, u64 end, 322 int *bits) 323 { 324 struct rb_node *node; 325 326 if (end < start) { 327 printk(KERN_ERR "btrfs end < start %llu %llu\n", 328 (unsigned long long)end, 329 (unsigned long long)start); 330 WARN_ON(1); 331 } 332 state->start = start; 333 state->end = end; 334 335 set_state_bits(tree, state, bits); 336 337 node = tree_insert(&tree->state, end, &state->rb_node); 338 if (node) { 339 struct extent_state *found; 340 found = rb_entry(node, struct extent_state, rb_node); 341 printk(KERN_ERR "btrfs found node %llu %llu on insert of " 342 "%llu %llu\n", (unsigned long long)found->start, 343 (unsigned long long)found->end, 344 (unsigned long long)start, (unsigned long long)end); 345 return -EEXIST; 346 } 347 state->tree = tree; 348 merge_state(tree, state); 349 return 0; 350 } 351 352 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig, 353 u64 split) 354 { 355 if (tree->ops && tree->ops->split_extent_hook) 356 tree->ops->split_extent_hook(tree->mapping->host, orig, split); 357 } 358 359 /* 360 * split a given extent state struct in two, inserting the preallocated 361 * struct 'prealloc' as the newly created second half. 'split' indicates an 362 * offset inside 'orig' where it should be split. 363 * 364 * Before calling, 365 * the tree has 'orig' at [orig->start, orig->end]. 
After calling, there 366 * are two extent state structs in the tree: 367 * prealloc: [orig->start, split - 1] 368 * orig: [ split, orig->end ] 369 * 370 * The tree locks are not taken by this function. They need to be held 371 * by the caller. 372 */ 373 static int split_state(struct extent_io_tree *tree, struct extent_state *orig, 374 struct extent_state *prealloc, u64 split) 375 { 376 struct rb_node *node; 377 378 split_cb(tree, orig, split); 379 380 prealloc->start = orig->start; 381 prealloc->end = split - 1; 382 prealloc->state = orig->state; 383 orig->start = split; 384 385 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node); 386 if (node) { 387 free_extent_state(prealloc); 388 return -EEXIST; 389 } 390 prealloc->tree = tree; 391 return 0; 392 } 393 394 /* 395 * utility function to clear some bits in an extent state struct. 396 * it will optionally wake up any one waiting on this state (wake == 1), or 397 * forcibly remove the state from the tree (delete == 1). 398 * 399 * If no bits are set on the state struct after clearing things, the 400 * struct is freed and removed from the tree 401 */ 402 static int clear_state_bit(struct extent_io_tree *tree, 403 struct extent_state *state, 404 int *bits, int wake) 405 { 406 int bits_to_clear = *bits & ~EXTENT_CTLBITS; 407 int ret = state->state & bits_to_clear; 408 409 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { 410 u64 range = state->end - state->start + 1; 411 WARN_ON(range > tree->dirty_bytes); 412 tree->dirty_bytes -= range; 413 } 414 clear_state_cb(tree, state, bits); 415 state->state &= ~bits_to_clear; 416 if (wake) 417 wake_up(&state->wq); 418 if (state->state == 0) { 419 if (state->tree) { 420 rb_erase(&state->rb_node, &tree->state); 421 state->tree = NULL; 422 free_extent_state(state); 423 } else { 424 WARN_ON(1); 425 } 426 } else { 427 merge_state(tree, state); 428 } 429 return ret; 430 } 431 432 static struct extent_state * 433 alloc_extent_state_atomic(struct extent_state *prealloc) 434 { 435 if (!prealloc) 436 prealloc = alloc_extent_state(GFP_ATOMIC); 437 438 return prealloc; 439 } 440 441 /* 442 * clear some bits on a range in the tree. This may require splitting 443 * or inserting elements in the tree, so the gfp mask is used to 444 * indicate which allocations or sleeping are allowed. 445 * 446 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove 447 * the given range from the tree regardless of state (ie for truncate). 448 * 449 * the range [start, end] is inclusive. 450 * 451 * This takes the tree lock, and returns < 0 on error, > 0 if any of the 452 * bits were already set, or zero if none of the bits were already set. 
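 *
 * Usage sketch (editor's example, not from the original source; 'tree',
 * 'start', 'end' and 'cached_state' stand for a caller-provided io tree,
 * an inclusive byte range and an optional cached extent_state pointer):
 *
 *    clear_extent_bit(tree, start, end,
 *                     EXTENT_DELALLOC | EXTENT_DIRTY,
 *                     1, 0, &cached_state, GFP_NOFS);
 *
 * This wakes any waiters on the affected states (wake == 1), leaves the
 * remaining bits in place (delete == 0), and uses *cached_state as a
 * shortcut for the tree search when it already covers 'start'.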
453 */ 454 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 455 int bits, int wake, int delete, 456 struct extent_state **cached_state, 457 gfp_t mask) 458 { 459 struct extent_state *state; 460 struct extent_state *cached; 461 struct extent_state *prealloc = NULL; 462 struct rb_node *next_node; 463 struct rb_node *node; 464 u64 last_end; 465 int err; 466 int set = 0; 467 int clear = 0; 468 469 if (delete) 470 bits |= ~EXTENT_CTLBITS; 471 bits |= EXTENT_FIRST_DELALLOC; 472 473 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY)) 474 clear = 1; 475 again: 476 if (!prealloc && (mask & __GFP_WAIT)) { 477 prealloc = alloc_extent_state(mask); 478 if (!prealloc) 479 return -ENOMEM; 480 } 481 482 spin_lock(&tree->lock); 483 if (cached_state) { 484 cached = *cached_state; 485 486 if (clear) { 487 *cached_state = NULL; 488 cached_state = NULL; 489 } 490 491 if (cached && cached->tree && cached->start <= start && 492 cached->end > start) { 493 if (clear) 494 atomic_dec(&cached->refs); 495 state = cached; 496 goto hit_next; 497 } 498 if (clear) 499 free_extent_state(cached); 500 } 501 /* 502 * this search will find the extents that end after 503 * our range starts 504 */ 505 node = tree_search(tree, start); 506 if (!node) 507 goto out; 508 state = rb_entry(node, struct extent_state, rb_node); 509 hit_next: 510 if (state->start > end) 511 goto out; 512 WARN_ON(state->end < start); 513 last_end = state->end; 514 515 /* 516 * | ---- desired range ---- | 517 * | state | or 518 * | ------------- state -------------- | 519 * 520 * We need to split the extent we found, and may flip 521 * bits on second half. 522 * 523 * If the extent we found extends past our range, we 524 * just split and search again. It'll get split again 525 * the next time though. 526 * 527 * If the extent we found is inside our range, we clear 528 * the desired bit on it. 
529 */ 530 531 if (state->start < start) { 532 prealloc = alloc_extent_state_atomic(prealloc); 533 BUG_ON(!prealloc); 534 err = split_state(tree, state, prealloc, start); 535 BUG_ON(err == -EEXIST); 536 prealloc = NULL; 537 if (err) 538 goto out; 539 if (state->end <= end) { 540 set |= clear_state_bit(tree, state, &bits, wake); 541 if (last_end == (u64)-1) 542 goto out; 543 start = last_end + 1; 544 } 545 goto search_again; 546 } 547 /* 548 * | ---- desired range ---- | 549 * | state | 550 * We need to split the extent, and clear the bit 551 * on the first half 552 */ 553 if (state->start <= end && state->end > end) { 554 prealloc = alloc_extent_state_atomic(prealloc); 555 BUG_ON(!prealloc); 556 err = split_state(tree, state, prealloc, end + 1); 557 BUG_ON(err == -EEXIST); 558 if (wake) 559 wake_up(&state->wq); 560 561 set |= clear_state_bit(tree, prealloc, &bits, wake); 562 563 prealloc = NULL; 564 goto out; 565 } 566 567 if (state->end < end && prealloc && !need_resched()) 568 next_node = rb_next(&state->rb_node); 569 else 570 next_node = NULL; 571 572 set |= clear_state_bit(tree, state, &bits, wake); 573 if (last_end == (u64)-1) 574 goto out; 575 start = last_end + 1; 576 if (start <= end && next_node) { 577 state = rb_entry(next_node, struct extent_state, 578 rb_node); 579 if (state->start == start) 580 goto hit_next; 581 } 582 goto search_again; 583 584 out: 585 spin_unlock(&tree->lock); 586 if (prealloc) 587 free_extent_state(prealloc); 588 589 return set; 590 591 search_again: 592 if (start > end) 593 goto out; 594 spin_unlock(&tree->lock); 595 if (mask & __GFP_WAIT) 596 cond_resched(); 597 goto again; 598 } 599 600 static int wait_on_state(struct extent_io_tree *tree, 601 struct extent_state *state) 602 __releases(tree->lock) 603 __acquires(tree->lock) 604 { 605 DEFINE_WAIT(wait); 606 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); 607 spin_unlock(&tree->lock); 608 schedule(); 609 spin_lock(&tree->lock); 610 finish_wait(&state->wq, &wait); 611 return 0; 612 } 613 614 /* 615 * waits for one or more bits to clear on a range in the state tree. 616 * The range [start, end] is inclusive. 
617 * The tree lock is taken by this function 618 */ 619 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) 620 { 621 struct extent_state *state; 622 struct rb_node *node; 623 624 spin_lock(&tree->lock); 625 again: 626 while (1) { 627 /* 628 * this search will find all the extents that end after 629 * our range starts 630 */ 631 node = tree_search(tree, start); 632 if (!node) 633 break; 634 635 state = rb_entry(node, struct extent_state, rb_node); 636 637 if (state->start > end) 638 goto out; 639 640 if (state->state & bits) { 641 start = state->start; 642 atomic_inc(&state->refs); 643 wait_on_state(tree, state); 644 free_extent_state(state); 645 goto again; 646 } 647 start = state->end + 1; 648 649 if (start > end) 650 break; 651 652 cond_resched_lock(&tree->lock); 653 } 654 out: 655 spin_unlock(&tree->lock); 656 return 0; 657 } 658 659 static void set_state_bits(struct extent_io_tree *tree, 660 struct extent_state *state, 661 int *bits) 662 { 663 int bits_to_set = *bits & ~EXTENT_CTLBITS; 664 665 set_state_cb(tree, state, bits); 666 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { 667 u64 range = state->end - state->start + 1; 668 tree->dirty_bytes += range; 669 } 670 state->state |= bits_to_set; 671 } 672 673 static void cache_state(struct extent_state *state, 674 struct extent_state **cached_ptr) 675 { 676 if (cached_ptr && !(*cached_ptr)) { 677 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) { 678 *cached_ptr = state; 679 atomic_inc(&state->refs); 680 } 681 } 682 } 683 684 static void uncache_state(struct extent_state **cached_ptr) 685 { 686 if (cached_ptr && (*cached_ptr)) { 687 struct extent_state *state = *cached_ptr; 688 *cached_ptr = NULL; 689 free_extent_state(state); 690 } 691 } 692 693 /* 694 * set some bits on a range in the tree. This may require allocations or 695 * sleeping, so the gfp mask is used to indicate what is allowed. 696 * 697 * If any of the exclusive bits are set, this will fail with -EEXIST if some 698 * part of the range already has the desired bits set. The start of the 699 * existing range is returned in failed_start in this case. 700 * 701 * [start, end] is inclusive This takes the tree lock. 702 */ 703 704 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 705 int bits, int exclusive_bits, u64 *failed_start, 706 struct extent_state **cached_state, gfp_t mask) 707 { 708 struct extent_state *state; 709 struct extent_state *prealloc = NULL; 710 struct rb_node *node; 711 int err = 0; 712 u64 last_start; 713 u64 last_end; 714 715 bits |= EXTENT_FIRST_DELALLOC; 716 again: 717 if (!prealloc && (mask & __GFP_WAIT)) { 718 prealloc = alloc_extent_state(mask); 719 BUG_ON(!prealloc); 720 } 721 722 spin_lock(&tree->lock); 723 if (cached_state && *cached_state) { 724 state = *cached_state; 725 if (state->start <= start && state->end > start && 726 state->tree) { 727 node = &state->rb_node; 728 goto hit_next; 729 } 730 } 731 /* 732 * this search will find all the extents that end after 733 * our range starts. 
734 */ 735 node = tree_search(tree, start); 736 if (!node) { 737 prealloc = alloc_extent_state_atomic(prealloc); 738 BUG_ON(!prealloc); 739 err = insert_state(tree, prealloc, start, end, &bits); 740 prealloc = NULL; 741 BUG_ON(err == -EEXIST); 742 goto out; 743 } 744 state = rb_entry(node, struct extent_state, rb_node); 745 hit_next: 746 last_start = state->start; 747 last_end = state->end; 748 749 /* 750 * | ---- desired range ---- | 751 * | state | 752 * 753 * Just lock what we found and keep going 754 */ 755 if (state->start == start && state->end <= end) { 756 struct rb_node *next_node; 757 if (state->state & exclusive_bits) { 758 *failed_start = state->start; 759 err = -EEXIST; 760 goto out; 761 } 762 763 set_state_bits(tree, state, &bits); 764 765 cache_state(state, cached_state); 766 merge_state(tree, state); 767 if (last_end == (u64)-1) 768 goto out; 769 770 start = last_end + 1; 771 next_node = rb_next(&state->rb_node); 772 if (next_node && start < end && prealloc && !need_resched()) { 773 state = rb_entry(next_node, struct extent_state, 774 rb_node); 775 if (state->start == start) 776 goto hit_next; 777 } 778 goto search_again; 779 } 780 781 /* 782 * | ---- desired range ---- | 783 * | state | 784 * or 785 * | ------------- state -------------- | 786 * 787 * We need to split the extent we found, and may flip bits on 788 * second half. 789 * 790 * If the extent we found extends past our 791 * range, we just split and search again. It'll get split 792 * again the next time though. 793 * 794 * If the extent we found is inside our range, we set the 795 * desired bit on it. 796 */ 797 if (state->start < start) { 798 if (state->state & exclusive_bits) { 799 *failed_start = start; 800 err = -EEXIST; 801 goto out; 802 } 803 804 prealloc = alloc_extent_state_atomic(prealloc); 805 BUG_ON(!prealloc); 806 err = split_state(tree, state, prealloc, start); 807 BUG_ON(err == -EEXIST); 808 prealloc = NULL; 809 if (err) 810 goto out; 811 if (state->end <= end) { 812 set_state_bits(tree, state, &bits); 813 cache_state(state, cached_state); 814 merge_state(tree, state); 815 if (last_end == (u64)-1) 816 goto out; 817 start = last_end + 1; 818 } 819 goto search_again; 820 } 821 /* 822 * | ---- desired range ---- | 823 * | state | or | state | 824 * 825 * There's a hole, we need to insert something in it and 826 * ignore the extent we found. 827 */ 828 if (state->start > start) { 829 u64 this_end; 830 if (end < last_start) 831 this_end = end; 832 else 833 this_end = last_start - 1; 834 835 prealloc = alloc_extent_state_atomic(prealloc); 836 BUG_ON(!prealloc); 837 838 /* 839 * Avoid to free 'prealloc' if it can be merged with 840 * the later extent. 
841 */ 842 err = insert_state(tree, prealloc, start, this_end, 843 &bits); 844 BUG_ON(err == -EEXIST); 845 if (err) { 846 free_extent_state(prealloc); 847 prealloc = NULL; 848 goto out; 849 } 850 cache_state(prealloc, cached_state); 851 prealloc = NULL; 852 start = this_end + 1; 853 goto search_again; 854 } 855 /* 856 * | ---- desired range ---- | 857 * | state | 858 * We need to split the extent, and set the bit 859 * on the first half 860 */ 861 if (state->start <= end && state->end > end) { 862 if (state->state & exclusive_bits) { 863 *failed_start = start; 864 err = -EEXIST; 865 goto out; 866 } 867 868 prealloc = alloc_extent_state_atomic(prealloc); 869 BUG_ON(!prealloc); 870 err = split_state(tree, state, prealloc, end + 1); 871 BUG_ON(err == -EEXIST); 872 873 set_state_bits(tree, prealloc, &bits); 874 cache_state(prealloc, cached_state); 875 merge_state(tree, prealloc); 876 prealloc = NULL; 877 goto out; 878 } 879 880 goto search_again; 881 882 out: 883 spin_unlock(&tree->lock); 884 if (prealloc) 885 free_extent_state(prealloc); 886 887 return err; 888 889 search_again: 890 if (start > end) 891 goto out; 892 spin_unlock(&tree->lock); 893 if (mask & __GFP_WAIT) 894 cond_resched(); 895 goto again; 896 } 897 898 /** 899 * convert_extent - convert all bits in a given range from one bit to another 900 * @tree: the io tree to search 901 * @start: the start offset in bytes 902 * @end: the end offset in bytes (inclusive) 903 * @bits: the bits to set in this range 904 * @clear_bits: the bits to clear in this range 905 * @mask: the allocation mask 906 * 907 * This will go through and set bits for the given range. If any states exist 908 * already in this range they are set with the given bit and cleared of the 909 * clear_bits. This is only meant to be used by things that are mergeable, ie 910 * converting from say DELALLOC to DIRTY. This is not meant to be used with 911 * boundary bits like LOCK. 912 */ 913 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 914 int bits, int clear_bits, gfp_t mask) 915 { 916 struct extent_state *state; 917 struct extent_state *prealloc = NULL; 918 struct rb_node *node; 919 int err = 0; 920 u64 last_start; 921 u64 last_end; 922 923 again: 924 if (!prealloc && (mask & __GFP_WAIT)) { 925 prealloc = alloc_extent_state(mask); 926 if (!prealloc) 927 return -ENOMEM; 928 } 929 930 spin_lock(&tree->lock); 931 /* 932 * this search will find all the extents that end after 933 * our range starts. 
934 */ 935 node = tree_search(tree, start); 936 if (!node) { 937 prealloc = alloc_extent_state_atomic(prealloc); 938 if (!prealloc) 939 return -ENOMEM; 940 err = insert_state(tree, prealloc, start, end, &bits); 941 prealloc = NULL; 942 BUG_ON(err == -EEXIST); 943 goto out; 944 } 945 state = rb_entry(node, struct extent_state, rb_node); 946 hit_next: 947 last_start = state->start; 948 last_end = state->end; 949 950 /* 951 * | ---- desired range ---- | 952 * | state | 953 * 954 * Just lock what we found and keep going 955 */ 956 if (state->start == start && state->end <= end) { 957 struct rb_node *next_node; 958 959 set_state_bits(tree, state, &bits); 960 clear_state_bit(tree, state, &clear_bits, 0); 961 962 merge_state(tree, state); 963 if (last_end == (u64)-1) 964 goto out; 965 966 start = last_end + 1; 967 next_node = rb_next(&state->rb_node); 968 if (next_node && start < end && prealloc && !need_resched()) { 969 state = rb_entry(next_node, struct extent_state, 970 rb_node); 971 if (state->start == start) 972 goto hit_next; 973 } 974 goto search_again; 975 } 976 977 /* 978 * | ---- desired range ---- | 979 * | state | 980 * or 981 * | ------------- state -------------- | 982 * 983 * We need to split the extent we found, and may flip bits on 984 * second half. 985 * 986 * If the extent we found extends past our 987 * range, we just split and search again. It'll get split 988 * again the next time though. 989 * 990 * If the extent we found is inside our range, we set the 991 * desired bit on it. 992 */ 993 if (state->start < start) { 994 prealloc = alloc_extent_state_atomic(prealloc); 995 if (!prealloc) 996 return -ENOMEM; 997 err = split_state(tree, state, prealloc, start); 998 BUG_ON(err == -EEXIST); 999 prealloc = NULL; 1000 if (err) 1001 goto out; 1002 if (state->end <= end) { 1003 set_state_bits(tree, state, &bits); 1004 clear_state_bit(tree, state, &clear_bits, 0); 1005 merge_state(tree, state); 1006 if (last_end == (u64)-1) 1007 goto out; 1008 start = last_end + 1; 1009 } 1010 goto search_again; 1011 } 1012 /* 1013 * | ---- desired range ---- | 1014 * | state | or | state | 1015 * 1016 * There's a hole, we need to insert something in it and 1017 * ignore the extent we found. 1018 */ 1019 if (state->start > start) { 1020 u64 this_end; 1021 if (end < last_start) 1022 this_end = end; 1023 else 1024 this_end = last_start - 1; 1025 1026 prealloc = alloc_extent_state_atomic(prealloc); 1027 if (!prealloc) 1028 return -ENOMEM; 1029 1030 /* 1031 * Avoid to free 'prealloc' if it can be merged with 1032 * the later extent. 
1033 */ 1034 err = insert_state(tree, prealloc, start, this_end, 1035 &bits); 1036 BUG_ON(err == -EEXIST); 1037 if (err) { 1038 free_extent_state(prealloc); 1039 prealloc = NULL; 1040 goto out; 1041 } 1042 prealloc = NULL; 1043 start = this_end + 1; 1044 goto search_again; 1045 } 1046 /* 1047 * | ---- desired range ---- | 1048 * | state | 1049 * We need to split the extent, and set the bit 1050 * on the first half 1051 */ 1052 if (state->start <= end && state->end > end) { 1053 prealloc = alloc_extent_state_atomic(prealloc); 1054 if (!prealloc) 1055 return -ENOMEM; 1056 1057 err = split_state(tree, state, prealloc, end + 1); 1058 BUG_ON(err == -EEXIST); 1059 1060 set_state_bits(tree, prealloc, &bits); 1061 clear_state_bit(tree, prealloc, &clear_bits, 0); 1062 1063 merge_state(tree, prealloc); 1064 prealloc = NULL; 1065 goto out; 1066 } 1067 1068 goto search_again; 1069 1070 out: 1071 spin_unlock(&tree->lock); 1072 if (prealloc) 1073 free_extent_state(prealloc); 1074 1075 return err; 1076 1077 search_again: 1078 if (start > end) 1079 goto out; 1080 spin_unlock(&tree->lock); 1081 if (mask & __GFP_WAIT) 1082 cond_resched(); 1083 goto again; 1084 } 1085 1086 /* wrappers around set/clear extent bit */ 1087 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 1088 gfp_t mask) 1089 { 1090 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, 1091 NULL, mask); 1092 } 1093 1094 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1095 int bits, gfp_t mask) 1096 { 1097 return set_extent_bit(tree, start, end, bits, 0, NULL, 1098 NULL, mask); 1099 } 1100 1101 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1102 int bits, gfp_t mask) 1103 { 1104 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask); 1105 } 1106 1107 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, 1108 struct extent_state **cached_state, gfp_t mask) 1109 { 1110 return set_extent_bit(tree, start, end, 1111 EXTENT_DELALLOC | EXTENT_UPTODATE, 1112 0, NULL, cached_state, mask); 1113 } 1114 1115 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 1116 gfp_t mask) 1117 { 1118 return clear_extent_bit(tree, start, end, 1119 EXTENT_DIRTY | EXTENT_DELALLOC | 1120 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask); 1121 } 1122 1123 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, 1124 gfp_t mask) 1125 { 1126 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, 1127 NULL, mask); 1128 } 1129 1130 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 1131 struct extent_state **cached_state, gfp_t mask) 1132 { 1133 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 1134 NULL, cached_state, mask); 1135 } 1136 1137 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, 1138 u64 end, struct extent_state **cached_state, 1139 gfp_t mask) 1140 { 1141 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, 1142 cached_state, mask); 1143 } 1144 1145 /* 1146 * either insert or lock state struct between start and end use mask to tell 1147 * us if waiting is desired. 
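 *
 * Usage sketch (editor's example, not in the original source; 'tree',
 * 'start' and 'end' stand for a caller-provided io tree and an inclusive
 * byte range):
 *
 *    lock_extent(tree, start, end, GFP_NOFS);
 *    ... operate on [start, end] ...
 *    unlock_extent(tree, start, end, GFP_NOFS);
 *
 * With a mask that allows __GFP_WAIT, the lock call sleeps in
 * wait_extent_bit() until any conflicting EXTENT_LOCKED range is released.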
1148 */ 1149 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1150 int bits, struct extent_state **cached_state, gfp_t mask) 1151 { 1152 int err; 1153 u64 failed_start; 1154 while (1) { 1155 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, 1156 EXTENT_LOCKED, &failed_start, 1157 cached_state, mask); 1158 if (err == -EEXIST && (mask & __GFP_WAIT)) { 1159 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); 1160 start = failed_start; 1161 } else { 1162 break; 1163 } 1164 WARN_ON(start > end); 1165 } 1166 return err; 1167 } 1168 1169 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) 1170 { 1171 return lock_extent_bits(tree, start, end, 0, NULL, mask); 1172 } 1173 1174 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, 1175 gfp_t mask) 1176 { 1177 int err; 1178 u64 failed_start; 1179 1180 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, 1181 &failed_start, NULL, mask); 1182 if (err == -EEXIST) { 1183 if (failed_start > start) 1184 clear_extent_bit(tree, start, failed_start - 1, 1185 EXTENT_LOCKED, 1, 0, NULL, mask); 1186 return 0; 1187 } 1188 return 1; 1189 } 1190 1191 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, 1192 struct extent_state **cached, gfp_t mask) 1193 { 1194 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, 1195 mask); 1196 } 1197 1198 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) 1199 { 1200 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, 1201 mask); 1202 } 1203 1204 /* 1205 * helper function to set both pages and extents in the tree writeback 1206 */ 1207 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) 1208 { 1209 unsigned long index = start >> PAGE_CACHE_SHIFT; 1210 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1211 struct page *page; 1212 1213 while (index <= end_index) { 1214 page = find_get_page(tree->mapping, index); 1215 BUG_ON(!page); 1216 set_page_writeback(page); 1217 page_cache_release(page); 1218 index++; 1219 } 1220 return 0; 1221 } 1222 1223 /* find the first state struct with 'bits' set after 'start', and 1224 * return it. tree->lock must be held. NULL will returned if 1225 * nothing was found after 'start' 1226 */ 1227 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, 1228 u64 start, int bits) 1229 { 1230 struct rb_node *node; 1231 struct extent_state *state; 1232 1233 /* 1234 * this search will find all the extents that end after 1235 * our range starts. 1236 */ 1237 node = tree_search(tree, start); 1238 if (!node) 1239 goto out; 1240 1241 while (1) { 1242 state = rb_entry(node, struct extent_state, rb_node); 1243 if (state->end >= start && (state->state & bits)) 1244 return state; 1245 1246 node = rb_next(node); 1247 if (!node) 1248 break; 1249 } 1250 out: 1251 return NULL; 1252 } 1253 1254 /* 1255 * find the first offset in the io tree with 'bits' set. zero is 1256 * returned if we find something, and *start_ret and *end_ret are 1257 * set to reflect the state struct that was found. 
1258 * 1259 * If nothing was found, 1 is returned, < 0 on error 1260 */ 1261 int find_first_extent_bit(struct extent_io_tree *tree, u64 start, 1262 u64 *start_ret, u64 *end_ret, int bits) 1263 { 1264 struct extent_state *state; 1265 int ret = 1; 1266 1267 spin_lock(&tree->lock); 1268 state = find_first_extent_bit_state(tree, start, bits); 1269 if (state) { 1270 *start_ret = state->start; 1271 *end_ret = state->end; 1272 ret = 0; 1273 } 1274 spin_unlock(&tree->lock); 1275 return ret; 1276 } 1277 1278 /* 1279 * find a contiguous range of bytes in the file marked as delalloc, not 1280 * more than 'max_bytes'. start and end are used to return the range, 1281 * 1282 * 1 is returned if we find something, 0 if nothing was in the tree 1283 */ 1284 static noinline u64 find_delalloc_range(struct extent_io_tree *tree, 1285 u64 *start, u64 *end, u64 max_bytes, 1286 struct extent_state **cached_state) 1287 { 1288 struct rb_node *node; 1289 struct extent_state *state; 1290 u64 cur_start = *start; 1291 u64 found = 0; 1292 u64 total_bytes = 0; 1293 1294 spin_lock(&tree->lock); 1295 1296 /* 1297 * this search will find all the extents that end after 1298 * our range starts. 1299 */ 1300 node = tree_search(tree, cur_start); 1301 if (!node) { 1302 if (!found) 1303 *end = (u64)-1; 1304 goto out; 1305 } 1306 1307 while (1) { 1308 state = rb_entry(node, struct extent_state, rb_node); 1309 if (found && (state->start != cur_start || 1310 (state->state & EXTENT_BOUNDARY))) { 1311 goto out; 1312 } 1313 if (!(state->state & EXTENT_DELALLOC)) { 1314 if (!found) 1315 *end = state->end; 1316 goto out; 1317 } 1318 if (!found) { 1319 *start = state->start; 1320 *cached_state = state; 1321 atomic_inc(&state->refs); 1322 } 1323 found++; 1324 *end = state->end; 1325 cur_start = state->end + 1; 1326 node = rb_next(node); 1327 if (!node) 1328 break; 1329 total_bytes += state->end - state->start + 1; 1330 if (total_bytes >= max_bytes) 1331 break; 1332 } 1333 out: 1334 spin_unlock(&tree->lock); 1335 return found; 1336 } 1337 1338 static noinline int __unlock_for_delalloc(struct inode *inode, 1339 struct page *locked_page, 1340 u64 start, u64 end) 1341 { 1342 int ret; 1343 struct page *pages[16]; 1344 unsigned long index = start >> PAGE_CACHE_SHIFT; 1345 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1346 unsigned long nr_pages = end_index - index + 1; 1347 int i; 1348 1349 if (index == locked_page->index && end_index == index) 1350 return 0; 1351 1352 while (nr_pages > 0) { 1353 ret = find_get_pages_contig(inode->i_mapping, index, 1354 min_t(unsigned long, nr_pages, 1355 ARRAY_SIZE(pages)), pages); 1356 for (i = 0; i < ret; i++) { 1357 if (pages[i] != locked_page) 1358 unlock_page(pages[i]); 1359 page_cache_release(pages[i]); 1360 } 1361 nr_pages -= ret; 1362 index += ret; 1363 cond_resched(); 1364 } 1365 return 0; 1366 } 1367 1368 static noinline int lock_delalloc_pages(struct inode *inode, 1369 struct page *locked_page, 1370 u64 delalloc_start, 1371 u64 delalloc_end) 1372 { 1373 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT; 1374 unsigned long start_index = index; 1375 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT; 1376 unsigned long pages_locked = 0; 1377 struct page *pages[16]; 1378 unsigned long nrpages; 1379 int ret; 1380 int i; 1381 1382 /* the caller is responsible for locking the start index */ 1383 if (index == locked_page->index && index == end_index) 1384 return 0; 1385 1386 /* skip the page at the start index */ 1387 nrpages = end_index - index + 1; 1388 while (nrpages > 0) { 1389 ret = 
find_get_pages_contig(inode->i_mapping, index, 1390 min_t(unsigned long, 1391 nrpages, ARRAY_SIZE(pages)), pages); 1392 if (ret == 0) { 1393 ret = -EAGAIN; 1394 goto done; 1395 } 1396 /* now we have an array of pages, lock them all */ 1397 for (i = 0; i < ret; i++) { 1398 /* 1399 * the caller is taking responsibility for 1400 * locked_page 1401 */ 1402 if (pages[i] != locked_page) { 1403 lock_page(pages[i]); 1404 if (!PageDirty(pages[i]) || 1405 pages[i]->mapping != inode->i_mapping) { 1406 ret = -EAGAIN; 1407 unlock_page(pages[i]); 1408 page_cache_release(pages[i]); 1409 goto done; 1410 } 1411 } 1412 page_cache_release(pages[i]); 1413 pages_locked++; 1414 } 1415 nrpages -= ret; 1416 index += ret; 1417 cond_resched(); 1418 } 1419 ret = 0; 1420 done: 1421 if (ret && pages_locked) { 1422 __unlock_for_delalloc(inode, locked_page, 1423 delalloc_start, 1424 ((u64)(start_index + pages_locked - 1)) << 1425 PAGE_CACHE_SHIFT); 1426 } 1427 return ret; 1428 } 1429 1430 /* 1431 * find a contiguous range of bytes in the file marked as delalloc, not 1432 * more than 'max_bytes'. start and end are used to return the range, 1433 * 1434 * 1 is returned if we find something, 0 if nothing was in the tree 1435 */ 1436 static noinline u64 find_lock_delalloc_range(struct inode *inode, 1437 struct extent_io_tree *tree, 1438 struct page *locked_page, 1439 u64 *start, u64 *end, 1440 u64 max_bytes) 1441 { 1442 u64 delalloc_start; 1443 u64 delalloc_end; 1444 u64 found; 1445 struct extent_state *cached_state = NULL; 1446 int ret; 1447 int loops = 0; 1448 1449 again: 1450 /* step one, find a bunch of delalloc bytes starting at start */ 1451 delalloc_start = *start; 1452 delalloc_end = 0; 1453 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, 1454 max_bytes, &cached_state); 1455 if (!found || delalloc_end <= *start) { 1456 *start = delalloc_start; 1457 *end = delalloc_end; 1458 free_extent_state(cached_state); 1459 return found; 1460 } 1461 1462 /* 1463 * start comes from the offset of locked_page. We have to lock 1464 * pages in order, so we can't process delalloc bytes before 1465 * locked_page 1466 */ 1467 if (delalloc_start < *start) 1468 delalloc_start = *start; 1469 1470 /* 1471 * make sure to limit the number of pages we try to lock down 1472 * if we're looping. 
1473 */ 1474 if (delalloc_end + 1 - delalloc_start > max_bytes && loops) 1475 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1; 1476 1477 /* step two, lock all the pages after the page that has start */ 1478 ret = lock_delalloc_pages(inode, locked_page, 1479 delalloc_start, delalloc_end); 1480 if (ret == -EAGAIN) { 1481 /* some of the pages are gone, lets avoid looping by 1482 * shortening the size of the delalloc range we're searching 1483 */ 1484 free_extent_state(cached_state); 1485 if (!loops) { 1486 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1); 1487 max_bytes = PAGE_CACHE_SIZE - offset; 1488 loops = 1; 1489 goto again; 1490 } else { 1491 found = 0; 1492 goto out_failed; 1493 } 1494 } 1495 BUG_ON(ret); 1496 1497 /* step three, lock the state bits for the whole range */ 1498 lock_extent_bits(tree, delalloc_start, delalloc_end, 1499 0, &cached_state, GFP_NOFS); 1500 1501 /* then test to make sure it is all still delalloc */ 1502 ret = test_range_bit(tree, delalloc_start, delalloc_end, 1503 EXTENT_DELALLOC, 1, cached_state); 1504 if (!ret) { 1505 unlock_extent_cached(tree, delalloc_start, delalloc_end, 1506 &cached_state, GFP_NOFS); 1507 __unlock_for_delalloc(inode, locked_page, 1508 delalloc_start, delalloc_end); 1509 cond_resched(); 1510 goto again; 1511 } 1512 free_extent_state(cached_state); 1513 *start = delalloc_start; 1514 *end = delalloc_end; 1515 out_failed: 1516 return found; 1517 } 1518 1519 int extent_clear_unlock_delalloc(struct inode *inode, 1520 struct extent_io_tree *tree, 1521 u64 start, u64 end, struct page *locked_page, 1522 unsigned long op) 1523 { 1524 int ret; 1525 struct page *pages[16]; 1526 unsigned long index = start >> PAGE_CACHE_SHIFT; 1527 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1528 unsigned long nr_pages = end_index - index + 1; 1529 int i; 1530 int clear_bits = 0; 1531 1532 if (op & EXTENT_CLEAR_UNLOCK) 1533 clear_bits |= EXTENT_LOCKED; 1534 if (op & EXTENT_CLEAR_DIRTY) 1535 clear_bits |= EXTENT_DIRTY; 1536 1537 if (op & EXTENT_CLEAR_DELALLOC) 1538 clear_bits |= EXTENT_DELALLOC; 1539 1540 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); 1541 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY | 1542 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK | 1543 EXTENT_SET_PRIVATE2))) 1544 return 0; 1545 1546 while (nr_pages > 0) { 1547 ret = find_get_pages_contig(inode->i_mapping, index, 1548 min_t(unsigned long, 1549 nr_pages, ARRAY_SIZE(pages)), pages); 1550 for (i = 0; i < ret; i++) { 1551 1552 if (op & EXTENT_SET_PRIVATE2) 1553 SetPagePrivate2(pages[i]); 1554 1555 if (pages[i] == locked_page) { 1556 page_cache_release(pages[i]); 1557 continue; 1558 } 1559 if (op & EXTENT_CLEAR_DIRTY) 1560 clear_page_dirty_for_io(pages[i]); 1561 if (op & EXTENT_SET_WRITEBACK) 1562 set_page_writeback(pages[i]); 1563 if (op & EXTENT_END_WRITEBACK) 1564 end_page_writeback(pages[i]); 1565 if (op & EXTENT_CLEAR_UNLOCK_PAGE) 1566 unlock_page(pages[i]); 1567 page_cache_release(pages[i]); 1568 } 1569 nr_pages -= ret; 1570 index += ret; 1571 cond_resched(); 1572 } 1573 return 0; 1574 } 1575 1576 /* 1577 * count the number of bytes in the tree that have a given bit(s) 1578 * set. This can be fairly slow, except for EXTENT_DIRTY which is 1579 * cached. The total number found is returned. 
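 *
 * Usage sketch (editor's example, not in the original source; 'tree' is a
 * caller-provided io tree): summing the delalloc bytes in the first
 * megabyte and remembering where the first match started:
 *
 *    u64 found_start = 0;
 *    u64 bytes;
 *
 *    bytes = count_range_bits(tree, &found_start, 1024 * 1024 - 1,
 *                             (u64)-1, EXTENT_DELALLOC, 0);
 *
 * On return, 'bytes' holds the number of matching bytes up to search_end
 * and, if anything matched, 'found_start' is moved to the first matching
 * offset.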
1580 */ 1581 u64 count_range_bits(struct extent_io_tree *tree, 1582 u64 *start, u64 search_end, u64 max_bytes, 1583 unsigned long bits, int contig) 1584 { 1585 struct rb_node *node; 1586 struct extent_state *state; 1587 u64 cur_start = *start; 1588 u64 total_bytes = 0; 1589 u64 last = 0; 1590 int found = 0; 1591 1592 if (search_end <= cur_start) { 1593 WARN_ON(1); 1594 return 0; 1595 } 1596 1597 spin_lock(&tree->lock); 1598 if (cur_start == 0 && bits == EXTENT_DIRTY) { 1599 total_bytes = tree->dirty_bytes; 1600 goto out; 1601 } 1602 /* 1603 * this search will find all the extents that end after 1604 * our range starts. 1605 */ 1606 node = tree_search(tree, cur_start); 1607 if (!node) 1608 goto out; 1609 1610 while (1) { 1611 state = rb_entry(node, struct extent_state, rb_node); 1612 if (state->start > search_end) 1613 break; 1614 if (contig && found && state->start > last + 1) 1615 break; 1616 if (state->end >= cur_start && (state->state & bits) == bits) { 1617 total_bytes += min(search_end, state->end) + 1 - 1618 max(cur_start, state->start); 1619 if (total_bytes >= max_bytes) 1620 break; 1621 if (!found) { 1622 *start = max(cur_start, state->start); 1623 found = 1; 1624 } 1625 last = state->end; 1626 } else if (contig && found) { 1627 break; 1628 } 1629 node = rb_next(node); 1630 if (!node) 1631 break; 1632 } 1633 out: 1634 spin_unlock(&tree->lock); 1635 return total_bytes; 1636 } 1637 1638 /* 1639 * set the private field for a given byte offset in the tree. If there isn't 1640 * an extent_state there already, this does nothing. 1641 */ 1642 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) 1643 { 1644 struct rb_node *node; 1645 struct extent_state *state; 1646 int ret = 0; 1647 1648 spin_lock(&tree->lock); 1649 /* 1650 * this search will find all the extents that end after 1651 * our range starts. 1652 */ 1653 node = tree_search(tree, start); 1654 if (!node) { 1655 ret = -ENOENT; 1656 goto out; 1657 } 1658 state = rb_entry(node, struct extent_state, rb_node); 1659 if (state->start != start) { 1660 ret = -ENOENT; 1661 goto out; 1662 } 1663 state->private = private; 1664 out: 1665 spin_unlock(&tree->lock); 1666 return ret; 1667 } 1668 1669 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) 1670 { 1671 struct rb_node *node; 1672 struct extent_state *state; 1673 int ret = 0; 1674 1675 spin_lock(&tree->lock); 1676 /* 1677 * this search will find all the extents that end after 1678 * our range starts. 1679 */ 1680 node = tree_search(tree, start); 1681 if (!node) { 1682 ret = -ENOENT; 1683 goto out; 1684 } 1685 state = rb_entry(node, struct extent_state, rb_node); 1686 if (state->start != start) { 1687 ret = -ENOENT; 1688 goto out; 1689 } 1690 *private = state->private; 1691 out: 1692 spin_unlock(&tree->lock); 1693 return ret; 1694 } 1695 1696 /* 1697 * searches a range in the state tree for a given mask. 1698 * If 'filled' == 1, this returns 1 only if every extent in the tree 1699 * has the bits set. Otherwise, 1 is returned if any bit in the 1700 * range is found set. 
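 *
 * Usage sketch (editor's example, not in the original source): checking
 * whether one page worth of bytes is entirely uptodate, the same pattern
 * check_page_uptodate() uses below:
 *
 *    if (test_range_bit(tree, start, start + PAGE_CACHE_SIZE - 1,
 *                       EXTENT_UPTODATE, 1, NULL))
 *        SetPageUptodate(page);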
1701 */ 1702 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, 1703 int bits, int filled, struct extent_state *cached) 1704 { 1705 struct extent_state *state = NULL; 1706 struct rb_node *node; 1707 int bitset = 0; 1708 1709 spin_lock(&tree->lock); 1710 if (cached && cached->tree && cached->start <= start && 1711 cached->end > start) 1712 node = &cached->rb_node; 1713 else 1714 node = tree_search(tree, start); 1715 while (node && start <= end) { 1716 state = rb_entry(node, struct extent_state, rb_node); 1717 1718 if (filled && state->start > start) { 1719 bitset = 0; 1720 break; 1721 } 1722 1723 if (state->start > end) 1724 break; 1725 1726 if (state->state & bits) { 1727 bitset = 1; 1728 if (!filled) 1729 break; 1730 } else if (filled) { 1731 bitset = 0; 1732 break; 1733 } 1734 1735 if (state->end == (u64)-1) 1736 break; 1737 1738 start = state->end + 1; 1739 if (start > end) 1740 break; 1741 node = rb_next(node); 1742 if (!node) { 1743 if (filled) 1744 bitset = 0; 1745 break; 1746 } 1747 } 1748 spin_unlock(&tree->lock); 1749 return bitset; 1750 } 1751 1752 /* 1753 * helper function to set a given page up to date if all the 1754 * extents in the tree for that page are up to date 1755 */ 1756 static int check_page_uptodate(struct extent_io_tree *tree, 1757 struct page *page) 1758 { 1759 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 1760 u64 end = start + PAGE_CACHE_SIZE - 1; 1761 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) 1762 SetPageUptodate(page); 1763 return 0; 1764 } 1765 1766 /* 1767 * helper function to unlock a page if all the extents in the tree 1768 * for that page are unlocked 1769 */ 1770 static int check_page_locked(struct extent_io_tree *tree, 1771 struct page *page) 1772 { 1773 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 1774 u64 end = start + PAGE_CACHE_SIZE - 1; 1775 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) 1776 unlock_page(page); 1777 return 0; 1778 } 1779 1780 /* 1781 * helper function to end page writeback if all the extents 1782 * in the tree for that page are done with writeback 1783 */ 1784 static int check_page_writeback(struct extent_io_tree *tree, 1785 struct page *page) 1786 { 1787 end_page_writeback(page); 1788 return 0; 1789 } 1790 1791 /* 1792 * When IO fails, either with EIO or csum verification fails, we 1793 * try other mirrors that might have a good copy of the data. This 1794 * io_failure_record is used to record state as we go through all the 1795 * mirrors. If another mirror has good data, the page is set up to date 1796 * and things continue. If a good mirror can't be found, the original 1797 * bio end_io callback is called to indicate things have failed. 
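 *
 * In outline (editor's summary of the functions below, not part of the
 * original comment): a failed read goes through bio_readpage_error(),
 * which records an io_failure_record and resubmits the read against
 * another mirror; when a later read of that range completes cleanly,
 * clean_io_failure() looks the record up again and, if more than one copy
 * exists, rewrites the bad copy via repair_io_failure() before the record
 * is freed with free_io_failure().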
1798 */ 1799 struct io_failure_record { 1800 struct page *page; 1801 u64 start; 1802 u64 len; 1803 u64 logical; 1804 unsigned long bio_flags; 1805 int this_mirror; 1806 int failed_mirror; 1807 int in_validation; 1808 }; 1809 1810 static int free_io_failure(struct inode *inode, struct io_failure_record *rec, 1811 int did_repair) 1812 { 1813 int ret; 1814 int err = 0; 1815 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 1816 1817 set_state_private(failure_tree, rec->start, 0); 1818 ret = clear_extent_bits(failure_tree, rec->start, 1819 rec->start + rec->len - 1, 1820 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); 1821 if (ret) 1822 err = ret; 1823 1824 if (did_repair) { 1825 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start, 1826 rec->start + rec->len - 1, 1827 EXTENT_DAMAGED, GFP_NOFS); 1828 if (ret && !err) 1829 err = ret; 1830 } 1831 1832 kfree(rec); 1833 return err; 1834 } 1835 1836 static void repair_io_failure_callback(struct bio *bio, int err) 1837 { 1838 complete(bio->bi_private); 1839 } 1840 1841 /* 1842 * this bypasses the standard btrfs submit functions deliberately, as 1843 * the standard behavior is to write all copies in a raid setup. here we only 1844 * want to write the one bad copy. so we do the mapping for ourselves and issue 1845 * submit_bio directly. 1846 * to avoid any synchonization issues, wait for the data after writing, which 1847 * actually prevents the read that triggered the error from finishing. 1848 * currently, there can be no more than two copies of every data bit. thus, 1849 * exactly one rewrite is required. 1850 */ 1851 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start, 1852 u64 length, u64 logical, struct page *page, 1853 int mirror_num) 1854 { 1855 struct bio *bio; 1856 struct btrfs_device *dev; 1857 DECLARE_COMPLETION_ONSTACK(compl); 1858 u64 map_length = 0; 1859 u64 sector; 1860 struct btrfs_bio *bbio = NULL; 1861 int ret; 1862 1863 BUG_ON(!mirror_num); 1864 1865 bio = bio_alloc(GFP_NOFS, 1); 1866 if (!bio) 1867 return -EIO; 1868 bio->bi_private = &compl; 1869 bio->bi_end_io = repair_io_failure_callback; 1870 bio->bi_size = 0; 1871 map_length = length; 1872 1873 ret = btrfs_map_block(map_tree, WRITE, logical, 1874 &map_length, &bbio, mirror_num); 1875 if (ret) { 1876 bio_put(bio); 1877 return -EIO; 1878 } 1879 BUG_ON(mirror_num != bbio->mirror_num); 1880 sector = bbio->stripes[mirror_num-1].physical >> 9; 1881 bio->bi_sector = sector; 1882 dev = bbio->stripes[mirror_num-1].dev; 1883 kfree(bbio); 1884 if (!dev || !dev->bdev || !dev->writeable) { 1885 bio_put(bio); 1886 return -EIO; 1887 } 1888 bio->bi_bdev = dev->bdev; 1889 bio_add_page(bio, page, length, start-page_offset(page)); 1890 submit_bio(WRITE_SYNC, bio); 1891 wait_for_completion(&compl); 1892 1893 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 1894 /* try to remap that extent elsewhere? 
*/ 1895 bio_put(bio); 1896 return -EIO; 1897 } 1898 1899 printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s " 1900 "sector %llu)\n", page->mapping->host->i_ino, start, 1901 dev->name, sector); 1902 1903 bio_put(bio); 1904 return 0; 1905 } 1906 1907 /* 1908 * each time an IO finishes, we do a fast check in the IO failure tree 1909 * to see if we need to process or clean up an io_failure_record 1910 */ 1911 static int clean_io_failure(u64 start, struct page *page) 1912 { 1913 u64 private; 1914 u64 private_failure; 1915 struct io_failure_record *failrec; 1916 struct btrfs_mapping_tree *map_tree; 1917 struct extent_state *state; 1918 int num_copies; 1919 int did_repair = 0; 1920 int ret; 1921 struct inode *inode = page->mapping->host; 1922 1923 private = 0; 1924 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, 1925 (u64)-1, 1, EXTENT_DIRTY, 0); 1926 if (!ret) 1927 return 0; 1928 1929 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start, 1930 &private_failure); 1931 if (ret) 1932 return 0; 1933 1934 failrec = (struct io_failure_record *)(unsigned long) private_failure; 1935 BUG_ON(!failrec->this_mirror); 1936 1937 if (failrec->in_validation) { 1938 /* there was no real error, just free the record */ 1939 pr_debug("clean_io_failure: freeing dummy error at %llu\n", 1940 failrec->start); 1941 did_repair = 1; 1942 goto out; 1943 } 1944 1945 spin_lock(&BTRFS_I(inode)->io_tree.lock); 1946 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, 1947 failrec->start, 1948 EXTENT_LOCKED); 1949 spin_unlock(&BTRFS_I(inode)->io_tree.lock); 1950 1951 if (state && state->start == failrec->start) { 1952 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree; 1953 num_copies = btrfs_num_copies(map_tree, failrec->logical, 1954 failrec->len); 1955 if (num_copies > 1) { 1956 ret = repair_io_failure(map_tree, start, failrec->len, 1957 failrec->logical, page, 1958 failrec->failed_mirror); 1959 did_repair = !ret; 1960 } 1961 } 1962 1963 out: 1964 if (!ret) 1965 ret = free_io_failure(inode, failrec, did_repair); 1966 1967 return ret; 1968 } 1969 1970 /* 1971 * this is a generic handler for readpage errors (default 1972 * readpage_io_failed_hook). if other copies exist, read those and write back 1973 * good data to the failed position. 
does not investigate in remapping the 1974 * failed extent elsewhere, hoping the device will be smart enough to do this as 1975 * needed 1976 */ 1977 1978 static int bio_readpage_error(struct bio *failed_bio, struct page *page, 1979 u64 start, u64 end, int failed_mirror, 1980 struct extent_state *state) 1981 { 1982 struct io_failure_record *failrec = NULL; 1983 u64 private; 1984 struct extent_map *em; 1985 struct inode *inode = page->mapping->host; 1986 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 1987 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 1988 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 1989 struct bio *bio; 1990 int num_copies; 1991 int ret; 1992 int read_mode; 1993 u64 logical; 1994 1995 BUG_ON(failed_bio->bi_rw & REQ_WRITE); 1996 1997 ret = get_state_private(failure_tree, start, &private); 1998 if (ret) { 1999 failrec = kzalloc(sizeof(*failrec), GFP_NOFS); 2000 if (!failrec) 2001 return -ENOMEM; 2002 failrec->start = start; 2003 failrec->len = end - start + 1; 2004 failrec->this_mirror = 0; 2005 failrec->bio_flags = 0; 2006 failrec->in_validation = 0; 2007 2008 read_lock(&em_tree->lock); 2009 em = lookup_extent_mapping(em_tree, start, failrec->len); 2010 if (!em) { 2011 read_unlock(&em_tree->lock); 2012 kfree(failrec); 2013 return -EIO; 2014 } 2015 2016 if (em->start > start || em->start + em->len < start) { 2017 free_extent_map(em); 2018 em = NULL; 2019 } 2020 read_unlock(&em_tree->lock); 2021 2022 if (!em || IS_ERR(em)) { 2023 kfree(failrec); 2024 return -EIO; 2025 } 2026 logical = start - em->start; 2027 logical = em->block_start + logical; 2028 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 2029 logical = em->block_start; 2030 failrec->bio_flags = EXTENT_BIO_COMPRESSED; 2031 extent_set_compress_type(&failrec->bio_flags, 2032 em->compress_type); 2033 } 2034 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, " 2035 "len=%llu\n", logical, start, failrec->len); 2036 failrec->logical = logical; 2037 free_extent_map(em); 2038 2039 /* set the bits in the private failure tree */ 2040 ret = set_extent_bits(failure_tree, start, end, 2041 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); 2042 if (ret >= 0) 2043 ret = set_state_private(failure_tree, start, 2044 (u64)(unsigned long)failrec); 2045 /* set the bits in the inode's tree */ 2046 if (ret >= 0) 2047 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED, 2048 GFP_NOFS); 2049 if (ret < 0) { 2050 kfree(failrec); 2051 return ret; 2052 } 2053 } else { 2054 failrec = (struct io_failure_record *)(unsigned long)private; 2055 pr_debug("bio_readpage_error: (found) logical=%llu, " 2056 "start=%llu, len=%llu, validation=%d\n", 2057 failrec->logical, failrec->start, failrec->len, 2058 failrec->in_validation); 2059 /* 2060 * when data can be on disk more than twice, add to failrec here 2061 * (e.g. with a list for failed_mirror) to make 2062 * clean_io_failure() clean all those errors at once. 2063 */ 2064 } 2065 num_copies = btrfs_num_copies( 2066 &BTRFS_I(inode)->root->fs_info->mapping_tree, 2067 failrec->logical, failrec->len); 2068 if (num_copies == 1) { 2069 /* 2070 * we only have a single copy of the data, so don't bother with 2071 * all the retry and error correction code that follows. no 2072 * matter what the error is, it is very likely to persist. 2073 */ 2074 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. 
" 2075 "state=%p, num_copies=%d, next_mirror %d, " 2076 "failed_mirror %d\n", state, num_copies, 2077 failrec->this_mirror, failed_mirror); 2078 free_io_failure(inode, failrec, 0); 2079 return -EIO; 2080 } 2081 2082 if (!state) { 2083 spin_lock(&tree->lock); 2084 state = find_first_extent_bit_state(tree, failrec->start, 2085 EXTENT_LOCKED); 2086 if (state && state->start != failrec->start) 2087 state = NULL; 2088 spin_unlock(&tree->lock); 2089 } 2090 2091 /* 2092 * there are two premises: 2093 * a) deliver good data to the caller 2094 * b) correct the bad sectors on disk 2095 */ 2096 if (failed_bio->bi_vcnt > 1) { 2097 /* 2098 * to fulfill b), we need to know the exact failing sectors, as 2099 * we don't want to rewrite any more than the failed ones. thus, 2100 * we need separate read requests for the failed bio 2101 * 2102 * if the following BUG_ON triggers, our validation request got 2103 * merged. we need separate requests for our algorithm to work. 2104 */ 2105 BUG_ON(failrec->in_validation); 2106 failrec->in_validation = 1; 2107 failrec->this_mirror = failed_mirror; 2108 read_mode = READ_SYNC | REQ_FAILFAST_DEV; 2109 } else { 2110 /* 2111 * we're ready to fulfill a) and b) alongside. get a good copy 2112 * of the failed sector and if we succeed, we have setup 2113 * everything for repair_io_failure to do the rest for us. 2114 */ 2115 if (failrec->in_validation) { 2116 BUG_ON(failrec->this_mirror != failed_mirror); 2117 failrec->in_validation = 0; 2118 failrec->this_mirror = 0; 2119 } 2120 failrec->failed_mirror = failed_mirror; 2121 failrec->this_mirror++; 2122 if (failrec->this_mirror == failed_mirror) 2123 failrec->this_mirror++; 2124 read_mode = READ_SYNC; 2125 } 2126 2127 if (!state || failrec->this_mirror > num_copies) { 2128 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, " 2129 "next_mirror %d, failed_mirror %d\n", state, 2130 num_copies, failrec->this_mirror, failed_mirror); 2131 free_io_failure(inode, failrec, 0); 2132 return -EIO; 2133 } 2134 2135 bio = bio_alloc(GFP_NOFS, 1); 2136 bio->bi_private = state; 2137 bio->bi_end_io = failed_bio->bi_end_io; 2138 bio->bi_sector = failrec->logical >> 9; 2139 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; 2140 bio->bi_size = 0; 2141 2142 bio_add_page(bio, page, failrec->len, start - page_offset(page)); 2143 2144 pr_debug("bio_readpage_error: submitting new read[%#x] to " 2145 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode, 2146 failrec->this_mirror, num_copies, failrec->in_validation); 2147 2148 tree->ops->submit_bio_hook(inode, read_mode, bio, failrec->this_mirror, 2149 failrec->bio_flags, 0); 2150 return 0; 2151 } 2152 2153 /* lots and lots of room for performance fixes in the end_bio funcs */ 2154 2155 /* 2156 * after a writepage IO is done, we need to: 2157 * clear the uptodate bits on error 2158 * clear the writeback bits in the extent tree for this IO 2159 * end_page_writeback if the page has no more pending IO 2160 * 2161 * Scheduling is not allowed, so the extent state tree is expected 2162 * to have one and only one object corresponding to this IO. 
2163 */ 2164 static void end_bio_extent_writepage(struct bio *bio, int err) 2165 { 2166 int uptodate = err == 0; 2167 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 2168 struct extent_io_tree *tree; 2169 u64 start; 2170 u64 end; 2171 int whole_page; 2172 int ret; 2173 2174 do { 2175 struct page *page = bvec->bv_page; 2176 tree = &BTRFS_I(page->mapping->host)->io_tree; 2177 2178 start = ((u64)page->index << PAGE_CACHE_SHIFT) + 2179 bvec->bv_offset; 2180 end = start + bvec->bv_len - 1; 2181 2182 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) 2183 whole_page = 1; 2184 else 2185 whole_page = 0; 2186 2187 if (--bvec >= bio->bi_io_vec) 2188 prefetchw(&bvec->bv_page->flags); 2189 if (tree->ops && tree->ops->writepage_end_io_hook) { 2190 ret = tree->ops->writepage_end_io_hook(page, start, 2191 end, NULL, uptodate); 2192 if (ret) 2193 uptodate = 0; 2194 } 2195 2196 if (!uptodate && tree->ops && 2197 tree->ops->writepage_io_failed_hook) { 2198 ret = tree->ops->writepage_io_failed_hook(bio, page, 2199 start, end, NULL); 2200 if (ret == 0) { 2201 uptodate = (err == 0); 2202 continue; 2203 } 2204 } 2205 2206 if (!uptodate) { 2207 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS); 2208 ClearPageUptodate(page); 2209 SetPageError(page); 2210 } 2211 2212 if (whole_page) 2213 end_page_writeback(page); 2214 else 2215 check_page_writeback(tree, page); 2216 } while (bvec >= bio->bi_io_vec); 2217 2218 bio_put(bio); 2219 } 2220 2221 /* 2222 * after a readpage IO is done, we need to: 2223 * clear the uptodate bits on error 2224 * set the uptodate bits if things worked 2225 * set the page up to date if all extents in the tree are uptodate 2226 * clear the lock bit in the extent tree 2227 * unlock the page if there are no other extents locked for it 2228 * 2229 * Scheduling is not allowed, so the extent state tree is expected 2230 * to have one and only one object corresponding to this IO. 
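 *
 * If a read fails and more copies of the data exist, the range is
 * resubmitted via readpage_io_failed_hook or bio_readpage_error() and the
 * page is left locked until that retry completes.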
2231 */ 2232 static void end_bio_extent_readpage(struct bio *bio, int err) 2233 { 2234 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 2235 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; 2236 struct bio_vec *bvec = bio->bi_io_vec; 2237 struct extent_io_tree *tree; 2238 u64 start; 2239 u64 end; 2240 int whole_page; 2241 int ret; 2242 2243 if (err) 2244 uptodate = 0; 2245 2246 do { 2247 struct page *page = bvec->bv_page; 2248 struct extent_state *cached = NULL; 2249 struct extent_state *state; 2250 2251 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, " 2252 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err, 2253 (long int)bio->bi_bdev); 2254 tree = &BTRFS_I(page->mapping->host)->io_tree; 2255 2256 start = ((u64)page->index << PAGE_CACHE_SHIFT) + 2257 bvec->bv_offset; 2258 end = start + bvec->bv_len - 1; 2259 2260 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) 2261 whole_page = 1; 2262 else 2263 whole_page = 0; 2264 2265 if (++bvec <= bvec_end) 2266 prefetchw(&bvec->bv_page->flags); 2267 2268 spin_lock(&tree->lock); 2269 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED); 2270 if (state && state->start == start) { 2271 /* 2272 * take a reference on the state, unlock will drop 2273 * the ref 2274 */ 2275 cache_state(state, &cached); 2276 } 2277 spin_unlock(&tree->lock); 2278 2279 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 2280 ret = tree->ops->readpage_end_io_hook(page, start, end, 2281 state); 2282 if (ret) 2283 uptodate = 0; 2284 else 2285 clean_io_failure(start, page); 2286 } 2287 if (!uptodate) { 2288 u64 failed_mirror; 2289 failed_mirror = (u64)bio->bi_bdev; 2290 if (tree->ops && tree->ops->readpage_io_failed_hook) 2291 ret = tree->ops->readpage_io_failed_hook( 2292 bio, page, start, end, 2293 failed_mirror, state); 2294 else 2295 ret = bio_readpage_error(bio, page, start, end, 2296 failed_mirror, NULL); 2297 if (ret == 0) { 2298 uptodate = 2299 test_bit(BIO_UPTODATE, &bio->bi_flags); 2300 if (err) 2301 uptodate = 0; 2302 uncache_state(&cached); 2303 continue; 2304 } 2305 } 2306 2307 if (uptodate) { 2308 set_extent_uptodate(tree, start, end, &cached, 2309 GFP_ATOMIC); 2310 } 2311 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); 2312 2313 if (whole_page) { 2314 if (uptodate) { 2315 SetPageUptodate(page); 2316 } else { 2317 ClearPageUptodate(page); 2318 SetPageError(page); 2319 } 2320 unlock_page(page); 2321 } else { 2322 if (uptodate) { 2323 check_page_uptodate(tree, page); 2324 } else { 2325 ClearPageUptodate(page); 2326 SetPageError(page); 2327 } 2328 check_page_locked(tree, page); 2329 } 2330 } while (bvec <= bvec_end); 2331 2332 bio_put(bio); 2333 } 2334 2335 struct bio * 2336 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, 2337 gfp_t gfp_flags) 2338 { 2339 struct bio *bio; 2340 2341 bio = bio_alloc(gfp_flags, nr_vecs); 2342 2343 if (bio == NULL && (current->flags & PF_MEMALLOC)) { 2344 while (!bio && (nr_vecs /= 2)) 2345 bio = bio_alloc(gfp_flags, nr_vecs); 2346 } 2347 2348 if (bio) { 2349 bio->bi_size = 0; 2350 bio->bi_bdev = bdev; 2351 bio->bi_sector = first_sector; 2352 } 2353 return bio; 2354 } 2355 2356 static int submit_one_bio(int rw, struct bio *bio, int mirror_num, 2357 unsigned long bio_flags) 2358 { 2359 int ret = 0; 2360 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 2361 struct page *page = bvec->bv_page; 2362 struct extent_io_tree *tree = bio->bi_private; 2363 u64 start; 2364 2365 start = ((u64)page->index << PAGE_CACHE_SHIFT) + 
bvec->bv_offset; 2366 2367 bio->bi_private = NULL; 2368 2369 bio_get(bio); 2370 2371 if (tree->ops && tree->ops->submit_bio_hook) 2372 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, 2373 mirror_num, bio_flags, start); 2374 else 2375 submit_bio(rw, bio); 2376 2377 if (bio_flagged(bio, BIO_EOPNOTSUPP)) 2378 ret = -EOPNOTSUPP; 2379 bio_put(bio); 2380 return ret; 2381 } 2382 2383 static int submit_extent_page(int rw, struct extent_io_tree *tree, 2384 struct page *page, sector_t sector, 2385 size_t size, unsigned long offset, 2386 struct block_device *bdev, 2387 struct bio **bio_ret, 2388 unsigned long max_pages, 2389 bio_end_io_t end_io_func, 2390 int mirror_num, 2391 unsigned long prev_bio_flags, 2392 unsigned long bio_flags) 2393 { 2394 int ret = 0; 2395 struct bio *bio; 2396 int nr; 2397 int contig = 0; 2398 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED; 2399 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; 2400 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE); 2401 2402 if (bio_ret && *bio_ret) { 2403 bio = *bio_ret; 2404 if (old_compressed) 2405 contig = bio->bi_sector == sector; 2406 else 2407 contig = bio->bi_sector + (bio->bi_size >> 9) == 2408 sector; 2409 2410 if (prev_bio_flags != bio_flags || !contig || 2411 (tree->ops && tree->ops->merge_bio_hook && 2412 tree->ops->merge_bio_hook(page, offset, page_size, bio, 2413 bio_flags)) || 2414 bio_add_page(bio, page, page_size, offset) < page_size) { 2415 ret = submit_one_bio(rw, bio, mirror_num, 2416 prev_bio_flags); 2417 bio = NULL; 2418 } else { 2419 return 0; 2420 } 2421 } 2422 if (this_compressed) 2423 nr = BIO_MAX_PAGES; 2424 else 2425 nr = bio_get_nr_vecs(bdev); 2426 2427 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); 2428 if (!bio) 2429 return -ENOMEM; 2430 2431 bio_add_page(bio, page, page_size, offset); 2432 bio->bi_end_io = end_io_func; 2433 bio->bi_private = tree; 2434 2435 if (bio_ret) 2436 *bio_ret = bio; 2437 else 2438 ret = submit_one_bio(rw, bio, mirror_num, bio_flags); 2439 2440 return ret; 2441 } 2442 2443 void set_page_extent_mapped(struct page *page) 2444 { 2445 if (!PagePrivate(page)) { 2446 SetPagePrivate(page); 2447 page_cache_get(page); 2448 set_page_private(page, EXTENT_PAGE_PRIVATE); 2449 } 2450 } 2451 2452 static void set_page_extent_head(struct page *page, unsigned long len) 2453 { 2454 WARN_ON(!PagePrivate(page)); 2455 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); 2456 } 2457 2458 /* 2459 * basic readpage implementation. 
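 * The page is mapped one extent at a time: holes are zeroed in place,
 * ranges that are already uptodate are skipped, and everything else is
 * sent to submit_extent_page().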
Locked extent state structs are inserted 2460 * into the tree that are removed when the IO is done (by the end_io 2461 * handlers) 2462 */ 2463 static int __extent_read_full_page(struct extent_io_tree *tree, 2464 struct page *page, 2465 get_extent_t *get_extent, 2466 struct bio **bio, int mirror_num, 2467 unsigned long *bio_flags) 2468 { 2469 struct inode *inode = page->mapping->host; 2470 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 2471 u64 page_end = start + PAGE_CACHE_SIZE - 1; 2472 u64 end; 2473 u64 cur = start; 2474 u64 extent_offset; 2475 u64 last_byte = i_size_read(inode); 2476 u64 block_start; 2477 u64 cur_end; 2478 sector_t sector; 2479 struct extent_map *em; 2480 struct block_device *bdev; 2481 struct btrfs_ordered_extent *ordered; 2482 int ret; 2483 int nr = 0; 2484 size_t pg_offset = 0; 2485 size_t iosize; 2486 size_t disk_io_size; 2487 size_t blocksize = inode->i_sb->s_blocksize; 2488 unsigned long this_bio_flag = 0; 2489 2490 set_page_extent_mapped(page); 2491 2492 if (!PageUptodate(page)) { 2493 if (cleancache_get_page(page) == 0) { 2494 BUG_ON(blocksize != PAGE_SIZE); 2495 goto out; 2496 } 2497 } 2498 2499 end = page_end; 2500 while (1) { 2501 lock_extent(tree, start, end, GFP_NOFS); 2502 ordered = btrfs_lookup_ordered_extent(inode, start); 2503 if (!ordered) 2504 break; 2505 unlock_extent(tree, start, end, GFP_NOFS); 2506 btrfs_start_ordered_extent(inode, ordered, 1); 2507 btrfs_put_ordered_extent(ordered); 2508 } 2509 2510 if (page->index == last_byte >> PAGE_CACHE_SHIFT) { 2511 char *userpage; 2512 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1); 2513 2514 if (zero_offset) { 2515 iosize = PAGE_CACHE_SIZE - zero_offset; 2516 userpage = kmap_atomic(page, KM_USER0); 2517 memset(userpage + zero_offset, 0, iosize); 2518 flush_dcache_page(page); 2519 kunmap_atomic(userpage, KM_USER0); 2520 } 2521 } 2522 while (cur <= end) { 2523 if (cur >= last_byte) { 2524 char *userpage; 2525 struct extent_state *cached = NULL; 2526 2527 iosize = PAGE_CACHE_SIZE - pg_offset; 2528 userpage = kmap_atomic(page, KM_USER0); 2529 memset(userpage + pg_offset, 0, iosize); 2530 flush_dcache_page(page); 2531 kunmap_atomic(userpage, KM_USER0); 2532 set_extent_uptodate(tree, cur, cur + iosize - 1, 2533 &cached, GFP_NOFS); 2534 unlock_extent_cached(tree, cur, cur + iosize - 1, 2535 &cached, GFP_NOFS); 2536 break; 2537 } 2538 em = get_extent(inode, page, pg_offset, cur, 2539 end - cur + 1, 0); 2540 if (IS_ERR_OR_NULL(em)) { 2541 SetPageError(page); 2542 unlock_extent(tree, cur, end, GFP_NOFS); 2543 break; 2544 } 2545 extent_offset = cur - em->start; 2546 BUG_ON(extent_map_end(em) <= cur); 2547 BUG_ON(end < cur); 2548 2549 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 2550 this_bio_flag = EXTENT_BIO_COMPRESSED; 2551 extent_set_compress_type(&this_bio_flag, 2552 em->compress_type); 2553 } 2554 2555 iosize = min(extent_map_end(em) - cur, end - cur + 1); 2556 cur_end = min(extent_map_end(em) - 1, end); 2557 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1); 2558 if (this_bio_flag & EXTENT_BIO_COMPRESSED) { 2559 disk_io_size = em->block_len; 2560 sector = em->block_start >> 9; 2561 } else { 2562 sector = (em->block_start + extent_offset) >> 9; 2563 disk_io_size = iosize; 2564 } 2565 bdev = em->bdev; 2566 block_start = em->block_start; 2567 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 2568 block_start = EXTENT_MAP_HOLE; 2569 free_extent_map(em); 2570 em = NULL; 2571 2572 /* we've found a hole, just zero and go on */ 2573 if (block_start == EXTENT_MAP_HOLE) { 2574 char *userpage; 
2575 struct extent_state *cached = NULL; 2576 2577 userpage = kmap_atomic(page, KM_USER0); 2578 memset(userpage + pg_offset, 0, iosize); 2579 flush_dcache_page(page); 2580 kunmap_atomic(userpage, KM_USER0); 2581 2582 set_extent_uptodate(tree, cur, cur + iosize - 1, 2583 &cached, GFP_NOFS); 2584 unlock_extent_cached(tree, cur, cur + iosize - 1, 2585 &cached, GFP_NOFS); 2586 cur = cur + iosize; 2587 pg_offset += iosize; 2588 continue; 2589 } 2590 /* the get_extent function already copied into the page */ 2591 if (test_range_bit(tree, cur, cur_end, 2592 EXTENT_UPTODATE, 1, NULL)) { 2593 check_page_uptodate(tree, page); 2594 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2595 cur = cur + iosize; 2596 pg_offset += iosize; 2597 continue; 2598 } 2599 /* we have an inline extent but it didn't get marked up 2600 * to date. Error out 2601 */ 2602 if (block_start == EXTENT_MAP_INLINE) { 2603 SetPageError(page); 2604 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2605 cur = cur + iosize; 2606 pg_offset += iosize; 2607 continue; 2608 } 2609 2610 ret = 0; 2611 if (tree->ops && tree->ops->readpage_io_hook) { 2612 ret = tree->ops->readpage_io_hook(page, cur, 2613 cur + iosize - 1); 2614 } 2615 if (!ret) { 2616 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; 2617 pnr -= page->index; 2618 ret = submit_extent_page(READ, tree, page, 2619 sector, disk_io_size, pg_offset, 2620 bdev, bio, pnr, 2621 end_bio_extent_readpage, mirror_num, 2622 *bio_flags, 2623 this_bio_flag); 2624 nr++; 2625 *bio_flags = this_bio_flag; 2626 } 2627 if (ret) 2628 SetPageError(page); 2629 cur = cur + iosize; 2630 pg_offset += iosize; 2631 } 2632 out: 2633 if (!nr) { 2634 if (!PageError(page)) 2635 SetPageUptodate(page); 2636 unlock_page(page); 2637 } 2638 return 0; 2639 } 2640 2641 int extent_read_full_page(struct extent_io_tree *tree, struct page *page, 2642 get_extent_t *get_extent, int mirror_num) 2643 { 2644 struct bio *bio = NULL; 2645 unsigned long bio_flags = 0; 2646 int ret; 2647 2648 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num, 2649 &bio_flags); 2650 if (bio) 2651 ret = submit_one_bio(READ, bio, mirror_num, bio_flags); 2652 return ret; 2653 } 2654 2655 static noinline void update_nr_written(struct page *page, 2656 struct writeback_control *wbc, 2657 unsigned long nr_written) 2658 { 2659 wbc->nr_to_write -= nr_written; 2660 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && 2661 wbc->range_start == 0 && wbc->range_end == LLONG_MAX)) 2662 page->mapping->writeback_index = page->index + nr_written; 2663 } 2664 2665 /* 2666 * the writepage semantics are similar to regular writepage. extent 2667 * records are inserted to lock ranges in the tree, and as dirty areas 2668 * are found, they are marked writeback. 
Then the lock bits are removed 2669 * and the end_io handler clears the writeback ranges 2670 */ 2671 static int __extent_writepage(struct page *page, struct writeback_control *wbc, 2672 void *data) 2673 { 2674 struct inode *inode = page->mapping->host; 2675 struct extent_page_data *epd = data; 2676 struct extent_io_tree *tree = epd->tree; 2677 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 2678 u64 delalloc_start; 2679 u64 page_end = start + PAGE_CACHE_SIZE - 1; 2680 u64 end; 2681 u64 cur = start; 2682 u64 extent_offset; 2683 u64 last_byte = i_size_read(inode); 2684 u64 block_start; 2685 u64 iosize; 2686 sector_t sector; 2687 struct extent_state *cached_state = NULL; 2688 struct extent_map *em; 2689 struct block_device *bdev; 2690 int ret; 2691 int nr = 0; 2692 size_t pg_offset = 0; 2693 size_t blocksize; 2694 loff_t i_size = i_size_read(inode); 2695 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; 2696 u64 nr_delalloc; 2697 u64 delalloc_end; 2698 int page_started; 2699 int compressed; 2700 int write_flags; 2701 unsigned long nr_written = 0; 2702 bool fill_delalloc = true; 2703 2704 if (wbc->sync_mode == WB_SYNC_ALL) 2705 write_flags = WRITE_SYNC; 2706 else 2707 write_flags = WRITE; 2708 2709 trace___extent_writepage(page, inode, wbc); 2710 2711 WARN_ON(!PageLocked(page)); 2712 2713 ClearPageError(page); 2714 2715 pg_offset = i_size & (PAGE_CACHE_SIZE - 1); 2716 if (page->index > end_index || 2717 (page->index == end_index && !pg_offset)) { 2718 page->mapping->a_ops->invalidatepage(page, 0); 2719 unlock_page(page); 2720 return 0; 2721 } 2722 2723 if (page->index == end_index) { 2724 char *userpage; 2725 2726 userpage = kmap_atomic(page, KM_USER0); 2727 memset(userpage + pg_offset, 0, 2728 PAGE_CACHE_SIZE - pg_offset); 2729 kunmap_atomic(userpage, KM_USER0); 2730 flush_dcache_page(page); 2731 } 2732 pg_offset = 0; 2733 2734 set_page_extent_mapped(page); 2735 2736 if (!tree->ops || !tree->ops->fill_delalloc) 2737 fill_delalloc = false; 2738 2739 delalloc_start = start; 2740 delalloc_end = 0; 2741 page_started = 0; 2742 if (!epd->extent_locked && fill_delalloc) { 2743 u64 delalloc_to_write = 0; 2744 /* 2745 * make sure the wbc mapping index is at least updated 2746 * to this page. 2747 */ 2748 update_nr_written(page, wbc, 0); 2749 2750 while (delalloc_end < page_end) { 2751 nr_delalloc = find_lock_delalloc_range(inode, tree, 2752 page, 2753 &delalloc_start, 2754 &delalloc_end, 2755 128 * 1024 * 1024); 2756 if (nr_delalloc == 0) { 2757 delalloc_start = delalloc_end + 1; 2758 continue; 2759 } 2760 tree->ops->fill_delalloc(inode, page, delalloc_start, 2761 delalloc_end, &page_started, 2762 &nr_written); 2763 /* 2764 * delalloc_end is already one less than the total 2765 * length, so we don't subtract one from 2766 * PAGE_CACHE_SIZE 2767 */ 2768 delalloc_to_write += (delalloc_end - delalloc_start + 2769 PAGE_CACHE_SIZE) >> 2770 PAGE_CACHE_SHIFT; 2771 delalloc_start = delalloc_end + 1; 2772 } 2773 if (wbc->nr_to_write < delalloc_to_write) { 2774 int thresh = 8192; 2775 2776 if (delalloc_to_write < thresh * 2) 2777 thresh = delalloc_to_write; 2778 wbc->nr_to_write = min_t(u64, delalloc_to_write, 2779 thresh); 2780 } 2781 2782 /* did the fill delalloc function already unlock and start 2783 * the IO? 2784 */ 2785 if (page_started) { 2786 ret = 0; 2787 /* 2788 * we've unlocked the page, so we can't update 2789 * the mapping's writeback index, just update 2790 * nr_to_write. 
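 * (the index was already advanced to this page before the delalloc
 * loop started)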
2791 */ 2792 wbc->nr_to_write -= nr_written; 2793 goto done_unlocked; 2794 } 2795 } 2796 if (tree->ops && tree->ops->writepage_start_hook) { 2797 ret = tree->ops->writepage_start_hook(page, start, 2798 page_end); 2799 if (ret == -EAGAIN) { 2800 redirty_page_for_writepage(wbc, page); 2801 update_nr_written(page, wbc, nr_written); 2802 unlock_page(page); 2803 ret = 0; 2804 goto done_unlocked; 2805 } 2806 } 2807 2808 /* 2809 * we don't want to touch the inode after unlocking the page, 2810 * so we update the mapping writeback index now 2811 */ 2812 update_nr_written(page, wbc, nr_written + 1); 2813 2814 end = page_end; 2815 if (last_byte <= start) { 2816 if (tree->ops && tree->ops->writepage_end_io_hook) 2817 tree->ops->writepage_end_io_hook(page, start, 2818 page_end, NULL, 1); 2819 goto done; 2820 } 2821 2822 blocksize = inode->i_sb->s_blocksize; 2823 2824 while (cur <= end) { 2825 if (cur >= last_byte) { 2826 if (tree->ops && tree->ops->writepage_end_io_hook) 2827 tree->ops->writepage_end_io_hook(page, cur, 2828 page_end, NULL, 1); 2829 break; 2830 } 2831 em = epd->get_extent(inode, page, pg_offset, cur, 2832 end - cur + 1, 1); 2833 if (IS_ERR_OR_NULL(em)) { 2834 SetPageError(page); 2835 break; 2836 } 2837 2838 extent_offset = cur - em->start; 2839 BUG_ON(extent_map_end(em) <= cur); 2840 BUG_ON(end < cur); 2841 iosize = min(extent_map_end(em) - cur, end - cur + 1); 2842 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1); 2843 sector = (em->block_start + extent_offset) >> 9; 2844 bdev = em->bdev; 2845 block_start = em->block_start; 2846 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 2847 free_extent_map(em); 2848 em = NULL; 2849 2850 /* 2851 * compressed and inline extents are written through other 2852 * paths in the FS 2853 */ 2854 if (compressed || block_start == EXTENT_MAP_HOLE || 2855 block_start == EXTENT_MAP_INLINE) { 2856 /* 2857 * end_io notification does not happen here for 2858 * compressed extents 2859 */ 2860 if (!compressed && tree->ops && 2861 tree->ops->writepage_end_io_hook) 2862 tree->ops->writepage_end_io_hook(page, cur, 2863 cur + iosize - 1, 2864 NULL, 1); 2865 else if (compressed) { 2866 /* we don't want to end_page_writeback on 2867 * a compressed extent. 
this happens 2868 * elsewhere 2869 */ 2870 nr++; 2871 } 2872 2873 cur += iosize; 2874 pg_offset += iosize; 2875 continue; 2876 } 2877 /* leave this out until we have a page_mkwrite call */ 2878 if (0 && !test_range_bit(tree, cur, cur + iosize - 1, 2879 EXTENT_DIRTY, 0, NULL)) { 2880 cur = cur + iosize; 2881 pg_offset += iosize; 2882 continue; 2883 } 2884 2885 if (tree->ops && tree->ops->writepage_io_hook) { 2886 ret = tree->ops->writepage_io_hook(page, cur, 2887 cur + iosize - 1); 2888 } else { 2889 ret = 0; 2890 } 2891 if (ret) { 2892 SetPageError(page); 2893 } else { 2894 unsigned long max_nr = end_index + 1; 2895 2896 set_range_writeback(tree, cur, cur + iosize - 1); 2897 if (!PageWriteback(page)) { 2898 printk(KERN_ERR "btrfs warning page %lu not " 2899 "writeback, cur %llu end %llu\n", 2900 page->index, (unsigned long long)cur, 2901 (unsigned long long)end); 2902 } 2903 2904 ret = submit_extent_page(write_flags, tree, page, 2905 sector, iosize, pg_offset, 2906 bdev, &epd->bio, max_nr, 2907 end_bio_extent_writepage, 2908 0, 0, 0); 2909 if (ret) 2910 SetPageError(page); 2911 } 2912 cur = cur + iosize; 2913 pg_offset += iosize; 2914 nr++; 2915 } 2916 done: 2917 if (nr == 0) { 2918 /* make sure the mapping tag for page dirty gets cleared */ 2919 set_page_writeback(page); 2920 end_page_writeback(page); 2921 } 2922 unlock_page(page); 2923 2924 done_unlocked: 2925 2926 /* drop our reference on any cached states */ 2927 free_extent_state(cached_state); 2928 return 0; 2929 } 2930 2931 /** 2932 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. 2933 * @mapping: address space structure to write 2934 * @wbc: subtract the number of written pages from *@wbc->nr_to_write 2935 * @writepage: function called for each page 2936 * @data: data passed to writepage function 2937 * 2938 * If a page is already under I/O, write_cache_pages() skips it, even 2939 * if it's dirty. This is desirable behaviour for memory-cleaning writeback, 2940 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() 2941 * and msync() need to guarantee that all the data which was dirty at the time 2942 * the call was made get new I/O started against them. If wbc->sync_mode is 2943 * WB_SYNC_ALL then we were called for data integrity and we must wait for 2944 * existing IO to complete. 
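 *
 * For WB_SYNC_ALL the range is tagged with PAGECACHE_TAG_TOWRITE up front
 * (tag_pages_for_writeback) so that pages dirtied while we are writing are
 * not picked up again and the walk cannot livelock.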
2945 */ 2946 static int extent_write_cache_pages(struct extent_io_tree *tree, 2947 struct address_space *mapping, 2948 struct writeback_control *wbc, 2949 writepage_t writepage, void *data, 2950 void (*flush_fn)(void *)) 2951 { 2952 int ret = 0; 2953 int done = 0; 2954 int nr_to_write_done = 0; 2955 struct pagevec pvec; 2956 int nr_pages; 2957 pgoff_t index; 2958 pgoff_t end; /* Inclusive */ 2959 int scanned = 0; 2960 int tag; 2961 2962 pagevec_init(&pvec, 0); 2963 if (wbc->range_cyclic) { 2964 index = mapping->writeback_index; /* Start from prev offset */ 2965 end = -1; 2966 } else { 2967 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2968 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2969 scanned = 1; 2970 } 2971 if (wbc->sync_mode == WB_SYNC_ALL) 2972 tag = PAGECACHE_TAG_TOWRITE; 2973 else 2974 tag = PAGECACHE_TAG_DIRTY; 2975 retry: 2976 if (wbc->sync_mode == WB_SYNC_ALL) 2977 tag_pages_for_writeback(mapping, index, end); 2978 while (!done && !nr_to_write_done && (index <= end) && 2979 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 2980 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { 2981 unsigned i; 2982 2983 scanned = 1; 2984 for (i = 0; i < nr_pages; i++) { 2985 struct page *page = pvec.pages[i]; 2986 2987 /* 2988 * At this point we hold neither mapping->tree_lock nor 2989 * lock on the page itself: the page may be truncated or 2990 * invalidated (changing page->mapping to NULL), or even 2991 * swizzled back from swapper_space to tmpfs file 2992 * mapping 2993 */ 2994 if (tree->ops && 2995 tree->ops->write_cache_pages_lock_hook) { 2996 tree->ops->write_cache_pages_lock_hook(page, 2997 data, flush_fn); 2998 } else { 2999 if (!trylock_page(page)) { 3000 flush_fn(data); 3001 lock_page(page); 3002 } 3003 } 3004 3005 if (unlikely(page->mapping != mapping)) { 3006 unlock_page(page); 3007 continue; 3008 } 3009 3010 if (!wbc->range_cyclic && page->index > end) { 3011 done = 1; 3012 unlock_page(page); 3013 continue; 3014 } 3015 3016 if (wbc->sync_mode != WB_SYNC_NONE) { 3017 if (PageWriteback(page)) 3018 flush_fn(data); 3019 wait_on_page_writeback(page); 3020 } 3021 3022 if (PageWriteback(page) || 3023 !clear_page_dirty_for_io(page)) { 3024 unlock_page(page); 3025 continue; 3026 } 3027 3028 ret = (*writepage)(page, wbc, data); 3029 3030 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) { 3031 unlock_page(page); 3032 ret = 0; 3033 } 3034 if (ret) 3035 done = 1; 3036 3037 /* 3038 * the filesystem may choose to bump up nr_to_write. 
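 * (__extent_writepage does this when the delalloc it has to flush covers
 * more pages than the caller asked for).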
3039 * We have to make sure to honor the new nr_to_write 3040 * at any time 3041 */ 3042 nr_to_write_done = wbc->nr_to_write <= 0; 3043 } 3044 pagevec_release(&pvec); 3045 cond_resched(); 3046 } 3047 if (!scanned && !done) { 3048 /* 3049 * We hit the last page and there is more work to be done: wrap 3050 * back to the start of the file 3051 */ 3052 scanned = 1; 3053 index = 0; 3054 goto retry; 3055 } 3056 return ret; 3057 } 3058 3059 static void flush_epd_write_bio(struct extent_page_data *epd) 3060 { 3061 if (epd->bio) { 3062 if (epd->sync_io) 3063 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0); 3064 else 3065 submit_one_bio(WRITE, epd->bio, 0, 0); 3066 epd->bio = NULL; 3067 } 3068 } 3069 3070 static noinline void flush_write_bio(void *data) 3071 { 3072 struct extent_page_data *epd = data; 3073 flush_epd_write_bio(epd); 3074 } 3075 3076 int extent_write_full_page(struct extent_io_tree *tree, struct page *page, 3077 get_extent_t *get_extent, 3078 struct writeback_control *wbc) 3079 { 3080 int ret; 3081 struct extent_page_data epd = { 3082 .bio = NULL, 3083 .tree = tree, 3084 .get_extent = get_extent, 3085 .extent_locked = 0, 3086 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 3087 }; 3088 3089 ret = __extent_writepage(page, wbc, &epd); 3090 3091 flush_epd_write_bio(&epd); 3092 return ret; 3093 } 3094 3095 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, 3096 u64 start, u64 end, get_extent_t *get_extent, 3097 int mode) 3098 { 3099 int ret = 0; 3100 struct address_space *mapping = inode->i_mapping; 3101 struct page *page; 3102 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> 3103 PAGE_CACHE_SHIFT; 3104 3105 struct extent_page_data epd = { 3106 .bio = NULL, 3107 .tree = tree, 3108 .get_extent = get_extent, 3109 .extent_locked = 1, 3110 .sync_io = mode == WB_SYNC_ALL, 3111 }; 3112 struct writeback_control wbc_writepages = { 3113 .sync_mode = mode, 3114 .nr_to_write = nr_pages * 2, 3115 .range_start = start, 3116 .range_end = end + 1, 3117 }; 3118 3119 while (start <= end) { 3120 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 3121 if (clear_page_dirty_for_io(page)) 3122 ret = __extent_writepage(page, &wbc_writepages, &epd); 3123 else { 3124 if (tree->ops && tree->ops->writepage_end_io_hook) 3125 tree->ops->writepage_end_io_hook(page, start, 3126 start + PAGE_CACHE_SIZE - 1, 3127 NULL, 1); 3128 unlock_page(page); 3129 } 3130 page_cache_release(page); 3131 start += PAGE_CACHE_SIZE; 3132 } 3133 3134 flush_epd_write_bio(&epd); 3135 return ret; 3136 } 3137 3138 int extent_writepages(struct extent_io_tree *tree, 3139 struct address_space *mapping, 3140 get_extent_t *get_extent, 3141 struct writeback_control *wbc) 3142 { 3143 int ret = 0; 3144 struct extent_page_data epd = { 3145 .bio = NULL, 3146 .tree = tree, 3147 .get_extent = get_extent, 3148 .extent_locked = 0, 3149 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 3150 }; 3151 3152 ret = extent_write_cache_pages(tree, mapping, wbc, 3153 __extent_writepage, &epd, 3154 flush_write_bio); 3155 flush_epd_write_bio(&epd); 3156 return ret; 3157 } 3158 3159 int extent_readpages(struct extent_io_tree *tree, 3160 struct address_space *mapping, 3161 struct list_head *pages, unsigned nr_pages, 3162 get_extent_t get_extent) 3163 { 3164 struct bio *bio = NULL; 3165 unsigned page_idx; 3166 unsigned long bio_flags = 0; 3167 3168 for (page_idx = 0; page_idx < nr_pages; page_idx++) { 3169 struct page *page = list_entry(pages->prev, struct page, lru); 3170 3171 prefetchw(&page->flags); 3172 list_del(&page->lru); 3173 if 
(!add_to_page_cache_lru(page, mapping, 3174 page->index, GFP_NOFS)) { 3175 __extent_read_full_page(tree, page, get_extent, 3176 &bio, 0, &bio_flags); 3177 } 3178 page_cache_release(page); 3179 } 3180 BUG_ON(!list_empty(pages)); 3181 if (bio) 3182 submit_one_bio(READ, bio, 0, bio_flags); 3183 return 0; 3184 } 3185 3186 /* 3187 * basic invalidatepage code, this waits on any locked or writeback 3188 * ranges corresponding to the page, and then deletes any extent state 3189 * records from the tree 3190 */ 3191 int extent_invalidatepage(struct extent_io_tree *tree, 3192 struct page *page, unsigned long offset) 3193 { 3194 struct extent_state *cached_state = NULL; 3195 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT); 3196 u64 end = start + PAGE_CACHE_SIZE - 1; 3197 size_t blocksize = page->mapping->host->i_sb->s_blocksize; 3198 3199 start += (offset + blocksize - 1) & ~(blocksize - 1); 3200 if (start > end) 3201 return 0; 3202 3203 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); 3204 wait_on_page_writeback(page); 3205 clear_extent_bit(tree, start, end, 3206 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 3207 EXTENT_DO_ACCOUNTING, 3208 1, 1, &cached_state, GFP_NOFS); 3209 return 0; 3210 } 3211 3212 /* 3213 * a helper for releasepage, this tests for areas of the page that 3214 * are locked or under IO and drops the related state bits if it is safe 3215 * to drop the page. 3216 */ 3217 int try_release_extent_state(struct extent_map_tree *map, 3218 struct extent_io_tree *tree, struct page *page, 3219 gfp_t mask) 3220 { 3221 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 3222 u64 end = start + PAGE_CACHE_SIZE - 1; 3223 int ret = 1; 3224 3225 if (test_range_bit(tree, start, end, 3226 EXTENT_IOBITS, 0, NULL)) 3227 ret = 0; 3228 else { 3229 if ((mask & GFP_NOFS) == GFP_NOFS) 3230 mask = GFP_NOFS; 3231 /* 3232 * at this point we can safely clear everything except the 3233 * locked bit and the nodatasum bit 3234 */ 3235 ret = clear_extent_bit(tree, start, end, 3236 ~(EXTENT_LOCKED | EXTENT_NODATASUM), 3237 0, 0, NULL, mask); 3238 3239 /* if clear_extent_bit failed for enomem reasons, 3240 * we can't allow the release to continue. 3241 */ 3242 if (ret < 0) 3243 ret = 0; 3244 else 3245 ret = 1; 3246 } 3247 return ret; 3248 } 3249 3250 /* 3251 * a helper for releasepage. 
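 * It is more aggressive than try_release_extent_state() above: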
As long as there are no locked extents 3252 * in the range corresponding to the page, both state records and extent 3253 * map records are removed 3254 */ 3255 int try_release_extent_mapping(struct extent_map_tree *map, 3256 struct extent_io_tree *tree, struct page *page, 3257 gfp_t mask) 3258 { 3259 struct extent_map *em; 3260 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 3261 u64 end = start + PAGE_CACHE_SIZE - 1; 3262 3263 if ((mask & __GFP_WAIT) && 3264 page->mapping->host->i_size > 16 * 1024 * 1024) { 3265 u64 len; 3266 while (start <= end) { 3267 len = end - start + 1; 3268 write_lock(&map->lock); 3269 em = lookup_extent_mapping(map, start, len); 3270 if (IS_ERR_OR_NULL(em)) { 3271 write_unlock(&map->lock); 3272 break; 3273 } 3274 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) || 3275 em->start != start) { 3276 write_unlock(&map->lock); 3277 free_extent_map(em); 3278 break; 3279 } 3280 if (!test_range_bit(tree, em->start, 3281 extent_map_end(em) - 1, 3282 EXTENT_LOCKED | EXTENT_WRITEBACK, 3283 0, NULL)) { 3284 remove_extent_mapping(map, em); 3285 /* once for the rb tree */ 3286 free_extent_map(em); 3287 } 3288 start = extent_map_end(em); 3289 write_unlock(&map->lock); 3290 3291 /* once for us */ 3292 free_extent_map(em); 3293 } 3294 } 3295 return try_release_extent_state(map, tree, page, mask); 3296 } 3297 3298 /* 3299 * helper function for fiemap, which doesn't want to see any holes. 3300 * This maps until we find something past 'last' 3301 */ 3302 static struct extent_map *get_extent_skip_holes(struct inode *inode, 3303 u64 offset, 3304 u64 last, 3305 get_extent_t *get_extent) 3306 { 3307 u64 sectorsize = BTRFS_I(inode)->root->sectorsize; 3308 struct extent_map *em; 3309 u64 len; 3310 3311 if (offset >= last) 3312 return NULL; 3313 3314 while(1) { 3315 len = last - offset; 3316 if (len == 0) 3317 break; 3318 len = (len + sectorsize - 1) & ~(sectorsize - 1); 3319 em = get_extent(inode, NULL, 0, offset, len, 0); 3320 if (IS_ERR_OR_NULL(em)) 3321 return em; 3322 3323 /* if this isn't a hole return it */ 3324 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) && 3325 em->block_start != EXTENT_MAP_HOLE) { 3326 return em; 3327 } 3328 3329 /* this is a hole, advance to the next extent */ 3330 offset = extent_map_end(em); 3331 free_extent_map(em); 3332 if (offset >= last) 3333 break; 3334 } 3335 return NULL; 3336 } 3337 3338 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 3339 __u64 start, __u64 len, get_extent_t *get_extent) 3340 { 3341 int ret = 0; 3342 u64 off = start; 3343 u64 max = start + len; 3344 u32 flags = 0; 3345 u32 found_type; 3346 u64 last; 3347 u64 last_for_get_extent = 0; 3348 u64 disko = 0; 3349 u64 isize = i_size_read(inode); 3350 struct btrfs_key found_key; 3351 struct extent_map *em = NULL; 3352 struct extent_state *cached_state = NULL; 3353 struct btrfs_path *path; 3354 struct btrfs_file_extent_item *item; 3355 int end = 0; 3356 u64 em_start = 0; 3357 u64 em_len = 0; 3358 u64 em_end = 0; 3359 unsigned long emflags; 3360 3361 if (len == 0) 3362 return -EINVAL; 3363 3364 path = btrfs_alloc_path(); 3365 if (!path) 3366 return -ENOMEM; 3367 path->leave_spinning = 1; 3368 3369 /* 3370 * lookup the last file extent. 
We're not using i_size here 3371 * because there might be preallocation past i_size 3372 */ 3373 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, 3374 path, btrfs_ino(inode), -1, 0); 3375 if (ret < 0) { 3376 btrfs_free_path(path); 3377 return ret; 3378 } 3379 WARN_ON(!ret); 3380 path->slots[0]--; 3381 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3382 struct btrfs_file_extent_item); 3383 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); 3384 found_type = btrfs_key_type(&found_key); 3385 3386 /* No extents, but there might be delalloc bits */ 3387 if (found_key.objectid != btrfs_ino(inode) || 3388 found_type != BTRFS_EXTENT_DATA_KEY) { 3389 /* have to trust i_size as the end */ 3390 last = (u64)-1; 3391 last_for_get_extent = isize; 3392 } else { 3393 /* 3394 * remember the start of the last extent. There are a 3395 * bunch of different factors that go into the length of the 3396 * extent, so its much less complex to remember where it started 3397 */ 3398 last = found_key.offset; 3399 last_for_get_extent = last + 1; 3400 } 3401 btrfs_free_path(path); 3402 3403 /* 3404 * we might have some extents allocated but more delalloc past those 3405 * extents. so, we trust isize unless the start of the last extent is 3406 * beyond isize 3407 */ 3408 if (last < isize) { 3409 last = (u64)-1; 3410 last_for_get_extent = isize; 3411 } 3412 3413 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, 3414 &cached_state, GFP_NOFS); 3415 3416 em = get_extent_skip_holes(inode, off, last_for_get_extent, 3417 get_extent); 3418 if (!em) 3419 goto out; 3420 if (IS_ERR(em)) { 3421 ret = PTR_ERR(em); 3422 goto out; 3423 } 3424 3425 while (!end) { 3426 u64 offset_in_extent; 3427 3428 /* break if the extent we found is outside the range */ 3429 if (em->start >= max || extent_map_end(em) < off) 3430 break; 3431 3432 /* 3433 * get_extent may return an extent that starts before our 3434 * requested range. We have to make sure the ranges 3435 * we return to fiemap always move forward and don't 3436 * overlap, so adjust the offsets here 3437 */ 3438 em_start = max(em->start, off); 3439 3440 /* 3441 * record the offset from the start of the extent 3442 * for adjusting the disk offset below 3443 */ 3444 offset_in_extent = em_start - em->start; 3445 em_end = extent_map_end(em); 3446 em_len = em_end - em_start; 3447 emflags = em->flags; 3448 disko = 0; 3449 flags = 0; 3450 3451 /* 3452 * bump off for our next call to get_extent 3453 */ 3454 off = extent_map_end(em); 3455 if (off >= max) 3456 end = 1; 3457 3458 if (em->block_start == EXTENT_MAP_LAST_BYTE) { 3459 end = 1; 3460 flags |= FIEMAP_EXTENT_LAST; 3461 } else if (em->block_start == EXTENT_MAP_INLINE) { 3462 flags |= (FIEMAP_EXTENT_DATA_INLINE | 3463 FIEMAP_EXTENT_NOT_ALIGNED); 3464 } else if (em->block_start == EXTENT_MAP_DELALLOC) { 3465 flags |= (FIEMAP_EXTENT_DELALLOC | 3466 FIEMAP_EXTENT_UNKNOWN); 3467 } else { 3468 disko = em->block_start + offset_in_extent; 3469 } 3470 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) 3471 flags |= FIEMAP_EXTENT_ENCODED; 3472 3473 free_extent_map(em); 3474 em = NULL; 3475 if ((em_start >= last) || em_len == (u64)-1 || 3476 (last == (u64)-1 && isize <= em_end)) { 3477 flags |= FIEMAP_EXTENT_LAST; 3478 end = 1; 3479 } 3480 3481 /* now scan forward to see if this is really the last extent. 
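 * if only holes remain, the extent we are about to report gets
 * FIEMAP_EXTENT_LAST before it is handed to fiemap_fill_next_extent.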
*/ 3482 em = get_extent_skip_holes(inode, off, last_for_get_extent, 3483 get_extent); 3484 if (IS_ERR(em)) { 3485 ret = PTR_ERR(em); 3486 goto out; 3487 } 3488 if (!em) { 3489 flags |= FIEMAP_EXTENT_LAST; 3490 end = 1; 3491 } 3492 ret = fiemap_fill_next_extent(fieinfo, em_start, disko, 3493 em_len, flags); 3494 if (ret) 3495 goto out_free; 3496 } 3497 out_free: 3498 free_extent_map(em); 3499 out: 3500 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len, 3501 &cached_state, GFP_NOFS); 3502 return ret; 3503 } 3504 3505 inline struct page *extent_buffer_page(struct extent_buffer *eb, 3506 unsigned long i) 3507 { 3508 struct page *p; 3509 struct address_space *mapping; 3510 3511 if (i == 0) 3512 return eb->first_page; 3513 i += eb->start >> PAGE_CACHE_SHIFT; 3514 mapping = eb->first_page->mapping; 3515 if (!mapping) 3516 return NULL; 3517 3518 /* 3519 * extent_buffer_page is only called after pinning the page 3520 * by increasing the reference count. So we know the page must 3521 * be in the radix tree. 3522 */ 3523 rcu_read_lock(); 3524 p = radix_tree_lookup(&mapping->page_tree, i); 3525 rcu_read_unlock(); 3526 3527 return p; 3528 } 3529 3530 inline unsigned long num_extent_pages(u64 start, u64 len) 3531 { 3532 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 3533 (start >> PAGE_CACHE_SHIFT); 3534 } 3535 3536 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, 3537 u64 start, 3538 unsigned long len, 3539 gfp_t mask) 3540 { 3541 struct extent_buffer *eb = NULL; 3542 #if LEAK_DEBUG 3543 unsigned long flags; 3544 #endif 3545 3546 eb = kmem_cache_zalloc(extent_buffer_cache, mask); 3547 if (eb == NULL) 3548 return NULL; 3549 eb->start = start; 3550 eb->len = len; 3551 rwlock_init(&eb->lock); 3552 atomic_set(&eb->write_locks, 0); 3553 atomic_set(&eb->read_locks, 0); 3554 atomic_set(&eb->blocking_readers, 0); 3555 atomic_set(&eb->blocking_writers, 0); 3556 atomic_set(&eb->spinning_readers, 0); 3557 atomic_set(&eb->spinning_writers, 0); 3558 init_waitqueue_head(&eb->write_lock_wq); 3559 init_waitqueue_head(&eb->read_lock_wq); 3560 3561 #if LEAK_DEBUG 3562 spin_lock_irqsave(&leak_lock, flags); 3563 list_add(&eb->leak_list, &buffers); 3564 spin_unlock_irqrestore(&leak_lock, flags); 3565 #endif 3566 atomic_set(&eb->refs, 1); 3567 3568 return eb; 3569 } 3570 3571 static void __free_extent_buffer(struct extent_buffer *eb) 3572 { 3573 #if LEAK_DEBUG 3574 unsigned long flags; 3575 spin_lock_irqsave(&leak_lock, flags); 3576 list_del(&eb->leak_list); 3577 spin_unlock_irqrestore(&leak_lock, flags); 3578 #endif 3579 kmem_cache_free(extent_buffer_cache, eb); 3580 } 3581 3582 /* 3583 * Helper for releasing extent buffer page. 3584 */ 3585 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb, 3586 unsigned long start_idx) 3587 { 3588 unsigned long index; 3589 struct page *page; 3590 3591 if (!eb->first_page) 3592 return; 3593 3594 index = num_extent_pages(eb->start, eb->len); 3595 if (start_idx >= index) 3596 return; 3597 3598 do { 3599 index--; 3600 page = extent_buffer_page(eb, index); 3601 if (page) 3602 page_cache_release(page); 3603 } while (index != start_idx); 3604 } 3605 3606 /* 3607 * Helper for releasing the extent buffer. 
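 * Drops the page references taken for each of its pages and then frees
 * the extent_buffer structure itself.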
3608 */ 3609 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) 3610 { 3611 btrfs_release_extent_buffer_page(eb, 0); 3612 __free_extent_buffer(eb); 3613 } 3614 3615 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, 3616 u64 start, unsigned long len, 3617 struct page *page0) 3618 { 3619 unsigned long num_pages = num_extent_pages(start, len); 3620 unsigned long i; 3621 unsigned long index = start >> PAGE_CACHE_SHIFT; 3622 struct extent_buffer *eb; 3623 struct extent_buffer *exists = NULL; 3624 struct page *p; 3625 struct address_space *mapping = tree->mapping; 3626 int uptodate = 1; 3627 int ret; 3628 3629 rcu_read_lock(); 3630 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); 3631 if (eb && atomic_inc_not_zero(&eb->refs)) { 3632 rcu_read_unlock(); 3633 mark_page_accessed(eb->first_page); 3634 return eb; 3635 } 3636 rcu_read_unlock(); 3637 3638 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS); 3639 if (!eb) 3640 return NULL; 3641 3642 if (page0) { 3643 eb->first_page = page0; 3644 i = 1; 3645 index++; 3646 page_cache_get(page0); 3647 mark_page_accessed(page0); 3648 set_page_extent_mapped(page0); 3649 set_page_extent_head(page0, len); 3650 uptodate = PageUptodate(page0); 3651 } else { 3652 i = 0; 3653 } 3654 for (; i < num_pages; i++, index++) { 3655 p = find_or_create_page(mapping, index, GFP_NOFS); 3656 if (!p) { 3657 WARN_ON(1); 3658 goto free_eb; 3659 } 3660 set_page_extent_mapped(p); 3661 mark_page_accessed(p); 3662 if (i == 0) { 3663 eb->first_page = p; 3664 set_page_extent_head(p, len); 3665 } else { 3666 set_page_private(p, EXTENT_PAGE_PRIVATE); 3667 } 3668 if (!PageUptodate(p)) 3669 uptodate = 0; 3670 3671 /* 3672 * see below about how we avoid a nasty race with release page 3673 * and why we unlock later 3674 */ 3675 if (i != 0) 3676 unlock_page(p); 3677 } 3678 if (uptodate) 3679 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 3680 3681 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); 3682 if (ret) 3683 goto free_eb; 3684 3685 spin_lock(&tree->buffer_lock); 3686 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb); 3687 if (ret == -EEXIST) { 3688 exists = radix_tree_lookup(&tree->buffer, 3689 start >> PAGE_CACHE_SHIFT); 3690 /* add one reference for the caller */ 3691 atomic_inc(&exists->refs); 3692 spin_unlock(&tree->buffer_lock); 3693 radix_tree_preload_end(); 3694 goto free_eb; 3695 } 3696 /* add one reference for the tree */ 3697 atomic_inc(&eb->refs); 3698 spin_unlock(&tree->buffer_lock); 3699 radix_tree_preload_end(); 3700 3701 /* 3702 * there is a race where release page may have 3703 * tried to find this extent buffer in the radix 3704 * but failed. It will tell the VM it is safe to 3705 * reclaim the, and it will clear the page private bit. 
3706 * We must make sure to set the page private bit properly 3707 * after the extent buffer is in the radix tree so 3708 * it doesn't get lost 3709 */ 3710 set_page_extent_mapped(eb->first_page); 3711 set_page_extent_head(eb->first_page, eb->len); 3712 if (!page0) 3713 unlock_page(eb->first_page); 3714 return eb; 3715 3716 free_eb: 3717 if (eb->first_page && !page0) 3718 unlock_page(eb->first_page); 3719 3720 if (!atomic_dec_and_test(&eb->refs)) 3721 return exists; 3722 btrfs_release_extent_buffer(eb); 3723 return exists; 3724 } 3725 3726 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, 3727 u64 start, unsigned long len) 3728 { 3729 struct extent_buffer *eb; 3730 3731 rcu_read_lock(); 3732 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); 3733 if (eb && atomic_inc_not_zero(&eb->refs)) { 3734 rcu_read_unlock(); 3735 mark_page_accessed(eb->first_page); 3736 return eb; 3737 } 3738 rcu_read_unlock(); 3739 3740 return NULL; 3741 } 3742 3743 void free_extent_buffer(struct extent_buffer *eb) 3744 { 3745 if (!eb) 3746 return; 3747 3748 if (!atomic_dec_and_test(&eb->refs)) 3749 return; 3750 3751 WARN_ON(1); 3752 } 3753 3754 int clear_extent_buffer_dirty(struct extent_io_tree *tree, 3755 struct extent_buffer *eb) 3756 { 3757 unsigned long i; 3758 unsigned long num_pages; 3759 struct page *page; 3760 3761 num_pages = num_extent_pages(eb->start, eb->len); 3762 3763 for (i = 0; i < num_pages; i++) { 3764 page = extent_buffer_page(eb, i); 3765 if (!PageDirty(page)) 3766 continue; 3767 3768 lock_page(page); 3769 WARN_ON(!PagePrivate(page)); 3770 3771 set_page_extent_mapped(page); 3772 if (i == 0) 3773 set_page_extent_head(page, eb->len); 3774 3775 clear_page_dirty_for_io(page); 3776 spin_lock_irq(&page->mapping->tree_lock); 3777 if (!PageDirty(page)) { 3778 radix_tree_tag_clear(&page->mapping->page_tree, 3779 page_index(page), 3780 PAGECACHE_TAG_DIRTY); 3781 } 3782 spin_unlock_irq(&page->mapping->tree_lock); 3783 ClearPageError(page); 3784 unlock_page(page); 3785 } 3786 return 0; 3787 } 3788 3789 int set_extent_buffer_dirty(struct extent_io_tree *tree, 3790 struct extent_buffer *eb) 3791 { 3792 unsigned long i; 3793 unsigned long num_pages; 3794 int was_dirty = 0; 3795 3796 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); 3797 num_pages = num_extent_pages(eb->start, eb->len); 3798 for (i = 0; i < num_pages; i++) 3799 __set_page_dirty_nobuffers(extent_buffer_page(eb, i)); 3800 return was_dirty; 3801 } 3802 3803 static int __eb_straddles_pages(u64 start, u64 len) 3804 { 3805 if (len < PAGE_CACHE_SIZE) 3806 return 1; 3807 if (start & (PAGE_CACHE_SIZE - 1)) 3808 return 1; 3809 if ((start + len) & (PAGE_CACHE_SIZE - 1)) 3810 return 1; 3811 return 0; 3812 } 3813 3814 static int eb_straddles_pages(struct extent_buffer *eb) 3815 { 3816 return __eb_straddles_pages(eb->start, eb->len); 3817 } 3818 3819 int clear_extent_buffer_uptodate(struct extent_io_tree *tree, 3820 struct extent_buffer *eb, 3821 struct extent_state **cached_state) 3822 { 3823 unsigned long i; 3824 struct page *page; 3825 unsigned long num_pages; 3826 3827 num_pages = num_extent_pages(eb->start, eb->len); 3828 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 3829 3830 if (eb_straddles_pages(eb)) { 3831 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, 3832 cached_state, GFP_NOFS); 3833 } 3834 for (i = 0; i < num_pages; i++) { 3835 page = extent_buffer_page(eb, i); 3836 if (page) 3837 ClearPageUptodate(page); 3838 } 3839 return 0; 3840 } 3841 3842 int 
set_extent_buffer_uptodate(struct extent_io_tree *tree, 3843 struct extent_buffer *eb) 3844 { 3845 unsigned long i; 3846 struct page *page; 3847 unsigned long num_pages; 3848 3849 num_pages = num_extent_pages(eb->start, eb->len); 3850 3851 if (eb_straddles_pages(eb)) { 3852 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, 3853 NULL, GFP_NOFS); 3854 } 3855 for (i = 0; i < num_pages; i++) { 3856 page = extent_buffer_page(eb, i); 3857 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || 3858 ((i == num_pages - 1) && 3859 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) { 3860 check_page_uptodate(tree, page); 3861 continue; 3862 } 3863 SetPageUptodate(page); 3864 } 3865 return 0; 3866 } 3867 3868 int extent_range_uptodate(struct extent_io_tree *tree, 3869 u64 start, u64 end) 3870 { 3871 struct page *page; 3872 int ret; 3873 int pg_uptodate = 1; 3874 int uptodate; 3875 unsigned long index; 3876 3877 if (__eb_straddles_pages(start, end - start + 1)) { 3878 ret = test_range_bit(tree, start, end, 3879 EXTENT_UPTODATE, 1, NULL); 3880 if (ret) 3881 return 1; 3882 } 3883 while (start <= end) { 3884 index = start >> PAGE_CACHE_SHIFT; 3885 page = find_get_page(tree->mapping, index); 3886 uptodate = PageUptodate(page); 3887 page_cache_release(page); 3888 if (!uptodate) { 3889 pg_uptodate = 0; 3890 break; 3891 } 3892 start += PAGE_CACHE_SIZE; 3893 } 3894 return pg_uptodate; 3895 } 3896 3897 int extent_buffer_uptodate(struct extent_io_tree *tree, 3898 struct extent_buffer *eb, 3899 struct extent_state *cached_state) 3900 { 3901 int ret = 0; 3902 unsigned long num_pages; 3903 unsigned long i; 3904 struct page *page; 3905 int pg_uptodate = 1; 3906 3907 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) 3908 return 1; 3909 3910 if (eb_straddles_pages(eb)) { 3911 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1, 3912 EXTENT_UPTODATE, 1, cached_state); 3913 if (ret) 3914 return ret; 3915 } 3916 3917 num_pages = num_extent_pages(eb->start, eb->len); 3918 for (i = 0; i < num_pages; i++) { 3919 page = extent_buffer_page(eb, i); 3920 if (!PageUptodate(page)) { 3921 pg_uptodate = 0; 3922 break; 3923 } 3924 } 3925 return pg_uptodate; 3926 } 3927 3928 int read_extent_buffer_pages(struct extent_io_tree *tree, 3929 struct extent_buffer *eb, u64 start, int wait, 3930 get_extent_t *get_extent, int mirror_num) 3931 { 3932 unsigned long i; 3933 unsigned long start_i; 3934 struct page *page; 3935 int err; 3936 int ret = 0; 3937 int locked_pages = 0; 3938 int all_uptodate = 1; 3939 int inc_all_pages = 0; 3940 unsigned long num_pages; 3941 struct bio *bio = NULL; 3942 unsigned long bio_flags = 0; 3943 3944 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) 3945 return 0; 3946 3947 if (eb_straddles_pages(eb)) { 3948 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1, 3949 EXTENT_UPTODATE, 1, NULL)) { 3950 return 0; 3951 } 3952 } 3953 3954 if (start) { 3955 WARN_ON(start < eb->start); 3956 start_i = (start >> PAGE_CACHE_SHIFT) - 3957 (eb->start >> PAGE_CACHE_SHIFT); 3958 } else { 3959 start_i = 0; 3960 } 3961 3962 num_pages = num_extent_pages(eb->start, eb->len); 3963 for (i = start_i; i < num_pages; i++) { 3964 page = extent_buffer_page(eb, i); 3965 if (wait == WAIT_NONE) { 3966 if (!trylock_page(page)) 3967 goto unlock_exit; 3968 } else { 3969 lock_page(page); 3970 } 3971 locked_pages++; 3972 if (!PageUptodate(page)) 3973 all_uptodate = 0; 3974 } 3975 if (all_uptodate) { 3976 if (start_i == 0) 3977 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 3978 goto unlock_exit; 3979 } 3980 3981 for 
(i = start_i; i < num_pages; i++) { 3982 page = extent_buffer_page(eb, i); 3983 3984 WARN_ON(!PagePrivate(page)); 3985 3986 set_page_extent_mapped(page); 3987 if (i == 0) 3988 set_page_extent_head(page, eb->len); 3989 3990 if (inc_all_pages) 3991 page_cache_get(page); 3992 if (!PageUptodate(page)) { 3993 if (start_i == 0) 3994 inc_all_pages = 1; 3995 ClearPageError(page); 3996 err = __extent_read_full_page(tree, page, 3997 get_extent, &bio, 3998 mirror_num, &bio_flags); 3999 if (err) 4000 ret = err; 4001 } else { 4002 unlock_page(page); 4003 } 4004 } 4005 4006 if (bio) 4007 submit_one_bio(READ, bio, mirror_num, bio_flags); 4008 4009 if (ret || wait != WAIT_COMPLETE) 4010 return ret; 4011 4012 for (i = start_i; i < num_pages; i++) { 4013 page = extent_buffer_page(eb, i); 4014 wait_on_page_locked(page); 4015 if (!PageUptodate(page)) 4016 ret = -EIO; 4017 } 4018 4019 if (!ret) 4020 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 4021 return ret; 4022 4023 unlock_exit: 4024 i = start_i; 4025 while (locked_pages > 0) { 4026 page = extent_buffer_page(eb, i); 4027 i++; 4028 unlock_page(page); 4029 locked_pages--; 4030 } 4031 return ret; 4032 } 4033 4034 void read_extent_buffer(struct extent_buffer *eb, void *dstv, 4035 unsigned long start, 4036 unsigned long len) 4037 { 4038 size_t cur; 4039 size_t offset; 4040 struct page *page; 4041 char *kaddr; 4042 char *dst = (char *)dstv; 4043 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 4044 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 4045 4046 WARN_ON(start > eb->len); 4047 WARN_ON(start + len > eb->start + eb->len); 4048 4049 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); 4050 4051 while (len > 0) { 4052 page = extent_buffer_page(eb, i); 4053 4054 cur = min(len, (PAGE_CACHE_SIZE - offset)); 4055 kaddr = page_address(page); 4056 memcpy(dst, kaddr + offset, cur); 4057 4058 dst += cur; 4059 len -= cur; 4060 offset = 0; 4061 i++; 4062 } 4063 } 4064 4065 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, 4066 unsigned long min_len, char **map, 4067 unsigned long *map_start, 4068 unsigned long *map_len) 4069 { 4070 size_t offset = start & (PAGE_CACHE_SIZE - 1); 4071 char *kaddr; 4072 struct page *p; 4073 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 4074 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 4075 unsigned long end_i = (start_offset + start + min_len - 1) >> 4076 PAGE_CACHE_SHIFT; 4077 4078 if (i != end_i) 4079 return -EINVAL; 4080 4081 if (i == 0) { 4082 offset = start_offset; 4083 *map_start = 0; 4084 } else { 4085 offset = 0; 4086 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset; 4087 } 4088 4089 if (start + min_len > eb->len) { 4090 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, " 4091 "wanted %lu %lu\n", (unsigned long long)eb->start, 4092 eb->len, start, min_len); 4093 WARN_ON(1); 4094 return -EINVAL; 4095 } 4096 4097 p = extent_buffer_page(eb, i); 4098 kaddr = page_address(p); 4099 *map = kaddr + offset; 4100 *map_len = PAGE_CACHE_SIZE - offset; 4101 return 0; 4102 } 4103 4104 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, 4105 unsigned long start, 4106 unsigned long len) 4107 { 4108 size_t cur; 4109 size_t offset; 4110 struct page *page; 4111 char *kaddr; 4112 char *ptr = (char *)ptrv; 4113 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 4114 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 4115 int ret = 0; 4116 4117 WARN_ON(start > eb->len); 4118 WARN_ON(start + 
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memset(kaddr + offset, c, cur);

		len -= cur;
		offset = 0;
		i++;
	}
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		 ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = page_address(src_page);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;
	}
}
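
/*
 * Illustrative note (added commentary, not part of the original
 * source): move_pages() handles one chunk of an overlapping move for
 * memmove_extent_buffer() below.  When source and destination live in
 * the same page it simply defers to memmove(); when they live in
 * different pages it copies the chunk back to front, mirroring the
 * tail-first walk of the caller.
 */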
static inline bool areas_overlap(unsigned long src, unsigned long dst,
				 unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	char *src_kaddr;

	if (dst_page != src_page) {
		src_kaddr = page_address(src_page);
	} else {
		src_kaddr = dst_kaddr;
		BUG_ON(areas_overlap(src_off, dst_off, len));
	}

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
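
/*
 * Illustrative example (added commentary, not part of the original
 * source): for a move of len 100 from src_offset 50 to dst_offset 120
 * within the same extent buffer, the distance is 70 < 100, so
 * areas_overlap() returns true and memmove_extent_buffer() below
 * copies chunk by chunk from the tail end via move_pages().  With
 * dst_offset 200 the distance would be 150 >= 100, the ranges are
 * disjoint, and the whole request is handed to memcpy_extent_buffer()
 * instead.
 */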
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (!areas_overlap(src_offset, dst_offset, len)) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
	struct extent_buffer *eb =
			container_of(head, struct extent_buffer, rcu_head);

	btrfs_release_extent_buffer(eb);
}

int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	struct extent_buffer *eb;
	int ret = 1;

	spin_lock(&tree->buffer_lock);
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (!eb) {
		spin_unlock(&tree->buffer_lock);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		ret = 0;
		goto out;
	}

	/*
	 * drop our reference only if it is the last one (1 -> 0); if
	 * anyone else still holds a reference, leave the buffer alone.
	 */
	if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
		ret = 0;
		goto out;
	}

	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
out:
	spin_unlock(&tree->buffer_lock);

	/* at this point we can safely release the extent buffer */
	if (atomic_read(&eb->refs) == 0)
		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
	return ret;
}
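
/*
 * Illustrative note (added commentary, not part of the original
 * source): the return value of try_release_extent_buffer() follows
 * the usual releasepage convention: 1 means the page's extent buffer
 * was (or already had been) torn down and the page may be released,
 * 0 means the buffer is still dirty or referenced and must stay.  The
 * actual free is deferred through call_rcu() so that lockless
 * radix_tree_lookup() users which found the buffer just before it was
 * deleted can still dereference it safely inside their RCU read-side
 * section.
 */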