#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_map_tree *tree;
	get_extent_t *get_extent;
};

int __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	if (!extent_state_cache)
		goto free_map_cache;
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
free_map_cache:
	kmem_cache_destroy(extent_map_cache);
	return -ENOMEM;
}

void __exit extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state, state->in_tree,
		       atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);

void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;
	while(!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del_init(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);
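
/*
 * drop a reference on an extent_map; once the last reference is gone the
 * struct goes back to the slab cache (it must no longer be linked into a
 * tree at that point, hence the WARN_ON below).
 */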
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);


struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while(*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while(n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while(prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}
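
/*
 * Note: both the extent map and extent state trees are kept in rb-trees
 * keyed on the end offset of each entry, so tree_search() above returns
 * either the entry that contains 'offset' or, failing that, the first
 * entry that ends after it (NULL when no such entry exists).
 */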

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == EXTENT_MAP_INLINE &&
		      prev->block_start == EXTENT_MAP_INLINE) ||
		     (em->block_start == EXTENT_MAP_DELALLOC &&
		      prev->block_start == EXTENT_MAP_DELALLOC) ||
		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
		      em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig:     [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
			struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);
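
/*
 * Illustrative only (not taken from the original callers): a typical writer
 * brackets a byte range with lock_extent()/unlock_extent() and records
 * per-range state with the wrappers above, e.g.
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	set_extent_delalloc(tree, start, end, GFP_NOFS);
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */

/*
 * find the first range in the state tree that ends at or after 'start' and
 * has any of 'bits' set.  Returns 0 and fills *start_ret/*end_ret when such
 * a range is found, or 1 when nothing matches.
 */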
int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while(1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while(1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			goto out;
		}
		if (!found) {
			struct extent_state *prev_state;
			struct rb_node *prev_node = node;
			while(1) {
				prev_node = rb_prev(prev_node);
				if (!prev_node)
					break;
				prev_state = rb_entry(prev_node,
						      struct extent_state,
						      rb_node);
				if (!(prev_state->state & EXTENT_DELALLOC))
					break;
				state = prev_state;
				node = prev_node;
			}
		}
		if (state->state & EXTENT_LOCKED) {
			DEFINE_WAIT(wait);
			atomic_inc(&state->refs);
			prepare_to_wait(&state->wq, &wait,
					TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tree->lock);
			schedule();
			write_lock_irq(&tree->lock);
			finish_wait(&state->wq, &wait);
			free_extent_state(state);
			goto search_again;
		}
		state->state |= EXTENT_LOCKED;
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */
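
/*
 * The LINUX_VERSION_CODE checks below keep these completion callbacks
 * building on kernels both before and after 2.6.24, where the bio end_io
 * prototype dropped the 'bytes_done' argument and the partial-completion
 * return value.
 */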

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
					unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio)
{
	int ret = 0;
	bio_get(bio);
	submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		printk("failed to allocate bio nr %d\n", nr);
	}
	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;
	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio);
	}

	return ret;
}
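
/*
 * tag a page as belonging to this extent I/O code: set PagePrivate, stash
 * the EXTENT_PAGE_PRIVATE marker in page->private and take a page cache
 * reference, so the address_space invalidatepage/releasepage hooks get
 * called for the page later on.
 */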
void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_map_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			nr -= page->index;
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, bio, nr,
						 end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio);
	if (bio)
		submit_one_bio(READ, bio);
	return ret;
}
EXPORT_SYMBOL(extent_read_full_page);
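
/*
 * Sketch of how a filesystem might wire this into its address_space
 * operations (illustrative only, the names are placeholders):
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = ...;	per-inode extent_map_tree
 *		return extent_read_full_page(tree, page, my_get_extent);
 *	}
 */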

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      void *data)
{
	struct inode *inode = page->mapping->host;
	struct extent_page_data *epd = data;
	struct extent_map_tree *tree = epd->tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 delalloc_start;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	delalloc_start = start;
	delalloc_end = 0;
	while(delalloc_end < page_end) {
		nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
						       &delalloc_end,
						       128 * 1024 * 1024);
		if (nr_delalloc <= 0)
			break;
		tree->ops->fill_delalloc(inode, delalloc_start,
					 delalloc_end);
		clear_extent_bit(tree, delalloc_start,
				 delalloc_end,
				 EXTENT_LOCKED | EXTENT_DELALLOC,
				 1, 0, GFP_NOFS);
		delalloc_start = delalloc_end + 1;
	}
	lock_extent(tree, start, page_end, GFP_NOFS);

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			unsigned long max_nr = end_index + 1;
			set_range_writeback(tree, cur, cur + iosize - 1);
			if (!PageWriteback(page)) {
				printk("warning page %lu not writeback, "
				       "cur %llu end %llu\n", page->index,
				       (unsigned long long)cur,
				       (unsigned long long)end);
			}

			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 &epd->bio, max_nr,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}

int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	int ret;
	struct address_space *mapping = page->mapping;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};
	struct writeback_control wbc_writepages = {
		.bdi = wbc->bdi,
		.sync_mode = WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write = 64,
		.range_start = page_offset(page) + PAGE_CACHE_SIZE,
		.range_end = (loff_t)-1,
	};

	ret = __extent_writepage(page, wbc, &epd);

	write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio);
	return ret;
}
EXPORT_SYMBOL(extent_write_full_page);

int extent_writepages(struct extent_map_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio);
	return ret;
}
EXPORT_SYMBOL(extent_writepages);

int extent_readpages(struct extent_map_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	struct pagevec pvec;

	pagevec_init(&pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (!add_to_page_cache(page, mapping,
				       page->index, GFP_KERNEL)) {

			/* open coding of lru_cache_add, also not exported */
			page_cache_get(page);
			if (!pagevec_add(&pvec, page))
				__pagevec_lru_add(&pvec);
			__extent_read_full_page(tree, page, get_extent, &bio);
		}
		page_cache_release(page);
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add(&pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		submit_one_bio(READ, bio);
	return 0;
}
EXPORT_SYMBOL(extent_readpages);

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);

/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);

int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while(block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em) {
			goto err;
		}
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (!isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize - 1) &
				~((u64)blocksize - 1);
int extent_prepare_write(struct extent_map_tree *tree,
                         struct inode *inode, struct page *page,
                         unsigned from, unsigned to, get_extent_t *get_extent)
{
        u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
        u64 block_start;
        u64 orig_block_start;
        u64 block_end;
        u64 cur_end;
        struct extent_map *em;
        unsigned blocksize = 1 << inode->i_blkbits;
        size_t page_offset = 0;
        size_t block_off_start;
        size_t block_off_end;
        int err = 0;
        int iocount = 0;
        int ret = 0;
        int isnew;

        set_page_extent_mapped(page);

        block_start = (page_start + from) & ~((u64)blocksize - 1);
        block_end = (page_start + to - 1) | (blocksize - 1);
        orig_block_start = block_start;

        lock_extent(tree, page_start, page_end, GFP_NOFS);
        while (block_start <= block_end) {
                em = get_extent(inode, page, page_offset, block_start,
                                block_end, 1);
                if (IS_ERR(em) || !em) {
                        goto err;
                }
                cur_end = min(block_end, em->end);
                block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
                block_off_end = block_off_start + blocksize;
                isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

                if (!PageUptodate(page) && isnew &&
                    (block_off_end > to || block_off_start < from)) {
                        void *kaddr;

                        kaddr = kmap_atomic(page, KM_USER0);
                        if (block_off_end > to)
                                memset(kaddr + to, 0, block_off_end - to);
                        if (block_off_start < from)
                                memset(kaddr + block_off_start, 0,
                                       from - block_off_start);
                        flush_dcache_page(page);
                        kunmap_atomic(kaddr, KM_USER0);
                }
                if (!isnew && !PageUptodate(page) &&
                    (block_off_end > to || block_off_start < from) &&
                    !test_range_bit(tree, block_start, cur_end,
                                    EXTENT_UPTODATE, 1)) {
                        u64 sector;
                        u64 extent_offset = block_start - em->start;
                        size_t iosize;
                        sector = (em->block_start + extent_offset) >> 9;
                        iosize = (cur_end - block_start + blocksize - 1) &
                                 ~((u64)blocksize - 1);
                        /*
                         * we've already got the extent locked, but we
                         * need to split the state such that our end_bio
                         * handler can clear the lock.
                         */
                        set_extent_bit(tree, block_start,
                                       block_start + iosize - 1,
                                       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
                        ret = submit_extent_page(READ, tree, page,
                                                 sector, iosize, page_offset,
                                                 em->bdev, NULL, 1,
                                                 end_bio_extent_preparewrite);
                        iocount++;
                        block_start = block_start + iosize;
                } else {
                        set_extent_uptodate(tree, block_start, cur_end,
                                            GFP_NOFS);
                        unlock_extent(tree, block_start, cur_end, GFP_NOFS);
                        block_start = cur_end + 1;
                }
                page_offset = block_start & (PAGE_CACHE_SIZE - 1);
                free_extent_map(em);
        }
        if (iocount) {
                wait_extent_bit(tree, orig_block_start,
                                block_end, EXTENT_LOCKED);
        }
        check_page_uptodate(tree, page);
err:
        /* FIXME, zero out newly allocated blocks on error */
        return err;
}
EXPORT_SYMBOL(extent_prepare_write);

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
        struct extent_map *em;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;
        u64 orig_start = start;
        int ret = 1;

        while (start <= end) {
                em = lookup_extent_mapping(tree, start, end);
                if (!em || IS_ERR(em))
                        break;
                if (!test_range_bit(tree, em->start, em->end,
                                    EXTENT_LOCKED, 0)) {
                        remove_extent_mapping(tree, em);
                        /* once for the rb tree */
                        free_extent_map(em);
                }
                start = em->end + 1;
                /* once for us */
                free_extent_map(em);
        }
        if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
                ret = 0;
        else
                clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
                                 1, 1, GFP_NOFS);
        return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);

sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
                     get_extent_t *get_extent)
{
        struct inode *inode = mapping->host;
        u64 start = iblock << inode->i_blkbits;
        u64 end = start + (1 << inode->i_blkbits) - 1;
        sector_t sector = 0;
        struct extent_map *em;

        em = get_extent(inode, NULL, 0, start, end, 0);
        if (!em || IS_ERR(em))
                return 0;

        if (em->block_start == EXTENT_MAP_INLINE ||
            em->block_start == EXTENT_MAP_HOLE)
                goto out;

        sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
        free_extent_map(em);
        return sector;
}
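/*
 * Illustrative sketch (not part of the original file): extent_bmap() is
 * meant to back an address_space ->bmap operation.  "example_get_extent"
 * below is an assumed get_extent_t callback supplied by the filesystem.
 *
 *      static sector_t example_bmap(struct address_space *mapping,
 *                                   sector_t block)
 *      {
 *              return extent_bmap(mapping, block, example_get_extent);
 *      }
 */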
static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
{
        if (list_empty(&eb->lru)) {
                extent_buffer_get(eb);
                list_add(&eb->lru, &tree->buffer_lru);
                tree->lru_size++;
                if (tree->lru_size >= BUFFER_LRU_MAX) {
                        struct extent_buffer *rm;
                        rm = list_entry(tree->buffer_lru.prev,
                                        struct extent_buffer, lru);
                        tree->lru_size--;
                        list_del_init(&rm->lru);
                        free_extent_buffer(rm);
                }
        } else
                list_move(&eb->lru, &tree->buffer_lru);
        return 0;
}

static struct extent_buffer *find_lru(struct extent_map_tree *tree,
                                      u64 start, unsigned long len)
{
        struct list_head *lru = &tree->buffer_lru;
        struct list_head *cur = lru->next;
        struct extent_buffer *eb;

        if (list_empty(lru))
                return NULL;

        do {
                eb = list_entry(cur, struct extent_buffer, lru);
                if (eb->start == start && eb->len == len) {
                        extent_buffer_get(eb);
                        return eb;
                }
                cur = cur->next;
        } while (cur != lru);
        return NULL;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
        return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
                (start >> PAGE_CACHE_SHIFT);
}

static inline struct page *extent_buffer_page(struct extent_buffer *eb,
                                              unsigned long i)
{
        struct page *p;
        struct address_space *mapping;

        if (i == 0)
                return eb->first_page;
        i += eb->start >> PAGE_CACHE_SHIFT;
        mapping = eb->first_page->mapping;
        read_lock_irq(&mapping->tree_lock);
        p = radix_tree_lookup(&mapping->page_tree, i);
        read_unlock_irq(&mapping->tree_lock);
        return p;
}

static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
                                                   u64 start,
                                                   unsigned long len,
                                                   gfp_t mask)
{
        struct extent_buffer *eb = NULL;

        spin_lock(&tree->lru_lock);
        eb = find_lru(tree, start, len);
        spin_unlock(&tree->lru_lock);
        if (eb) {
                return eb;
        }

        eb = kmem_cache_zalloc(extent_buffer_cache, mask);
        if (!eb)
                return NULL;
        INIT_LIST_HEAD(&eb->lru);
        eb->start = start;
        eb->len = len;
        atomic_set(&eb->refs, 1);

        return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
        kmem_cache_free(extent_buffer_cache, eb);
}
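/*
 * Worked example (illustrative, not from the original file): with 4K pages,
 * num_extent_pages(start, len) counts every page the byte range touches.
 * A 16K buffer that starts 1K into a page spans five pages:
 *
 *      num_extent_pages(4096 + 1024, 16384)
 *              = ((5120 + 16384 + 4095) >> 12) - (5120 >> 12)
 *              = 6 - 1 = 5
 */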
struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
                                          u64 start, unsigned long len,
                                          struct page *page0,
                                          gfp_t mask)
{
        unsigned long num_pages = num_extent_pages(start, len);
        unsigned long i;
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        struct extent_buffer *eb;
        struct page *p;
        struct address_space *mapping = tree->mapping;
        int uptodate = 1;

        eb = __alloc_extent_buffer(tree, start, len, mask);
        if (!eb || IS_ERR(eb))
                return NULL;

        if (eb->flags & EXTENT_BUFFER_FILLED)
                goto lru_add;

        if (page0) {
                eb->first_page = page0;
                i = 1;
                index++;
                page_cache_get(page0);
                mark_page_accessed(page0);
                set_page_extent_mapped(page0);
                WARN_ON(!PageUptodate(page0));
                set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
                                 len << 2);
        } else {
                i = 0;
        }
        for (; i < num_pages; i++, index++) {
                p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
                if (!p) {
                        WARN_ON(1);
                        goto fail;
                }
                set_page_extent_mapped(p);
                mark_page_accessed(p);
                if (i == 0) {
                        eb->first_page = p;
                        set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
                                         len << 2);
                } else {
                        set_page_private(p, EXTENT_PAGE_PRIVATE);
                }
                if (!PageUptodate(p))
                        uptodate = 0;
                unlock_page(p);
        }
        if (uptodate)
                eb->flags |= EXTENT_UPTODATE;
        eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
        spin_lock(&tree->lru_lock);
        add_lru(tree, eb);
        spin_unlock(&tree->lru_lock);
        return eb;

fail:
        spin_lock(&tree->lru_lock);
        list_del_init(&eb->lru);
        spin_unlock(&tree->lru_lock);
        if (!atomic_dec_and_test(&eb->refs))
                return NULL;
        for (index = 1; index < i; index++) {
                page_cache_release(extent_buffer_page(eb, index));
        }
        if (i > 0)
                page_cache_release(extent_buffer_page(eb, 0));
        __free_extent_buffer(eb);
        return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);

struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
                                         u64 start, unsigned long len,
                                         gfp_t mask)
{
        unsigned long num_pages = num_extent_pages(start, len);
        unsigned long i;
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        struct extent_buffer *eb;
        struct page *p;
        struct address_space *mapping = tree->mapping;
        int uptodate = 1;

        eb = __alloc_extent_buffer(tree, start, len, mask);
        if (!eb || IS_ERR(eb))
                return NULL;

        if (eb->flags & EXTENT_BUFFER_FILLED)
                goto lru_add;

        for (i = 0; i < num_pages; i++, index++) {
                p = find_lock_page(mapping, index);
                if (!p) {
                        goto fail;
                }
                set_page_extent_mapped(p);
                mark_page_accessed(p);

                if (i == 0) {
                        eb->first_page = p;
                        set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
                                         len << 2);
                } else {
                        set_page_private(p, EXTENT_PAGE_PRIVATE);
                }

                if (!PageUptodate(p))
                        uptodate = 0;
                unlock_page(p);
        }
        if (uptodate)
                eb->flags |= EXTENT_UPTODATE;
        eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
        spin_lock(&tree->lru_lock);
        add_lru(tree, eb);
        spin_unlock(&tree->lru_lock);
        return eb;
fail:
        spin_lock(&tree->lru_lock);
        list_del_init(&eb->lru);
        spin_unlock(&tree->lru_lock);
        if (!atomic_dec_and_test(&eb->refs))
                return NULL;
        for (index = 1; index < i; index++) {
                page_cache_release(extent_buffer_page(eb, index));
        }
        if (i > 0)
                page_cache_release(extent_buffer_page(eb, 0));
        __free_extent_buffer(eb);
        return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);

void free_extent_buffer(struct extent_buffer *eb)
{
        unsigned long i;
        unsigned long num_pages;

        if (!eb)
                return;

        if (!atomic_dec_and_test(&eb->refs))
                return;

        WARN_ON(!list_empty(&eb->lru));
        num_pages = num_extent_pages(eb->start, eb->len);

        for (i = 1; i < num_pages; i++) {
                page_cache_release(extent_buffer_page(eb, i));
        }
        page_cache_release(extent_buffer_page(eb, 0));
        __free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);
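/*
 * Illustrative sketch (not from the original file): typical life cycle of an
 * extent_buffer from the caller's point of view.  "tree", "start",
 * "blocksize" and "header" are assumed caller-provided values, and the error
 * handling is deliberately minimal.
 *
 *      struct extent_buffer *eb;
 *      int ret;
 *
 *      eb = alloc_extent_buffer(tree, start, blocksize, NULL, GFP_NOFS);
 *      if (!eb)
 *              return -ENOMEM;
 *      ret = read_extent_buffer_pages(tree, eb, 0, 1);   // wait for the IO
 *      if (!ret)
 *              read_extent_buffer(eb, &header, 0, sizeof(header));
 *      free_extent_buffer(eb);                           // drop our reference
 */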
int clear_extent_buffer_dirty(struct extent_map_tree *tree,
                              struct extent_buffer *eb)
{
        int set;
        unsigned long i;
        unsigned long num_pages;
        struct page *page;

        u64 start = eb->start;
        u64 end = start + eb->len - 1;

        set = clear_extent_dirty(tree, start, end, GFP_NOFS);
        num_pages = num_extent_pages(eb->start, eb->len);

        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                lock_page(page);
                /*
                 * if we're on the first or last page and the block isn't
                 * aligned on a page boundary, do extra checks to make sure
                 * we don't clean a page that is partially dirty
                 */
                if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
                    ((i == num_pages - 1) &&
                     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
                        start = (u64)page->index << PAGE_CACHE_SHIFT;
                        end = start + PAGE_CACHE_SIZE - 1;
                        if (test_range_bit(tree, start, end,
                                           EXTENT_DIRTY, 0)) {
                                unlock_page(page);
                                continue;
                        }
                }
                clear_page_dirty_for_io(page);
                write_lock_irq(&page->mapping->tree_lock);
                if (!PageDirty(page)) {
                        radix_tree_tag_clear(&page->mapping->page_tree,
                                             page_index(page),
                                             PAGECACHE_TAG_DIRTY);
                }
                write_unlock_irq(&page->mapping->tree_lock);
                unlock_page(page);
        }
        return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
                                    struct extent_buffer *eb)
{
        return wait_on_extent_writeback(tree, eb->start,
                                        eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_map_tree *tree,
                            struct extent_buffer *eb)
{
        unsigned long i;
        unsigned long num_pages;

        num_pages = num_extent_pages(eb->start, eb->len);
        for (i = 0; i < num_pages; i++) {
                struct page *page = extent_buffer_page(eb, i);
                /* writepage may need to do something special for the
                 * first page, we have to make sure page->private is
                 * properly set.  releasepage may drop page->private
                 * on us if the page isn't already dirty.
                 */
                if (i == 0) {
                        lock_page(page);
                        set_page_private(page,
                                         EXTENT_PAGE_PRIVATE_FIRST_PAGE |
                                         eb->len << 2);
                }
                __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
                if (i == 0)
                        unlock_page(page);
        }
        return set_extent_dirty(tree, eb->start,
                                eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int set_extent_buffer_uptodate(struct extent_map_tree *tree,
                               struct extent_buffer *eb)
{
        unsigned long i;
        struct page *page;
        unsigned long num_pages;

        num_pages = num_extent_pages(eb->start, eb->len);

        set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
                            GFP_NOFS);
        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
                    ((i == num_pages - 1) &&
                     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
                        check_page_uptodate(tree, page);
                        continue;
                }
                SetPageUptodate(page);
        }
        return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
                           struct extent_buffer *eb)
{
        if (eb->flags & EXTENT_UPTODATE)
                return 1;
        return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
                              EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);
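/*
 * Illustrative sketch (not from the original file): one way a metadata write
 * path might combine the state helpers above.  The surrounding transaction
 * logic is assumed and omitted.
 *
 *      // after modifying the buffer contents:
 *      set_extent_buffer_uptodate(tree, eb);
 *      set_extent_buffer_dirty(tree, eb);
 *
 *      // later, before reusing or freeing the block on disk:
 *      clear_extent_buffer_dirty(tree, eb);
 *      wait_on_extent_buffer_writeback(tree, eb);
 */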
int read_extent_buffer_pages(struct extent_map_tree *tree,
                             struct extent_buffer *eb,
                             u64 start,
                             int wait)
{
        unsigned long i;
        unsigned long start_i;
        struct page *page;
        int err;
        int ret = 0;
        unsigned long num_pages;

        if (eb->flags & EXTENT_UPTODATE)
                return 0;

        /* this range-bit shortcut is intentionally compiled out */
        if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
                                EXTENT_UPTODATE, 1)) {
                return 0;
        }

        if (start) {
                WARN_ON(start < eb->start);
                start_i = (start >> PAGE_CACHE_SHIFT) -
                          (eb->start >> PAGE_CACHE_SHIFT);
        } else {
                start_i = 0;
        }

        num_pages = num_extent_pages(eb->start, eb->len);
        for (i = start_i; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                if (PageUptodate(page)) {
                        continue;
                }
                if (!wait) {
                        if (TestSetPageLocked(page)) {
                                continue;
                        }
                } else {
                        lock_page(page);
                }
                if (!PageUptodate(page)) {
                        err = page->mapping->a_ops->readpage(NULL, page);
                        if (err) {
                                ret = err;
                        }
                } else {
                        unlock_page(page);
                }
        }

        if (ret || !wait) {
                return ret;
        }

        for (i = start_i; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                wait_on_page_locked(page);
                if (!PageUptodate(page)) {
                        ret = -EIO;
                }
        }
        if (!ret)
                eb->flags |= EXTENT_UPTODATE;
        return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
                        unsigned long start,
                        unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        char *dst = (char *)dstv;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
        unsigned long num_pages = num_extent_pages(eb->start, eb->len);

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);
                if (!PageUptodate(page)) {
                        printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
                        WARN_ON(1);
                }
                WARN_ON(!PageUptodate(page));

                cur = min(len, (PAGE_CACHE_SIZE - offset));
                kaddr = kmap_atomic(page, KM_USER1);
                memcpy(dst, kaddr + offset, cur);
                kunmap_atomic(kaddr, KM_USER1);

                dst += cur;
                len -= cur;
                offset = 0;
                i++;
        }
}
EXPORT_SYMBOL(read_extent_buffer);

int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                              unsigned long min_len, char **token, char **map,
                              unsigned long *map_start,
                              unsigned long *map_len, int km)
{
        size_t offset = start & (PAGE_CACHE_SIZE - 1);
        char *kaddr;
        struct page *p;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
        unsigned long end_i = (start_offset + start + min_len - 1) >>
                              PAGE_CACHE_SHIFT;

        if (i != end_i)
                return -EINVAL;

        if (i == 0) {
                offset = start_offset;
                *map_start = 0;
        } else {
                offset = 0;
                *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
        }
        if (start + min_len > eb->len) {
                printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
                WARN_ON(1);
        }

        p = extent_buffer_page(eb, i);
        WARN_ON(!PageUptodate(p));
        kaddr = kmap_atomic(p, km);
        *token = kaddr;
        *map = kaddr + offset;
        *map_len = PAGE_CACHE_SIZE - offset;
        return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);

int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
                      unsigned long min_len,
                      char **token, char **map,
                      unsigned long *map_start,
                      unsigned long *map_len, int km)
{
        int err;
        int save = 0;
        if (eb->map_token) {
                unmap_extent_buffer(eb, eb->map_token, km);
                eb->map_token = NULL;
                save = 1;
        }
        err = map_private_extent_buffer(eb, start, min_len, token, map,
                                        map_start, map_len, km);
        if (!err && save) {
                eb->map_token = *token;
                eb->kaddr = *map;
                eb->map_start = *map_start;
                eb->map_len = *map_len;
        }
        return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
        kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
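/*
 * Illustrative sketch (not from the original file): reading a u64 that is
 * known not to straddle a page boundary with the mapping helpers above.
 * "offset_in_eb" is an assumed caller-provided offset, and get_unaligned()
 * would need asm/unaligned.h.
 *
 *      char *token;
 *      char *map;
 *      unsigned long map_start;
 *      unsigned long map_len;
 *      u64 val;
 *
 *      if (!map_private_extent_buffer(eb, offset_in_eb, sizeof(u64),
 *                                     &token, &map, &map_start,
 *                                     &map_len, KM_USER0)) {
 *              val = get_unaligned((u64 *)(map + offset_in_eb - map_start));
 *              unmap_extent_buffer(eb, token, KM_USER0);
 *      }
 */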
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
                         unsigned long start,
                         unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        char *ptr = (char *)ptrv;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
        int ret = 0;

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);
                WARN_ON(!PageUptodate(page));

                cur = min(len, (PAGE_CACHE_SIZE - offset));

                kaddr = kmap_atomic(page, KM_USER0);
                ret = memcmp(ptr, kaddr + offset, cur);
                kunmap_atomic(kaddr, KM_USER0);
                if (ret)
                        break;

                ptr += cur;
                len -= cur;
                offset = 0;
                i++;
        }
        return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
                         unsigned long start, unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        char *src = (char *)srcv;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);
                WARN_ON(!PageUptodate(page));

                cur = min(len, PAGE_CACHE_SIZE - offset);
                kaddr = kmap_atomic(page, KM_USER1);
                memcpy(kaddr + offset, src, cur);
                kunmap_atomic(kaddr, KM_USER1);

                src += cur;
                len -= cur;
                offset = 0;
                i++;
        }
}
EXPORT_SYMBOL(write_extent_buffer);

void memset_extent_buffer(struct extent_buffer *eb, char c,
                          unsigned long start, unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);
                WARN_ON(!PageUptodate(page));

                cur = min(len, PAGE_CACHE_SIZE - offset);
                kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + offset, c, cur);
                kunmap_atomic(kaddr, KM_USER0);

                len -= cur;
                offset = 0;
                i++;
        }
}
EXPORT_SYMBOL(memset_extent_buffer);
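/*
 * Illustrative sketch (not from the original file): copying a small
 * fixed-layout structure into and out of an extent_buffer with the byte
 * helpers above.  "struct example_item", its "refs" field and "item_offset"
 * are assumptions; any on-disk structure works the same way.
 *
 *      struct example_item item;
 *
 *      read_extent_buffer(eb, &item, item_offset, sizeof(item));
 *      item.refs++;
 *      write_extent_buffer(eb, &item, item_offset, sizeof(item));
 *
 *      // verify the copy landed as expected
 *      WARN_ON(memcmp_extent_buffer(eb, &item, item_offset, sizeof(item)));
 */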
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
                        unsigned long dst_offset, unsigned long src_offset,
                        unsigned long len)
{
        u64 dst_len = dst->len;
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

        WARN_ON(src->len != dst_len);

        offset = (start_offset + dst_offset) &
                 ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(dst, i);
                WARN_ON(!PageUptodate(page));

                cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

                kaddr = kmap_atomic(page, KM_USER0);
                read_extent_buffer(src, kaddr + offset, src_offset, cur);
                kunmap_atomic(kaddr, KM_USER0);

                src_offset += cur;
                len -= cur;
                offset = 0;
                i++;
        }
}
EXPORT_SYMBOL(copy_extent_buffer);

static void move_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
{
        char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
        if (dst_page == src_page) {
                memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
        } else {
                char *src_kaddr = kmap_atomic(src_page, KM_USER1);
                char *p = dst_kaddr + dst_off + len;
                char *s = src_kaddr + src_off + len;

                while (len--)
                        *--p = *--s;

                kunmap_atomic(src_kaddr, KM_USER1);
        }
        kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
{
        char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
        char *src_kaddr;

        if (dst_page != src_page)
                src_kaddr = kmap_atomic(src_page, KM_USER1);
        else
                src_kaddr = dst_kaddr;

        memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
        kunmap_atomic(dst_kaddr, KM_USER0);
        if (dst_page != src_page)
                kunmap_atomic(src_kaddr, KM_USER1);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                          unsigned long src_offset, unsigned long len)
{
        size_t cur;
        size_t dst_off_in_page;
        size_t src_off_in_page;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long dst_i;
        unsigned long src_i;

        if (src_offset + len > dst->len) {
                printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
                       src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
                printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
                       dst_offset, len, dst->len);
                BUG_ON(1);
        }

        while (len > 0) {
                dst_off_in_page = (start_offset + dst_offset) &
                                  ((unsigned long)PAGE_CACHE_SIZE - 1);
                src_off_in_page = (start_offset + src_offset) &
                                  ((unsigned long)PAGE_CACHE_SIZE - 1);

                dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
                src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

                cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
                                               src_off_in_page));
                cur = min_t(unsigned long, cur,
                            (unsigned long)(PAGE_CACHE_SIZE -
                                            dst_off_in_page));

                copy_pages(extent_buffer_page(dst, dst_i),
                           extent_buffer_page(dst, src_i),
                           dst_off_in_page, src_off_in_page, cur);

                src_offset += cur;
                dst_offset += cur;
                len -= cur;
        }
}
EXPORT_SYMBOL(memcpy_extent_buffer);
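/*
 * Illustrative sketch (not from the original file): memmove_extent_buffer()
 * below handles overlapping ranges the way memmove() does, while
 * memcpy_extent_buffer() above assumes the ranges do not overlap.  For
 * example, shifting a run of fixed-size items up by one slot inside the same
 * buffer, where "slot", "slot_size" and "nr" are assumed caller-provided
 * values:
 *
 *      memmove_extent_buffer(eb, (slot + 1) * slot_size,
 *                            slot * slot_size,
 *                            (nr - slot) * slot_size);
 */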
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                           unsigned long src_offset, unsigned long len)
{
        size_t cur;
        size_t dst_off_in_page;
        size_t src_off_in_page;
        unsigned long dst_end = dst_offset + len - 1;
        unsigned long src_end = src_offset + len - 1;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long dst_i;
        unsigned long src_i;

        if (src_offset + len > dst->len) {
                printk("memmove bogus src_offset %lu move len %lu len %lu\n",
                       src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
                printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
                       dst_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset < src_offset) {
                memcpy_extent_buffer(dst, dst_offset, src_offset, len);
                return;
        }
        while (len > 0) {
                dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
                src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

                dst_off_in_page = (start_offset + dst_end) &
                                  ((unsigned long)PAGE_CACHE_SIZE - 1);
                src_off_in_page = (start_offset + src_end) &
                                  ((unsigned long)PAGE_CACHE_SIZE - 1);

                cur = min_t(unsigned long, len, src_off_in_page + 1);
                cur = min(cur, dst_off_in_page + 1);
                move_pages(extent_buffer_page(dst, dst_i),
                           extent_buffer_page(dst, src_i),
                           dst_off_in_page - cur + 1,
                           src_off_in_page - cur + 1, cur);

                dst_end -= cur;
                src_end -= cur;
                len -= cur;
        }
}
EXPORT_SYMBOL(memmove_extent_buffer);