#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(state_lock);
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_map_tree *tree;
	get_extent_t *get_extent;
};

int __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	if (!extent_state_cache)
		goto free_map_cache;
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer),
						 0, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
free_map_cache:
	kmem_cache_destroy(extent_map_cache);
	return -ENOMEM;
}

void extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d "
		       "refs %d\n", state->start, state->end, state->state,
		       state->in_tree, atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);

void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;

	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del_init(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;

	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);
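/*
 * Usage sketch (illustrative only, not part of the original file):
 * alloc_extent_map() returns an object holding one reference, and
 * add_extent_mapping() below takes its own reference on success, so a
 * caller typically drops the allocation reference afterwards either way:
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *	if (!em || IS_ERR(em))
 *		return -ENOMEM;
 *	em->start = start;		// caller fills in the range
 *	em->end = end;
 *	ret = add_extent_mapping(tree, em);
 *	free_extent_map(em);		// drops the allocation ref
 */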
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;

	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;

	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}
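/*
 * Illustrative note (added commentary, not from the original source):
 * tree_search() returns the entry containing 'offset' when one exists;
 * otherwise it returns the first entry that ends at or after 'offset'
 * (or NULL past the end of the tree).  That is why callers describe it
 * as finding "the extents that end after our range starts":
 *
 *	entries:  [0,9]   [20,29]
 *	tree_search(root, 5)  -> the [0,9] node (containing match)
 *	tree_search(root, 12) -> the [20,29] node (next ending entry)
 *	tree_search(root, 35) -> NULL
 */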
/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == EXTENT_MAP_INLINE &&
		      prev->block_start == EXTENT_MAP_INLINE) ||
		     (em->block_start == EXTENT_MAP_DELALLOC &&
		      prev->block_start == EXTENT_MAP_DELALLOC) ||
		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
		      em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);
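/*
 * Usage sketch (illustrative only, not part of the original file):
 * looking up the mapping that covers an offset and dropping the
 * reference lookup_extent_mapping() took for us:
 *
 *	struct extent_map *em;
 *
 *	em = lookup_extent_mapping(tree, pos, pos + len - 1);
 *	if (em && !IS_ERR(em)) {
 *		// em may cover more than [pos, pos + len - 1]
 *		sector_t sector = (em->block_start + pos - em->start) >> 9;
 *		free_extent_map(em);
 *	}
 */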
/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged because the end_io handlers need to be able to
 * do operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created first half.  'split' indicates
 * an offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
			struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

static void set_state_bits(struct extent_map_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits;
}
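/*
 * Illustrative note (added commentary, not from the original source):
 * set_extent_bit() and clear_extent_bit() share a retry pattern.  Each
 * pass preallocates one extent_state outside the tree lock (when
 * __GFP_WAIT allows), spends it on at most one split or insert, then
 * loops.  A hypothetical caller marking a range dirty and later
 * clearing it again looks like:
 *
 *	set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, GFP_NOFS);
 *	...
 *	clear_extent_bit(tree, start, end, EXTENT_DIRTY, 0, 0, GFP_NOFS);
 *
 * which is roughly what the set_extent_dirty/clear_extent_dirty
 * wrappers below package up.
 */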
/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);
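/*
 * Usage sketch (illustrative only): the EXTENT_LOCKED bit acts as a
 * sleeping range lock.  A hypothetical caller protecting a byte range
 * while working on it would do:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);	// may sleep
 *	// ... read or modify bits on [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);	// wakes waiters
 *
 * lock_extent() is built on the 'exclusive' mode of set_extent_bit():
 * on -EEXIST it waits for EXTENT_LOCKED to clear at failed_start and
 * retries from there.
 */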
int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			struct extent_state *prev_state;
			struct rb_node *prev_node = node;

			while (1) {
				prev_node = rb_prev(prev_node);
				if (!prev_node)
					break;
				prev_state = rb_entry(prev_node,
						      struct extent_state,
						      rb_node);
				if (!(prev_state->state & EXTENT_DELALLOC))
					break;
				state = prev_state;
				node = prev_node;
			}
		}
		if (state->state & EXTENT_LOCKED) {
			DEFINE_WAIT(wait);
			atomic_inc(&state->refs);
			prepare_to_wait(&state->wq, &wait,
					TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tree->lock);
			schedule();
			write_lock_irq(&tree->lock);
			finish_wait(&state->wq, &wait);
			free_extent_state(state);
			goto search_again;
		}
		state->state |= EXTENT_LOCKED;
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}
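/*
 * Usage sketch (illustrative only): walking every dirty range in a
 * tree with find_first_extent_bit(), which returns 0 when a matching
 * range was found and nonzero when the search ran off the end:
 *
 *	u64 found_start, found_end;
 *	u64 cur = 0;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		// process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */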
u64 count_range_bits(struct extent_map_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		printk("search_end %Lu start %Lu\n", search_end, cur_start);
		WARN_ON(1);
		return 0;
	}

	write_lock_irq(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return total_bytes;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
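/*
 * Usage sketch (illustrative only): the private field stores one u64
 * cookie per state record, keyed by the exact start offset of the
 * record.  Both calls return -ENOENT if no record starts at 'start':
 *
 *	set_state_private(tree, start, cookie);
 *	...
 *	u64 cookie;
 *	if (!get_state_private(tree, start, &cookie))
 *		// cookie is valid here
 */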
int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}
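/*
 * Illustrative note (added commentary): the three check_page helpers
 * above show both test_range_bit() modes.  With filled == 1 the whole
 * page range must be covered by states with the bit set (any gap means
 * 0), which suits EXTENT_UPTODATE; with filled == 0 a single state
 * with the bit anywhere in the range is enough, which suits "is any
 * part of this page still locked or under writeback".
 */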
/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	/* walk the completed pages from the last bio_vec back to the first */
	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio)
{
	u64 maxsector;
	int ret = 0;

	bio_get(bio);

	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector < bio->bi_sector) {
		printk("sector too large max %Lu got %llu\n", maxsector,
		       (unsigned long long)bio->bi_sector);
		WARN_ON(1);
	}

	submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		/* don't dereference a failed allocation below */
		printk("failed to allocate bio nr %d\n", nr);
		return -ENOMEM;
	}
	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;
	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio);
	}

	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}
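/*
 * Illustrative note (added commentary): when callers pass a non-NULL
 * 'bio_ret', submit_extent_page() batches contiguous pages into one
 * bio.  Pages that follow on disk are appended; a discontiguity (or a
 * full bio) submits the old bio and starts a new one, which is handed
 * back through *bio_ret.  The caller owns the final partial bio and
 * must submit it, e.g.:
 *
 *	struct bio *bio = NULL;
 *	// ... several submit_extent_page(READ, ..., &bio, ...) calls ...
 *	if (bio)
 *		submit_one_bio(READ, bio);
 *
 * extent_read_full_page() below follows exactly this pattern.
 */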
/*
 * basic readpage implementation.  Locked extent state structs are
 * inserted into the tree and removed again when the IO is done (by the
 * end_io handlers)
 */
static int __extent_read_full_page(struct extent_map_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			char *userpage;
			iosize = PAGE_CACHE_SIZE - page_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			nr -= page->index;
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, bio, nr,
						 end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio);
	if (bio)
		submit_one_bio(READ, bio);
	return ret;
}
EXPORT_SYMBOL(extent_read_full_page);

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      void *data)
{
	struct inode *inode = page->mapping->host;
	struct extent_page_data *epd = data;
	struct extent_map_tree *tree = epd->tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 delalloc_start;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		char *userpage;

		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);

		userpage = kmap_atomic(page, KM_USER0);
		memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(userpage, KM_USER0);
	}

	set_page_extent_mapped(page);

	delalloc_start = start;
	delalloc_end = 0;
	while (delalloc_end < page_end) {
		nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
						       &delalloc_end,
						       128 * 1024 * 1024);
		if (nr_delalloc == 0) {
			delalloc_start = delalloc_end + 1;
			continue;
		}
		if (tree->ops && tree->ops->fill_delalloc)
			tree->ops->fill_delalloc(inode, delalloc_start,
						 delalloc_end);
		clear_extent_bit(tree, delalloc_start,
				 delalloc_end,
				 EXTENT_LOCKED | EXTENT_DELALLOC,
				 1, 0, GFP_NOFS);
		delalloc_start = delalloc_end + 1;
	}
	lock_extent(tree, start, page_end, GFP_NOFS);

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			unsigned long max_nr = end_index + 1;

			set_range_writeback(tree, cur, cur + iosize - 1);
			if (!PageWriteback(page)) {
				printk("warning page %lu not writeback, "
				       "cur %llu end %llu\n", page->index,
				       (unsigned long long)cur,
				       (unsigned long long)end);
			}

			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 &epd->bio, max_nr,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)

/* Taken directly from 2.6.23 for the 2.6.18 back port */
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
			   void *data);

/**
 * write_cache_pages - walk the list of dirty pages of the given address space
 * and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
static int write_cache_pages(struct address_space *mapping,
			     struct writeback_control *wbc,
			     writepage_t writepage, void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index,
						  (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			ret = (*writepage)(page, wbc, data);

			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
				unlock_page(page);
				ret = 0;
			}
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}
#endif

int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	int ret;
	struct address_space *mapping = page->mapping;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};
	struct writeback_control wbc_writepages = {
		.bdi = wbc->bdi,
		.sync_mode = WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write = 64,
		.range_start = page_offset(page) + PAGE_CACHE_SIZE,
		.range_end = (loff_t)-1,
	};

	ret = __extent_writepage(page, wbc, &epd);

	write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
	if (epd.bio) {
		submit_one_bio(WRITE, epd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(extent_write_full_page);
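/*
 * Usage sketch (illustrative only; 'my_get_extent' is a hypothetical
 * get_extent_t callback, not defined in this file): a filesystem wires
 * these helpers into its address_space_operations, roughly like:
 *
 *	static int my_writepages(struct address_space *mapping,
 *				 struct writeback_control *wbc)
 *	{
 *		struct extent_map_tree *tree = ...;	// per-inode tree
 *		return extent_writepages(tree, mapping,
 *					 my_get_extent, wbc);
 *	}
 */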
int extent_writepages(struct extent_map_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret = 0;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
	if (epd.bio) {
		submit_one_bio(WRITE, epd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(extent_writepages);

int extent_readpages(struct extent_map_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	struct pagevec pvec;

	pagevec_init(&pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (!add_to_page_cache(page, mapping,
				       page->index, GFP_KERNEL)) {

			/* open coding of lru_cache_add, also not exported */
			page_cache_get(page);
			if (!pagevec_add(&pvec, page))
				__pagevec_lru_add(&pvec);
			__extent_read_full_page(tree, page, get_extent, &bio);
		}
		page_cache_release(page);
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add(&pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		submit_one_bio(READ, bio);
	return 0;
}
EXPORT_SYMBOL(extent_readpages);

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);

/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);

int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em) {
			goto err;
		}
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if ((em->block_start != EXTENT_MAP_HOLE &&
		     em->block_start != EXTENT_MAP_INLINE) &&
		    !isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize) &
				 ~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 em->bdev, NULL, 1,
						 end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);
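
/*
 * Illustrative sketch (not part of this file): a filesystem's
 * prepare_write/commit_write pair is expected to bracket the copy of
 * user data into the page, roughly:
 *
 *	err = extent_prepare_write(tree, inode, page, from, to, get_extent);
 *	if (!err) {
 *		... copy bytes [from, to) into the page ...
 *		extent_commit_write(tree, inode, page, from, to);
 *	}
 *
 * The names tree/inode/page/from/to stand in for whatever the caller
 * has on hand; only the call ordering is the point here.
 */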
/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);

sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	u64 start = iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	sector_t sector = 0;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}

static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
{
	if (list_empty(&eb->lru)) {
		extent_buffer_get(eb);
		list_add(&eb->lru, &tree->buffer_lru);
		tree->lru_size++;
		if (tree->lru_size >= BUFFER_LRU_MAX) {
			struct extent_buffer *rm;
			rm = list_entry(tree->buffer_lru.prev,
					struct extent_buffer, lru);
			tree->lru_size--;
			list_del_init(&rm->lru);
			free_extent_buffer(rm);
		}
	} else
		list_move(&eb->lru, &tree->buffer_lru);
	return 0;
}

static struct extent_buffer *find_lru(struct extent_map_tree *tree,
				      u64 start, unsigned long len)
{
	struct list_head *lru = &tree->buffer_lru;
	struct list_head *cur = lru->next;
	struct extent_buffer *eb;

	if (list_empty(lru))
		return NULL;

	do {
		eb = list_entry(cur, struct extent_buffer, lru);
		if (eb->start == start && eb->len == len) {
			extent_buffer_get(eb);
			return eb;
		}
		cur = cur->next;
	} while (cur != lru);
	return NULL;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}

static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	read_lock_irq(&mapping->tree_lock);
	p = radix_tree_lookup(&mapping->page_tree, i);
	read_unlock_irq(&mapping->tree_lock);
	return p;
}
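
/*
 * allocation checks the per-tree LRU first so that a recently used
 * buffer covering the same [start, start + len) range is reused (with
 * a reference taken) instead of being rebuilt; add_lru above caps the
 * list at BUFFER_LRU_MAX entries.
 */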
static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	spin_unlock(&tree->lru_lock);
	if (eb) {
		return eb;
	}

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	/* don't touch the buffer if the allocation failed */
	if (!eb)
		return NULL;
	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}

struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		WARN_ON(!PageUptodate(page0));
		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
				 len << 2);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;

fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);
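
/*
 * find_extent_buffer is the lookup-only twin of alloc_extent_buffer:
 * it uses find_lock_page and never allocates page cache pages, so it
 * fails (returns NULL) if any page of the range isn't already present.
 */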
struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p) {
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}

		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;
fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);

void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	WARN_ON(!list_empty(&eb->lru));
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 1; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);

int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean a page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		write_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
		}
		write_unlock_irq(&page->mapping->tree_lock);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
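
/*
 * dirtying happens in two steps: each page is marked dirty first, then
 * the [start, end] byte range is tagged EXTENT_DIRTY in the tree, which
 * is what the writepage path tests and clears.
 */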
int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		if (i == 0) {
			lock_page(page);
			set_page_private(page,
					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 eb->len << 2);
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		if (i == 0)
			unlock_page(page);
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);

int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb,
			     u64 start,
			     int wait)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				EXTENT_UPTODATE, 1)) {
		return 0;
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			  (eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page)) {
			continue;
		}
		if (!wait) {
			if (TestSetPageLocked(page)) {
				continue;
			}
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err) {
				ret = err;
			}
		} else {
			unlock_page(page);
		}
	}

	if (ret || !wait) {
		return ret;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
		}
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);
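
/*
 * read_extent_buffer copies out of the buffer one page at a time with
 * kmap_atomic, since an extent buffer may span several (possibly
 * highmem) pages; callers must have made the range uptodate first,
 * e.g. via read_extent_buffer_pages above.
 */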
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			printk("page %lu not up to date i %lu, total %lu, "
			       "len %lu\n", page->index, i, num_pages,
			       eb->len);
			WARN_ON(1);
		}
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);

int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
			      PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk("bad mapping eb start %llu len %lu, wanted %lu %lu\n",
		       (unsigned long long)eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	WARN_ON(!PageUptodate(p));
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);

int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;
	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);
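
/*
 * Illustrative sketch (hypothetical caller): the map/unmap pair above
 * gives a kernel virtual address for a region that must not cross a
 * page boundary, which is why map_private_extent_buffer returns
 * -EINVAL when it would:
 *
 *	char *token, *kaddr;
 *	unsigned long map_start, map_len;
 *
 *	if (!map_extent_buffer(eb, start, sizeof(u64), &token, &kaddr,
 *			       &map_start, &map_len, KM_USER0)) {
 *		... read or write at kaddr ...
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 */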
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		 ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}
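
/*
 * copy_pages is the non-overlapping fast path: a plain memcpy between
 * (or within) pages.  move_pages above copies backwards byte by byte
 * so that memmove_extent_buffer can safely handle a destination that
 * overlaps its source at a higher offset.
 */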
static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE -
					    dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);