#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(extent_buffers);
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t extent_buffers_lock;
static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
static int nr_extent_buffers;
#define MAX_EXTENT_BUFFER_CACHE 128

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
	spin_lock_init(&extent_buffers_lock);
}

void __exit extent_map_exit(void)
{
	struct extent_buffer *eb;
	struct extent_state *state;

	while (!list_empty(&extent_buffers)) {
		eb = list_entry(extent_buffers.next,
				struct extent_buffer, list);
		list_del(&eb->list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       state->in_tree, atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}
	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next,
				struct extent_buffer, leak_list);
		printk("buffer leak start %Lu len %lu return %lX\n",
		       eb->start, eb->len, eb->alloc_addr);
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_map_tree_init);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

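/*
 * Search semantics of the helpers above, with hypothetical tree
 * contents [0, 4095] and [8192, 12287]:
 *
 *	tree_search(root, 100)   returns the node for [0, 4095]
 *	tree_search(root, 5000)  returns the node for [8192, 12287],
 *				 the first entry that ends after 5000
 *	tree_search(root, 20000) returns NULL, the search walked off
 *				 the end of the tree
 *
 * Callers depend on this "first entry ending at or after the offset"
 * behavior to find every record that overlaps a range.
 */
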
/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

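/*
 * Typical caller pattern for the functions above (illustrative only;
 * error handling trimmed).  add_extent_mapping() and
 * lookup_extent_mapping() each take their own reference on the
 * extent_map, so the caller always drops its reference with
 * free_extent_map() when it is done:
 *
 *	em = alloc_extent_map(GFP_NOFS);	refs == 1
 *	em->start = ...; em->end = ...;
 *	em->block_start = ...; em->block_end = ...;
 *	add_extent_mapping(tree, em);		refs == 2, tree holds one
 *	free_extent_map(em);			refs == 1
 *
 *	em = lookup_extent_mapping(tree, start, end);
 *	... use em ...
 *	free_extent_map(em);
 *
 * remove_extent_mapping() does not drop the tree's reference; callers
 * pair it with their own free_extent_map() the way
 * try_release_extent_mapping() does below.
 */
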
/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 *	prealloc: [orig->start, split - 1]
 *	orig:     [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

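/*
 * Worked example for the two helpers above (hypothetical offsets): if
 * the tree holds [0, 4095] and [4096, 8191] and both records carry
 * exactly EXTENT_DIRTY, merge_state() collapses them into one
 * [0, 8191] record.  Going the other way, split_state(tree, orig,
 * prealloc, 4096) on a single [0, 8191] record leaves:
 *
 *	prealloc: [0, 4095]
 *	orig:     [4096, 8191]
 *
 * which is how set/clear_extent_bit trim a record to the edge of the
 * range they were asked to modify.
 */
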
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

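/*
 * Example call (hypothetical truncate-style path): remove every state
 * record over a range no matter which bits it carries, waking any
 * sleepers, by passing delete == 1:
 *
 *	clear_extent_bit(tree, start, end,
 *			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 1, GFP_NOFS);
 *
 * extent_invalidatepage() below uses exactly this form.  A caller that
 * just needs to block until IO finishes uses the waiter instead:
 *
 *	wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
 */
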
/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start, end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

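/*
 * Example of the range locking above (illustrative): serialize work on
 * one page worth of the file by locking its byte range in the tree:
 *
 *	u64 start = page->index << PAGE_CACHE_SHIFT;
 *	u64 end = start + PAGE_CACHE_SIZE - 1;
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... read or modify the range ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 *
 * lock_extent() loops until no byte of [start, end] carries
 * EXTENT_LOCKED, so concurrent lockers of overlapping ranges are
 * serialized; extent_read_full_page() below relies on this.
 */
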
int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->state & bits) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end,
			     u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			goto out;
		}
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				/* register on the waitqueue before dropping
				 * the lock, otherwise the wakeup can be
				 * missed and we sleep forever
				 */
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

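/*
 * Example use of find_first_extent_bit() (hypothetical scan): walk
 * every dirty range in the tree starting from offset zero:
 *
 *	u64 start = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 *
 * It returns 0 and fills the out parameters when a matching record
 * exists at or after 'start', and 1 once the search runs off the end
 * of the tree.
 */
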
/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

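/*
 * Example of the private data calls above (illustrative): stash a
 * 64 bit cookie on the state record that begins at 'start' and read
 * it back later:
 *
 *	set_state_private(tree, start, cookie);
 *	...
 *	ret = get_state_private(tree, start, &cookie);
 *
 * Both return -ENOENT unless a state record starts at exactly 'start',
 * so the same offset must be used for the set and the get.
 */
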
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

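/*
 * Example of the 'filled' flag to test_range_bit() (hypothetical tree
 * contents): with EXTENT_UPTODATE set on [0, 2047] but not on
 * [2048, 4095]:
 *
 *	test_range_bit(tree, 0, 4095, EXTENT_UPTODATE, 1) returns 0
 *	test_range_bit(tree, 0, 4095, EXTENT_UPTODATE, 0) returns 1
 *
 * The page helpers above pass filled == 1 when every block must be
 * valid (uptodate) and filled == 0 when one remaining bit is enough
 * to keep the page locked or under writeback.
 */
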
/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      bio_end_io_t end_io_func)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_io_vec[0].bv_page = page;
	bio->bi_io_vec[0].bv_len = size;
	bio->bi_io_vec[0].bv_offset = offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = size;

	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, 1);
		page_cache_get(page);
	}
}

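/*
 * Note on units for submit_extent_page(): 'sector' is in 512 byte
 * units while the extent offsets are in bytes, so callers shift by 9.
 * A worked example with hypothetical numbers: an extent with
 * block_start == 1048576 being read 4096 bytes in submits
 *
 *	sector = (1048576 + 4096) >> 9 = 2056
 *
 * which is the calculation extent_read_full_page() and
 * extent_write_full_page() do below.
 */
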
/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct inode *inode = page->mapping->host;
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(extent_read_full_page);

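/*
 * A filesystem typically wires this into its address_space operations
 * (sketch only; the my_* names are hypothetical and the tree would
 * come from the fs specific inode):
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree = my_tree(page->mapping->host);
 *		return extent_read_full_page(tree, page, my_get_extent);
 *	}
 */
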
/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	lock_extent(tree, start, page_end, GFP_NOFS);
	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
					       &delalloc_end,
					       128 * 1024 * 1024);
	if (nr_delalloc) {
		tree->ops->fill_delalloc(inode, start, delalloc_end);
		if (delalloc_end >= page_end + 1) {
			clear_extent_bit(tree, page_end + 1, delalloc_end,
					 EXTENT_LOCKED | EXTENT_DELALLOC,
					 1, 0, GFP_NOFS);
		}
		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
				 0, 0, GFP_NOFS);
		if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
			printk("found delalloc bits after clear extent_bit\n");
		}
	} else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after find_delalloc_range returns 0\n");
	}

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			set_range_writeback(tree, cur, cur + iosize - 1);
			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}
EXPORT_SYMBOL(extent_write_full_page);

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = (page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);

/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);

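/*
 * Sketch of how the prepare/commit pair is meant to be used by a
 * write path (hypothetical caller; error handling trimmed):
 *
 *	extent_prepare_write(tree, inode, page, from, to, my_get_extent);
 *	... copy the user's bytes into the page between from and to ...
 *	extent_commit_write(tree, inode, page, from, to);
 *
 * extent_prepare_write() below reads or zeros any blocks the copy
 * will not completely overwrite; extent_commit_write() then dirties
 * the page and extends i_size when the write ends past the old size.
 */
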
int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em) {
			goto err;
		}
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (!isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize - 1) &
				~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 em->bdev,
						 end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);

sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	u64 start = iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	return (em->block_start + start - em->start) >> inode->i_blkbits;
}

static struct extent_buffer *__alloc_extent_buffer(gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&extent_buffers_lock);
	if (!list_empty(&extent_buffers)) {
		eb = list_entry(extent_buffers.next, struct extent_buffer,
				list);
		list_del(&eb->list);
		WARN_ON(nr_extent_buffers == 0);
		nr_extent_buffers--;
	}
	spin_unlock(&extent_buffers_lock);

	if (eb) {
		memset(eb, 0, sizeof(*eb));
	} else {
		eb = kmem_cache_zalloc(extent_buffer_cache, mask);
		/* don't put a failed allocation on the leak list */
		if (!eb)
			return NULL;
	}
	spin_lock(&extent_buffers_lock);
	list_add(&eb->leak_list, &buffers);
	spin_unlock(&extent_buffers_lock);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	spin_lock(&extent_buffers_lock);
	list_del_init(&eb->leak_list);
	spin_unlock(&extent_buffers_lock);

	if (nr_extent_buffers >= MAX_EXTENT_BUFFER_CACHE) {
		kmem_cache_free(extent_buffer_cache, eb);
	} else {
		spin_lock(&extent_buffers_lock);
		list_add(&eb->list, &extent_buffers);
		nr_extent_buffers++;
		spin_unlock(&extent_buffers_lock);
	}
}

static inline struct page *extent_buffer_page(struct extent_buffer *eb, int i)
{
	struct page *p;
	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	p = find_get_page(eb->first_page->mapping, i);
	page_cache_release(p);
	return p;
}

struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  gfp_t mask)
{
	unsigned long num_pages = ((start + len - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT) + 1;
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	eb->alloc_addr = __builtin_return_address(0);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	for (i = 0; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			/* make sure the free only frees the pages we've
			 * grabbed a reference on
			 */
			eb->len = i << PAGE_CACHE_SHIFT;
			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
			goto fail;
		}
		set_page_extent_mapped(p);
		if (i == 0)
			eb->first_page = p;
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	return eb;
fail:
	free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);

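/*
 * Example buffer lifetime (illustrative): pin the pages backing a
 * metadata block at byte offset 'start' spanning 'len' bytes:
 *
 *	eb = alloc_extent_buffer(tree, start, len, GFP_NOFS);
 *	...
 *	free_extent_buffer(eb);
 *
 * alloc_extent_buffer() takes a page cache reference on every page
 * under [start, start + len), and the final free_extent_buffer()
 * drops those references along with the struct itself.
 */
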
struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = ((start + len - 1) >> PAGE_CACHE_SHIFT) -
				  (start >> PAGE_CACHE_SHIFT) + 1;
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;

	eb = __alloc_extent_buffer(mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	eb->alloc_addr = __builtin_return_address(0);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	for (i = 0; i < num_pages; i++, index++) {
		p = find_get_page(mapping, index);
		if (!p) {
			/* make sure the free only frees the pages we've
			 * grabbed a reference on
			 */
			eb->len = i << PAGE_CACHE_SHIFT;
			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
			goto fail;
		}
		set_page_extent_mapped(p);
		if (i == 0)
			eb->first_page = p;
	}
	return eb;
fail:
	free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);

void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	if (eb->len == 0) {
		/* the alloc fail path grabbed no pages at all */
		num_pages = 0;
	} else {
		num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
			    (eb->start >> PAGE_CACHE_SHIFT) + 1;
	}

	if (eb->first_page)
		page_cache_release(eb->first_page);
	for (i = 1; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);

int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
		    (eb->start >> PAGE_CACHE_SHIFT) + 1;

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean a page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	return set_range_dirty(tree, eb->start, eb->start + eb->len - 1);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
		    (eb->start >> PAGE_CACHE_SHIFT) + 1;

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);
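
/*
 * Worked example for the num_pages computation repeated throughout this
 * file: with 4k pages, a buffer at start 6144 with len 4096 covers file
 * bytes 6144-10239, which touch page indexes 1 and 2.  The expression
 * ((6144 + 4096 - 1) >> 12) - (6144 >> 12) + 1 yields 2 - 1 + 1 = 2,
 * while simply rounding len up to whole pages would wrongly yield 1.
 */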
int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb, int wait)
{
	unsigned long i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			   EXTENT_UPTODATE, 1)) {
		return 0;
	}

	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
		    (eb->start >> PAGE_CACHE_SHIFT) + 1;
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page)) {
			continue;
		}
		if (!wait) {
			/* skip pages someone else is already reading */
			if (TestSetPageLocked(page)) {
				continue;
			}
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err) {
				ret = err;
			}
		} else {
			unlock_page(page);
		}
	}

	if (ret || !wait) {
		return ret;
	}

	/* second pass: wait for the IO submitted above to complete */
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
		}
	}
	/* only cache the uptodate flag if every page actually made it */
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);

	/*
	 * buffers that cross a page boundary are always page aligned,
	 * so start_offset only matters inside the first page
	 */
	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);

int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len) >>
			      PAGE_CACHE_SHIFT;

	/* mappings are only valid within a single page */
	if (i != end_i)
		return -EINVAL;

	WARN_ON(start > eb->len);

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
	}

	kaddr = kmap_atomic(extent_buffer_page(eb, i), km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
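
/*
 * Example usage (a sketch, not part of the original file): mapping lets
 * a caller read a field in place instead of copying it out.  Reading an
 * 8 byte field at offset zero of the buffer is normally a single-page
 * mapping; a range that would cross a page gets -EINVAL, and the caller
 * falls back to read_extent_buffer:
 *
 *	char *token;
 *	char *map;
 *	unsigned long map_start;
 *	unsigned long map_len;
 *	__le64 raw;
 *	u64 val;
 *
 *	if (map_extent_buffer(eb, 0, sizeof(raw), &token, &map,
 *			      &map_start, &map_len, KM_USER0) == 0) {
 *		val = le64_to_cpu(*(__le64 *)map);
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	} else {
 *		read_extent_buffer(eb, &raw, 0, sizeof(raw));
 *		val = le64_to_cpu(raw);
 *	}
 */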
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);
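
/*
 * Example usage (a sketch, not part of the original file): the helpers
 * above let callers treat a buffer as flat storage, so initializing a
 * freshly allocated block is plain memset/copy code.  fake_header is a
 * placeholder for whatever the caller keeps at offset zero:
 *
 *	struct fake_header { __le64 blocknr; } hdr;
 *
 *	memset_extent_buffer(eb, 0, 0, eb->len);
 *	hdr.blocknr = cpu_to_le64(eb->start);
 *	write_extent_buffer(eb, &hdr, 0, sizeof(hdr));
 *	set_extent_buffer_dirty(tree, eb);
 */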
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = dst_offset & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER1);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);

	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		/* copy backwards so overlapping ranges stay safe */
		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = dst_offset &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = src_offset &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		if (src_i == 0)
			src_off_in_page += start_offset;
		if (dst_i == 0)
			dst_off_in_page += start_offset;

		/* only copy as much as fits in both pages at once */
		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE -
					       dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);
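
/*
 * memcpy_extent_buffer copies front to back, so with overlapping ranges
 * it is only safe when dst is below src.  memmove_extent_buffer below
 * handles the remaining case by walking backwards from the end of both
 * ranges: shifting bytes 0-4087 up by 8 (dst_offset 8, src_offset 0)
 * must copy byte 4087 first, or the low source bytes would be
 * overwritten before they are read.
 */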
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		/* moving down can't clobber unread bytes, copy forwards */
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = dst_end &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = src_end &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		if (src_i == 0)
			src_off_in_page += start_offset;
		if (dst_i == 0)
			dst_off_in_page += start_offset;

		/* move as much as we can without crossing a page head */
		cur = min(len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);

		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur - 1;
		src_end -= cur - 1;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);
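
/*
 * Example usage (a sketch, not part of the original file): a btree-style
 * caller inserting an item shifts the tail of the block up with the
 * memmove variant, since source and destination overlap, then writes
 * the new item into the gap.  used, ins_offset, ins_size and new_item
 * are placeholders for the caller's bookkeeping:
 *
 *	memmove_extent_buffer(eb, ins_offset + ins_size, ins_offset,
 *			      used - ins_offset);
 *	write_extent_buffer(eb, new_item, ins_offset, ins_size);
 *	set_extent_buffer_dirty(tree, eb);
 */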