/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02110-1301, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * when auto defrag is enabled we queue up these defrag structs to
 * remember which inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
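
/*
 * Defrag records live in fs_info->defrag_inodes, an rbtree ordered by
 * the comparator above: first by root objectid, then by inode number.
 * That total order is what lets btrfs_pick_defrag_inode() below resume
 * a scan from a (root, ino) cursor after the lock has been dropped.
 */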

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found the defrag item you
 * pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return 0;
}

static inline int __need_auto_defrag(struct btrfs_root *root)
{
	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(root))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and then evict the inode
		 * from memory, a later re-read of the inode won't have
		 * IN_DEFRAG set.  In that case we may find an existing
		 * record for it in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}
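
/*
 * Note that the BTRFS_INODE_IN_DEFRAG runtime flag is only a fast-path
 * hint that lets btrfs_add_inode_defrag() skip the tree walk for inodes
 * that are already queued; the rbtree, protected by defrag_inodes_lock,
 * is the authoritative record and is always re-checked under the lock.
 */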

/*
 * Requeue the defrag object.  If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (!__need_auto_defrag(root))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to
	 * merge them together.
	 */
	spin_lock(&root->fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * pick the defrag record for the given root and inode; if an exact
 * match doesn't exist, we will get the next one in (root, ino) order.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		if (need_resched()) {
			spin_unlock(&fs_info->defrag_inodes_lock);
			cond_resched();
			spin_lock(&fs_info->defrag_inodes_lock);
		}

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH	1024
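
/*
 * Each pass defragments at most BTRFS_DEFRAG_BATCH pages of one inode
 * before the record is requeued (see __btrfs_run_defrag_inode() below),
 * so a single very large file can't monopolize the auto-defrag work.
 */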

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int index;
	int ret;

	/* get the inode */
	key.objectid = defrag->root;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
	key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	key.objectid = defrag->ino;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag again.
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * run through the list of inodes in the FS that need defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info->tree_root))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
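
/*
 * btrfs_run_defrag_inodes() above walks the tree using (root_objectid,
 * first_ino) as a cursor: after each record is processed, the scan
 * resumes at the next record in (root, ino) order, restarts from zero
 * when it falls off the end, and stops once the tree yields nothing.
 */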

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty; clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
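
/*
 * Dropping [start, end] from a cached extent map can leave up to two
 * surviving pieces, built in 'split' and 'split2' by
 * btrfs_drop_extent_cache() below:
 *
 *    em:     [===============================]
 *    drop:            [start ....... end]
 *    keep:   [=split=]                   [=split2=]
 */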

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		remove_extent_mapping(em_tree, em);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split, modified);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
							     + diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			ret = add_extent_mapping(em_tree, split, modified);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = NULL;
		}
next:
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}
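
/*
 * Note for callers of __btrfs_drop_extents(): *drop_end reports how far
 * the drop actually progressed.  When replace_extent is set and only a
 * single leaf was visited, the function may also reserve room in that
 * leaf for a new file extent item of extent_item_size bytes and set
 * *key_inserted, saving the caller a second tree search.
 */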

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf,
						     path->slots[0], fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are moved off from our leaf to its immediate left
		 * or right neighbor leafs, we end up with a correct and
		 * adjusted path->slots[0] for our insertion.
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);

		leaf = path->nodes[0];
		/*
		 * leaf eb has flag EXTENT_BUFFER_STALE if it was deleted (that
		 * is, its contents got pushed to its neighbors), in which case
		 * it means path->locks[0] == 0
		 */
		if (!ret && replace_extent && leafs_visited == 1 &&
		    path->locks[0] &&
		    btrfs_leaf_free_space(root, leaf) >=
		    sizeof(struct btrfs_item) + extent_item_size) {

			key.objectid = ino;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = start;
			setup_items_for_insert(root, path, &key,
					       &extent_item_size,
					       extent_item_size,
					       sizeof(struct btrfs_item) +
					       extent_item_size, 1);
			*key_inserted = 1;
		}
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, extent_end) : end;
	return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}
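
	/*
	 * No mergeable neighbor absorbed the written range, so split the
	 * pre-allocated extent item around [start, end).  Each pass of the
	 * loop below duplicates the item once and takes an extra reference
	 * on the disk extent, leaving at most three items: an optional
	 * pre-alloc head, the written middle, and an optional tail.
	 */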
	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}

/*
 * This function locks the extent and properly waits for data=ordered
 * extents to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - the pages need to be re-prepared
 * any other < 0 value - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
	last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;

	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos, 0, cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, last_pos);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset <= last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos,
					     cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			ret = btrfs_wait_ordered_range(inode, start_pos,
						last_pos - start_pos + 1);
			if (ret)
				return ret;
			else
				return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				 0, 0, cached_state, GFP_NOFS);
		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}
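
/*
 * Decide whether [pos, pos + *write_bytes) can be written without
 * allocating new data space: wait out any ordered extents in the range,
 * then ask can_nocow_extent().  Returns > 0 and clamps *write_bytes to
 * the nocow-able length on success, or 0 if the write must take the
 * normal COW path.
 */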
static noinline int check_can_nocow(struct inode *inode, loff_t pos,
				    size_t *write_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_extent *ordered;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	lockstart = round_down(pos, root->sectorsize);
	lockend = lockstart + round_up(*write_bytes, root->sectorsize) - 1;

	while (1) {
		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered)
			break;
		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	num_bytes = lockend - lockstart + 1;
	ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
	if (ret <= 0) {
		ret = 0;
	} else {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
				 NULL, GFP_NOFS);
		*write_bytes = min_t(size_t, *write_bytes, num_bytes);
	}

	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);

	return ret;
}
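
/*
 * Outline of the buffered write loop below: reserve data and metadata
 * space (or prove a nocow overwrite is possible), fault in and lock the
 * target pages, copy from the iov_iter, give back whatever reservation
 * a short copy did not use, then mark the copied range dirty/delalloc
 * via btrfs_dirty_pages().
 */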
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	struct extent_state *cached_state = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;
	bool need_unlock;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
		ret = btrfs_check_data_free_space(inode, reserve_bytes);
		if (ret == -ENOSPC &&
		    (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
					      BTRFS_INODE_PREALLOC))) {
			ret = check_can_nocow(inode, pos, &write_bytes);
			if (ret > 0) {
				only_release_metadata = true;
				/*
				 * our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = (write_bytes + offset +
					     PAGE_CACHE_SIZE - 1) >>
					     PAGE_CACHE_SHIFT;
				reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
				ret = 0;
			} else {
				ret = -ENOSPC;
			}
		}

		if (ret)
			break;

		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(inode,
							       reserve_bytes);
			break;
		}

		release_bytes = reserve_bytes;
		need_unlock = false;
again:
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret)
			break;

		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
						      pos, &lockstart, &lockend,
						      &cached_state);
		if (ret < 0) {
			if (ret == -EAGAIN)
				goto again;
			break;
		} else if (ret > 0) {
			need_unlock = true;
			ret = 0;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess
		 * delalloc bytes we reserved.  We need to increment
		 * outstanding_extents because btrfs_delalloc_release_space
		 * will decrement it, but we still have an outstanding extent
		 * for the chunk we actually managed to copy.
		 */
		if (num_pages > dirty_pages) {
			release_bytes = (num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT;
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			if (only_release_metadata)
				btrfs_delalloc_release_metadata(inode,
								release_bytes);
			else
				btrfs_delalloc_release_space(inode,
							     release_bytes);
		}

		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;

		if (copied > 0)
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
		if (need_unlock)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state,
					     GFP_NOFS);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata && copied > 0) {
			u64 lockstart = round_down(pos, root->sectorsize);
			u64 lockend = lockstart +
				(dirty_pages << PAGE_CACHE_SHIFT) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
			only_release_metadata = false;
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, release_bytes);
		else
			btrfs_delalloc_release_space(inode, release_bytes);
	}

	return num_written ? num_written : ret;
}
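
/*
 * O_DIRECT writes that complete only partially fall back to the
 * buffered path for the remaining bytes; the buffered tail is then
 * flushed and invalidated so the whole range ends up on disk as a
 * direct write would leave it.
 */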
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}

static void update_time_for_write(struct inode *inode)
{
	struct timespec now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}

static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;
	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for
	 * the extent we are going to write, so we will enospc out there.  We
	 * don't need to start yet another transaction to update the inode as
	 * we will update the inode when we finish writing whatever data we
	 * write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 *
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	if (num_written > 0) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		struct btrfs_trans_handle *trans;
		struct btrfs_root *root = BTRFS_I(inode)->root;

		/*
		 * We need to block on a committing transaction to keep us from
		 * throwing an ordered operation on to the list and causing
		 * something like sync to deadlock trying to flush out this
		 * inode.
		 */
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
		btrfs_end_transaction(trans, root);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;
	bool full_sync = 0;

	trace_btrfs_sync_file(file, datasync);

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
	 * pages concurrently and improve performance.  See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	/*
	 * We flush the dirty pages again to avoid some dirty pages in the
	 * range being left.
	 */
	atomic_inc(&root->log_batch);
	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &BTRFS_I(inode)->runtime_flags);
	if (full_sync) {
		ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}
	atomic_inc(&root->log_batch);

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	smp_mb();
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;

		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	/*
	 * We use start here because we will need to wait on the IO to complete
	 * in btrfs_sync_log, which could require joining a transaction (for
	 * example checking cross references in the nocow path).  If we use
	 * join here we could get into a situation where we're waiting on IO
	 * to happen that is blocked on a transaction trying to commit.  With
	 * start we inc the extwriter counter, so we wait for all extwriters
	 * to exit before we start blocking joiners.  This comment is to keep
	 * somebody from thinking they are super smart and changing this to
	 * btrfs_join_transaction *cough*Josef*cough*.
	 */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	trans->sync = true;

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		/* Fallthrough and commit/free transaction. */
		ret = 1;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (!ret) {
			ret = btrfs_sync_log(trans, root);
			if (!ret) {
				ret = btrfs_end_transaction(trans, root);
				goto out;
			}
		}
		if (!full_sync) {
			ret = btrfs_wait_ordered_range(inode, start,
						       end - start + 1);
			if (ret)
				goto out;
		}
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = btrfs_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}

static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}
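
/*
 * Sketch of the case hole_mergeable() detects (made-up values): a hole
 * is a regular file extent item with disk_bytenr == 0.  Given
 *
 *	existing item: key.offset = 0, num_bytes = 4096
 *	new hole:      start = 4096, end = 8192
 *
 * key.offset + num_bytes == start, so fill_holes() below can extend the
 * existing item to cover 0..8192 instead of inserting a second,
 * adjacent hole item.
 */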

static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key key;
	int ret;

	if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
		goto out;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(!ret);

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
		u64 num_bytes;

		path->slots[0]++;
		key.offset = offset;
		btrfs_set_item_key_safe(root, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
				       0, 0, end - offset, 0, end - offset,
				       0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->ram_bytes = hole_em->len;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->orig_block_len = 0;
		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em, 1);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
	}

	return 0;
}

static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_block_rsv *rsv;
	struct btrfs_trans_handle *trans;
	u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
	u64 lockend = round_down(offset + len,
				 BTRFS_I(inode)->root->sectorsize) - 1;
	u64 cur_offset = lockstart;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	u64 drop_end;
	int ret = 0;
	int err = 0;
	int rsv_count;
	bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
			  ((offset + len - 1) >> PAGE_CACHE_SHIFT));
	bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);

	ret = btrfs_wait_ordered_range(inode, offset, len);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);
	/*
	 * We needn't truncate any page which is beyond the end of the file
	 * because we are sure there is no data there.  Only do the in-page
	 * truncate here if the whole range sits in one page and doesn't
	 * cover it entirely.
	 */
	if (same_page && len < PAGE_CACHE_SIZE) {
		if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE))
			ret = btrfs_truncate_page(inode, offset, len, 0);
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero back part of the first page */
	if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
		ret = btrfs_truncate_page(inode, offset, 0, 0);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	/* zero the front end of the last page */
	if (offset + len < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
		ret = btrfs_truncate_page(inode, offset + len, 0, 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (lockend < lockstart) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this
		 * range and that nobody raced in and read a page in this
		 * range; if either happened we need to try again.
		 */
2217 */ 2218 if ((!ordered || 2219 (ordered->file_offset + ordered->len <= lockstart || 2220 ordered->file_offset > lockend)) && 2221 !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart, 2222 lockend, EXTENT_UPTODATE, 0, 2223 cached_state)) { 2224 if (ordered) 2225 btrfs_put_ordered_extent(ordered); 2226 break; 2227 } 2228 if (ordered) 2229 btrfs_put_ordered_extent(ordered); 2230 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, 2231 lockend, &cached_state, GFP_NOFS); 2232 ret = btrfs_wait_ordered_range(inode, lockstart, 2233 lockend - lockstart + 1); 2234 if (ret) { 2235 mutex_unlock(&inode->i_mutex); 2236 return ret; 2237 } 2238 } 2239 2240 path = btrfs_alloc_path(); 2241 if (!path) { 2242 ret = -ENOMEM; 2243 goto out; 2244 } 2245 2246 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); 2247 if (!rsv) { 2248 ret = -ENOMEM; 2249 goto out_free; 2250 } 2251 rsv->size = btrfs_calc_trunc_metadata_size(root, 1); 2252 rsv->failfast = 1; 2253 2254 /* 2255 * 1 - update the inode 2256 * 1 - removing the extents in the range 2257 * 1 - adding the hole extent if no_holes isn't set 2258 */ 2259 rsv_count = no_holes ? 2 : 3; 2260 trans = btrfs_start_transaction(root, rsv_count); 2261 if (IS_ERR(trans)) { 2262 err = PTR_ERR(trans); 2263 goto out_free; 2264 } 2265 2266 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, 2267 min_size); 2268 BUG_ON(ret); 2269 trans->block_rsv = rsv; 2270 2271 while (cur_offset < lockend) { 2272 ret = __btrfs_drop_extents(trans, root, inode, path, 2273 cur_offset, lockend + 1, 2274 &drop_end, 1, 0, 0, NULL); 2275 if (ret != -ENOSPC) 2276 break; 2277 2278 trans->block_rsv = &root->fs_info->trans_block_rsv; 2279 2280 ret = fill_holes(trans, inode, path, cur_offset, drop_end); 2281 if (ret) { 2282 err = ret; 2283 break; 2284 } 2285 2286 cur_offset = drop_end; 2287 2288 ret = btrfs_update_inode(trans, root, inode); 2289 if (ret) { 2290 err = ret; 2291 break; 2292 } 2293 2294 btrfs_end_transaction(trans, root); 2295 btrfs_btree_balance_dirty(root); 2296 2297 trans = btrfs_start_transaction(root, rsv_count); 2298 if (IS_ERR(trans)) { 2299 ret = PTR_ERR(trans); 2300 trans = NULL; 2301 break; 2302 } 2303 2304 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, 2305 rsv, min_size); 2306 BUG_ON(ret); /* shouldn't happen */ 2307 trans->block_rsv = rsv; 2308 } 2309 2310 if (ret) { 2311 err = ret; 2312 goto out_trans; 2313 } 2314 2315 trans->block_rsv = &root->fs_info->trans_block_rsv; 2316 ret = fill_holes(trans, inode, path, cur_offset, drop_end); 2317 if (ret) { 2318 err = ret; 2319 goto out_trans; 2320 } 2321 2322 out_trans: 2323 if (!trans) 2324 goto out_free; 2325 2326 inode_inc_iversion(inode); 2327 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 2328 2329 trans->block_rsv = &root->fs_info->trans_block_rsv; 2330 ret = btrfs_update_inode(trans, root, inode); 2331 btrfs_end_transaction(trans, root); 2332 btrfs_btree_balance_dirty(root); 2333 out_free: 2334 btrfs_free_path(path); 2335 btrfs_free_block_rsv(root, rsv); 2336 out: 2337 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 2338 &cached_state, GFP_NOFS); 2339 mutex_unlock(&inode->i_mutex); 2340 if (ret && !err) 2341 err = ret; 2342 return err; 2343 } 2344 2345 static long btrfs_fallocate(struct file *file, int mode, 2346 loff_t offset, loff_t len) 2347 { 2348 struct inode *inode = file_inode(file); 2349 struct extent_state *cached_state = NULL; 2350 struct btrfs_root *root = BTRFS_I(inode)->root; 2351 u64 cur_offset; 2352 u64 last_byte; 2353 u64 alloc_start; 2354 u64 

static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct extent_state *cached_state = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	struct extent_map *em;
	int blocksize = BTRFS_I(inode)->root->sectorsize;
	int ret;

	alloc_start = round_down(offset, blocksize);
	alloc_end = round_up(offset + len, blocksize);

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Make sure we have enough space before we do the
	 * allocation.
	 */
	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		return ret;
	if (root->fs_info->quota_enabled) {
		ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
		if (ret)
			goto out_reserve_fail;
	}

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	} else {
		/*
		 * If we are fallocating from the end of the file onward we
		 * need to zero out the end of the page if i_size lands in the
		 * middle of a page.
		 */
		ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
		if (ret)
			goto out;
	}

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	ret = btrfs_wait_ordered_range(inode, alloc_start,
				       alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			ret = btrfs_wait_ordered_range(inode, alloc_start,
						       alloc_end - alloc_start);
			if (ret)
				goto out;
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = ALIGN(last_byte, blocksize);

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);

			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	if (root->fs_info->quota_enabled)
		btrfs_qgroup_free(root, alloc_end - alloc_start);
out_reserve_fail:
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
	return ret;
}
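
/*
 * Example (userspace, illustrative): preallocation through
 * btrfs_fallocate() above:
 *
 *	// allocate 16MiB from offset 0, extending i_size if needed
 *	fallocate(fd, 0, 0, 16 * 1024 * 1024);
 *
 *	// reserve the same range past EOF without touching i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 * 1024 * 1024);
 *
 * Either way the blocks land as PREALLOC extents; i_size is only
 * updated when FALLOC_FL_KEEP_SIZE is absent.
 */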

static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 len = i_size_read(inode);
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	lockend--;
	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	while (start < inode->i_size) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}

		if (whence == SEEK_HOLE &&
		    (em->block_start == EXTENT_MAP_HOLE ||
		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;
		else if (whence == SEEK_DATA &&
			 (em->block_start != EXTENT_MAP_HOLE &&
			  !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;

		start = em->start + em->len;
		free_extent_map(em);
		em = NULL;
		cond_resched();
	}
	free_extent_map(em);
	if (!ret) {
		if (whence == SEEK_DATA && start >= inode->i_size)
			ret = -ENXIO;
		else
			*offset = min_t(loff_t, start, inode->i_size);
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, whence);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, whence);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
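
/*
 * Example (userspace, illustrative): walking a sparse file with the
 * SEEK_DATA/SEEK_HOLE support implemented by find_desired_extent():
 *
 *	off_t data = 0, hole;
 *
 *	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
 *		hole = lseek(fd, data, SEEK_HOLE);
 *		// [data, hole) contains data; note that preallocated
 *		// (unwritten) extents are reported as holes here
 *		data = hole;
 *	}
 *	// the loop ends when lseek() fails with ENXIO past i_size
 */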

const struct file_operations btrfs_file_operations = {
	.llseek = btrfs_file_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.splice_read = generic_file_splice_read,
	.aio_write = btrfs_file_aio_write,
	.mmap = btrfs_file_mmap,
	.open = generic_file_open,
	.release = btrfs_release_file,
	.fsync = btrfs_sync_file,
	.fallocate = btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = btrfs_ioctl,
#endif
};

void btrfs_auto_defrag_exit(void)
{
	if (btrfs_inode_defrag_cachep)
		kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}
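
/*
 * Usage note (illustrative): this cache backs the autodefrag mount
 * option; the inode_defrag records queued by btrfs_add_inode_defrag()
 * are allocated from it:
 *
 *	mount -o autodefrag /dev/sdX /mnt
 */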