// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;

/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * Transid where the defrag was added, we search for extents newer
         * than this.
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}

/*
 * Insert a record for an inode into the defrag tree. The lock must be held
 * already.
 *
 * If you're inserting a record for an older transid than an existing record,
 * the transid already in the tree is lowered.
 *
 * If an existing record is found, -EEXIST is returned and the caller is
 * expected to free the defrag item it passed in.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /*
                         * If we're reinserting an entry for an old defrag
                         * run, make sure to lower the transid of our
                         * existing record.
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        return -EEXIST;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
        return 0;
}

static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
        if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(fs_info))
                return 0;

        return 1;
}
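/*
 * Rough flow of auto defrag (a sketch, not a contract): when a file is
 * written, btrfs_add_inode_defrag() below queues an inode_defrag record in
 * fs_info->defrag_inodes. The cleaner kthread later walks that tree via
 * btrfs_run_defrag_inodes(), defragging each inode in batches and
 * requeueing it while work remains.
 */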
/*
 * Insert a defrag record for this inode if auto defrag is enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct btrfs_inode *inode)
{
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct inode_defrag *defrag;
        u64 transid;
        int ret;

        if (!__need_auto_defrag(fs_info))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = inode->root->last_trans;

        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
                /*
                 * If we set the IN_DEFRAG flag and evict the inode from
                 * memory, and then re-read this inode, this new inode
                 * doesn't have IN_DEFRAG set. In that case we may find an
                 * existing record in the tree.
                 */
                ret = __btrfs_add_inode_defrag(inode, defrag);
                if (ret)
                        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);
        return 0;
}

/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
                                       struct inode_defrag *defrag)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        int ret;

        if (!__need_auto_defrag(fs_info))
                goto out;

        /*
         * Here we don't check the IN_DEFRAG flag, because we need to merge
         * them together.
         */
        spin_lock(&fs_info->defrag_inodes_lock);
        ret = __btrfs_add_inode_defrag(inode, defrag);
        spin_unlock(&fs_info->defrag_inodes_lock);
        if (ret)
                goto out;
        return;
out:
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
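/*
 * Note (informational): btrfs_pick_defrag_inode() below removes the
 * returned entry from the tree, so the caller owns it and must either
 * requeue it or free it back to btrfs_inode_defrag_cachep.
 */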
/*
 * Pick the defraggable inode that we want. If it doesn't exist, we will
 * get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        spin_lock(&fs_info->defrag_inodes_lock);
        p = fs_info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        goto out;
        }

        if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                parent = rb_next(parent);
                if (parent)
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                else
                        entry = NULL;
        }
out:
        if (entry)
                rb_erase(parent, &fs_info->defrag_inodes);
        spin_unlock(&fs_info->defrag_inodes_lock);
        return entry;
}

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct rb_node *node;

        spin_lock(&fs_info->defrag_inodes_lock);
        node = rb_first(&fs_info->defrag_inodes);
        while (node) {
                rb_erase(node, &fs_info->defrag_inodes);
                defrag = rb_entry(node, struct inode_defrag, rb_node);
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

                cond_resched_lock(&fs_info->defrag_inodes_lock);

                node = rb_first(&fs_info->defrag_inodes);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);
}
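/*
 * How many file pages btrfs_defrag_file() is asked to process per pass;
 * an inode that still has work left after a full batch is requeued (see
 * the requeue logic in __btrfs_run_defrag_inode() below).
 */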
#define BTRFS_DEFRAG_BATCH      1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct btrfs_ioctl_defrag_range_args range;
        int num_defrag;
        int ret;

        /* get the inode */
        inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
        if (IS_ERR(inode_root)) {
                ret = PTR_ERR(inode_root);
                goto cleanup;
        }

        inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
        btrfs_put_root(inode_root);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                goto cleanup;
        }

        /* do a chunk of defrag */
        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;
        range.start = defrag->last_offset;

        sb_start_write(fs_info->sb);
        num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                       BTRFS_DEFRAG_BATCH);
        sb_end_write(fs_info->sb);
        /*
         * If we filled the whole defrag batch, there must be more work to
         * do. Queue this defrag again.
         */
        if (num_defrag == BTRFS_DEFRAG_BATCH) {
                defrag->last_offset = range.start;
                btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
        } else if (defrag->last_offset && !defrag->cycled) {
                /*
                 * We didn't fill our defrag batch, but we didn't start at
                 * zero. Make sure we loop around to the start of the file.
                 */
                defrag->last_offset = 0;
                defrag->cycled = 1;
                btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }

        iput(inode);
        return 0;
cleanup:
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        return ret;
}

/*
 * Run through the list of inodes in the FS that need defragging.
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        u64 first_ino = 0;
        u64 root_objectid = 0;

        atomic_inc(&fs_info->defrag_running);
        while (1) {
                /* Pause the auto defragger. */
                if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
                        break;

                if (!__need_auto_defrag(fs_info))
                        break;

                /* find an inode to defrag */
                defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
                                                 first_ino);
                if (!defrag) {
                        if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;

                __btrfs_run_defrag_inode(fs_info, defrag);
        }
        atomic_dec(&fs_info->defrag_running);

        /*
         * During unmount, we use the transaction_wait queue to wait for
         * the defragger to stop.
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/*
 * Simple helper to fault in pages and copy. This should go away and be
 * replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = offset_in_page(pos);

        while (write_bytes > 0) {
                size_t count = min_t(size_t, PAGE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 */
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * If we get a partial write, we can end up with partially
                 * up to date pages. These add a lot of complexity, so make
                 * sure they don't happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall back to
                 * page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_write_iter to fault page */
                if (unlikely(copied == 0))
                        break;

                if (copied < PAGE_SIZE - offset) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}

/*
 * Unlocks pages after btrfs_file_write is done with them.
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;

        for (i = 0; i < num_pages; i++) {
                /*
                 * PageChecked is some magic around finding pages that have
                 * been modified without going through btrfs_set_page_dirty,
                 * so clear it here. There should be no need to mark the
                 * pages accessed, as prepare_pages should have marked them
                 * accessed in prepare_pages via find_or_create_page().
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                put_page(pages[i]);
        }
}
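/*
 * Informal summary: walk the extent maps in [start, start + len) and tag
 * every hole with EXTENT_DELALLOC_NEW, so that later delalloc accounting
 * can tell which parts of the write cover brand new file ranges rather
 * than pre-existing extents.
 */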
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
                                         const u64 start,
                                         const u64 len,
                                         struct extent_state **cached_state)
{
        u64 search_start = start;
        const u64 end = start + len - 1;

        while (search_start < end) {
                const u64 search_len = end - search_start + 1;
                struct extent_map *em;
                u64 em_len;
                int ret = 0;

                em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
                if (IS_ERR(em))
                        return PTR_ERR(em);

                if (em->block_start != EXTENT_MAP_HOLE)
                        goto next;

                em_len = em->len;
                if (em->start < search_start)
                        em_len -= search_start - em->start;
                if (em_len > search_len)
                        em_len = search_len;

                ret = set_extent_bit(&inode->io_tree, search_start,
                                     search_start + em_len - 1,
                                     EXTENT_DELALLOC_NEW,
                                     NULL, cached_state, GFP_NOFS);
next:
                search_start = extent_map_end(em);
                free_extent_map(em);
                if (ret)
                        return ret;
        }
        return 0;
}

/*
 * After copy_from_user, pages need to be dirtied and we need to make sure
 * holes are created between the current EOF and the start of any next
 * extents (if required).
 *
 * This also makes the decision about creating an inline extent vs doing
 * real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
                      size_t num_pages, loff_t pos, size_t write_bytes,
                      struct extent_state **cached)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(&inode->vfs_inode);
        unsigned int extra_bits = 0;

        start_pos = pos & ~((u64)fs_info->sectorsize - 1);
        num_bytes = round_up(write_bytes + pos - start_pos,
                             fs_info->sectorsize);

        end_of_last_block = start_pos + num_bytes - 1;

        /*
         * The pages may have already been dirty, clear out old accounting
         * so we can set things up properly
         */
        clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
                         EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                         0, 0, cached);

        if (!btrfs_is_free_space_inode(inode)) {
                if (start_pos >= isize &&
                    !(inode->flags & BTRFS_INODE_PREALLOC)) {
                        /*
                         * There can't be any extents following eof in this
                         * case so just set the delalloc new bit for the
                         * range directly.
                         */
                        extra_bits |= EXTENT_DELALLOC_NEW;
                } else {
                        err = btrfs_find_new_delalloc_bytes(inode, start_pos,
                                                            num_bytes, cached);
                        if (err)
                                return err;
                }
        }

        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        extra_bits, cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];

                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * We've only changed i_size in ram, and we haven't updated the
         * disk i_size. There is no need to log the inode at this time.
         */
        if (end_pos > isize)
                i_size_write(&inode->vfs_inode, end_pos);
        return 0;
}
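/*
 * Note on the splits below (a sketch): an extent map that straddles the
 * dropped range may need up to two replacement maps, one for the piece
 * before @start and one for the piece after @end, hence the two
 * preallocated "split" maps in btrfs_drop_extent_cache().
 */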
/*
 * This drops all the extents in the cache that intersect the range
 * [start, end]. Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &inode->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;
        bool modified;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                int no_splits = 0;

                modified = false;
                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                clear_bit(EXTENT_FLAG_LOGGING, &flags);
                modified = !list_empty(&em->list);
                if (no_splits)
                        goto next;

                if (em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_start = em->orig_start;
                                split->block_start = em->block_start;

                                if (compressed)
                                        split->block_len = em->block_len;
                                else
                                        split->block_len = split->len;
                                split->orig_block_len = max(split->block_len,
                                                em->orig_block_len);
                                split->ram_bytes = em->ram_bytes;
                        } else {
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                                split->ram_bytes = split->len;
                        }

                        split->generation = gen;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        replace_extent_mapping(em_tree, em, split, modified);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_block_len = max(em->block_len,
                                                    em->orig_block_len);

                                split->ram_bytes = em->ram_bytes;
                                if (compressed) {
                                        split->block_len = em->block_len;
                                        split->block_start = em->block_start;
                                        split->orig_start = em->orig_start;
                                } else {
                                        split->block_len = split->len;
                                        split->block_start = em->block_start
                                                             + diff;
                                        split->orig_start = em->orig_start;
                                }
                        } else {
                                split->ram_bytes = split->len;
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                        }

                        if (extent_map_in_tree(em)) {
                                replace_extent_mapping(em_tree, em, split,
                                                       modified);
                        } else {
                                ret = add_extent_mapping(em_tree, split,
                                                         modified);
                                ASSERT(ret == 0); /* Logic error */
                        }
                        free_extent_map(split);
                        split = NULL;
                }
next:
                if (extent_map_in_tree(em))
                        remove_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}
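/*
 * Informational note for __btrfs_drop_extents() below: on return,
 * *drop_end (when non-NULL) is roughly min(end, end of the last dropped
 * extent), or @end when nothing was found, so callers such as hole
 * punching know how far they actually got.
 */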
/*
 * This is very complex, but the basic idea is to drop all extents in the
 * range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split. Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct btrfs_inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache,
                         int replace_extent,
                         u32 extent_item_size,
                         int *key_inserted)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_ref ref = { 0 };
        struct btrfs_key key;
        struct btrfs_key new_key;
        struct inode *vfs_inode = &inode->vfs_inode;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        u64 last_end = start;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs;
        int found = 0;
        int leafs_visited = 0;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        if (start >= inode->disk_i_size && !replace_extent)
                modify_tree = 0;

        update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
                       root == fs_info->tree_root);
        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
                leafs_visited++;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leafs_visited++;
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid > ino)
                        break;
                if (WARN_ON_ONCE(key.objectid < ino) ||
                    key.type < BTRFS_EXTENT_DATA_KEY) {
                        ASSERT(del_nr == 0);
                        path->slots[0]++;
                        goto next_slot;
                }
                if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);
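                /*
                 * Work out where this extent item ends in the file: regular
                 * and prealloc extents store their length in num_bytes,
                 * while inline extents use ram_bytes.
                 */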
                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_ram_bytes(leaf, fi);
                } else {
                        /* can't happen */
                        BUG();
                }

                /*
                 * Don't skip extent items representing 0 byte lengths. They
                 * used to be created (bug) if while punching holes we hit
                 * -ENOSPC condition. So if we find one here, just ensure we
                 * delete it, otherwise we would insert a new file extent
                 * item with the same key (offset) as that 0 bytes length
                 * file extent item in the call to setup_items_for_insert()
                 * later in this function.
                 */
                if (extent_end == key.offset && extent_end >= search_start) {
                        last_end = extent_end;
                        goto delete_extent_item;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                found = 1;
                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                btrfs_init_generic_ref(&ref,
                                                BTRFS_ADD_DELAYED_REF,
                                                disk_bytenr, num_bytes, 0);
                                btrfs_init_data_ref(&ref,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset);
                                ret = btrfs_inc_extent_ref(trans, &ref);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 * From here on out we will have actually dropped something,
                 * so last_end can be updated.
                 */
                last_end = extent_end;

                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(fs_info, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(vfs_inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(vfs_inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
delete_extent_item:
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(vfs_inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   fs_info->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                btrfs_init_generic_ref(&ref,
                                                BTRFS_DROP_DELAYED_REF,
                                                disk_bytenr, num_bytes, 0);
                                btrfs_init_data_ref(&ref,
                                                root->root_key.objectid,
                                                key.objectid,
                                                key.offset - extent_offset);
                                ret = btrfs_free_extent(trans, &ref);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(vfs_inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG();
        }

        if (!ret && del_nr > 0) {
                /*
                 * Set path->slots[0] to first slot, so that after the delete
                 * if items are moved off from our leaf to its immediate left
                 * or right neighbor leafs, we end up with a correct and
                 * adjusted path->slots[0] for our insertion (if
                 * replace_extent != 0).
                 */
                path->slots[0] = del_slot;
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, ret);
        }

        leaf = path->nodes[0];
        /*
         * If btrfs_del_items() was called, it might have deleted a leaf, in
         * which case it unlocked our path, so check path->locks[0] matches a
         * write lock.
         */
        if (!ret && replace_extent && leafs_visited == 1 &&
            (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
             path->locks[0] == BTRFS_WRITE_LOCK) &&
            btrfs_leaf_free_space(leaf) >=
            sizeof(struct btrfs_item) + extent_item_size) {

                key.objectid = ino;
                key.type = BTRFS_EXTENT_DATA_KEY;
                key.offset = start;
                if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
                        struct btrfs_key slot_key;

                        btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
                        if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
                                path->slots[0]++;
                }
                setup_items_for_insert(root, path, &key, &extent_item_size, 1);
                *key_inserted = 1;
        }

        if (!replace_extent || !(*key_inserted))
                btrfs_release_path(path);
        if (drop_end)
                *drop_end = found ? min(end, last_end) : end;
        return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path, start,
                                   end, NULL, drop_cache, 0, 0, NULL);
        btrfs_free_path(path);
        return ret;
}
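/*
 * Helper for btrfs_mark_extent_written(). Loosely: returns 1 if the file
 * extent item at @slot is an uncompressed, unencrypted regular extent
 * backed by the same physical extent (@bytenr, with a matching
 * @orig_offset), so the two items can be merged; its file range is then
 * reported back via @start/@end.
 */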
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct btrfs_inode *inode, u64 start, u64 end)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root = inode->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_ref ref = { 0 };
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;
        /* Don't leak the positive return from btrfs_search_slot(). */
        ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (key.objectid != ino ||
            key.type != BTRFS_EXTENT_DATA_KEY) {
                ret = -EINVAL;
                btrfs_abort_transaction(trans, ret);
                goto out;
        }
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
                ret = -EINVAL;
                btrfs_abort_transaction(trans, ret);
                goto out;
        }
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if (key.offset > start || extent_end < end) {
                ret = -EINVAL;
                btrfs_abort_transaction(trans, ret);
                goto out;
        }

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(fs_info, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }
        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(fs_info, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
                                       num_bytes, 0);
                btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
                                    orig_offset);
                ret = btrfs_inc_extent_ref(trans, &ref);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }

                if (split == start) {
                        key.offset = start;
                } else {
                        if (start != key.offset) {
                                ret = -EINVAL;
                                btrfs_abort_transaction(trans, ret);
                                goto out;
                        }
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }
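        /*
         * The splits (if any) are done. Now try to merge the written range
         * with its neighbours; each successful merge deletes an item and
         * drops the extra reference we hold on the physical extent.
         */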
        other_start = end;
        other_end = 0;
        btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
                               num_bytes, 0);
        btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, &ref);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, &ref);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        /* Return the error (if any) instead of swallowing it as 0. */
        return ret;
}

/*
 * On error we return an unlocked page and the error value.
 * On success we return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
                                 struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
                if (page->mapping != inode->i_mapping) {
                        unlock_page(page);
                        return -EAGAIN;
                }
        }
        return 0;
}

/*
 * This just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
                                  size_t num_pages, loff_t pos,
                                  size_t write_bytes, bool force_uptodate)
{
        int i;
        unsigned long index = pos >> PAGE_SHIFT;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili;

        for (i = 0; i < num_pages; i++) {
again:
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
                                               mask | __GFP_WRITE);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
                        goto fail;
                }

                if (i == 0)
                        err = prepare_uptodate_page(inode, pages[i], pos,
                                                    force_uptodate);
                if (!err && i == num_pages - 1)
                        err = prepare_uptodate_page(inode, pages[i],
                                                    pos + write_bytes, false);
                if (err) {
                        put_page(pages[i]);
                        if (err == -EAGAIN) {
                                err = 0;
                                goto again;
                        }
                        faili = i - 1;
                        goto fail;
                }
                wait_on_page_writeback(pages[i]);
        }

        return 0;
fail:
        while (faili >= 0) {
                unlock_page(pages[faili]);
                put_page(pages[faili]);
                faili--;
        }
        return err;
}
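/*
 * Note (informational): on failure prepare_pages() has already unlocked
 * and released every page it managed to set up, so callers only need to
 * release their space reservations.
 */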
/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other < 0 value - an error occurred
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                                size_t num_pages, loff_t pos,
                                size_t write_bytes,
                                u64 *lockstart, u64 *lockend,
                                struct extent_state **cached_state)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        u64 start_pos;
        u64 last_pos;
        int i;
        int ret = 0;

        start_pos = round_down(pos, fs_info->sectorsize);
        last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;

        if (start_pos < inode->vfs_inode.i_size) {
                struct btrfs_ordered_extent *ordered;

                lock_extent_bits(&inode->io_tree, start_pos, last_pos,
                                 cached_state);
                ordered = btrfs_lookup_ordered_range(inode, start_pos,
                                                     last_pos - start_pos + 1);
                if (ordered &&
                    ordered->file_offset + ordered->num_bytes > start_pos &&
                    ordered->file_offset <= last_pos) {
                        unlock_extent_cached(&inode->io_tree, start_pos,
                                             last_pos, cached_state);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                put_page(pages[i]);
                        }
                        btrfs_start_ordered_extent(ordered, 1);
                        btrfs_put_ordered_extent(ordered);
                        return -EAGAIN;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                *lockstart = start_pos;
                *lockend = last_pos;
                ret = 1;
        }

        /*
         * It's possible the pages are dirty right now, but we don't want
         * to clean them yet because copy_from_user may catch a page fault
         * and we might have to fall back to one page at a time. If that
         * happens, we'll unlock these pages and we'd have a window where
         * reclaim could sneak in and drop the once-dirty page on the floor
         * without writing it.
         *
         * We have the pages locked and the extent range locked, so there's
         * no way someone can start IO on any dirty pages in this range.
         *
         * We'll call btrfs_dirty_pages() later on, and that will flip around
         * delalloc bits and dirty the pages as required.
         */
        for (i = 0; i < num_pages; i++) {
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }

        return ret;
}
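/*
 * Informal summary: check whether [pos, pos + *write_bytes) can be written
 * without COW. This backs both the blocking variant
 * (btrfs_check_nocow_lock(), which documents the contract below) and the
 * nowait variant used by the IOCB_NOWAIT path.
 */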
static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
                           size_t *write_bytes, bool nowait)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_root *root = inode->root;
        u64 lockstart, lockend;
        u64 num_bytes;
        int ret;

        if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
                return 0;

        if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
                return -EAGAIN;

        lockstart = round_down(pos, fs_info->sectorsize);
        lockend = round_up(pos + *write_bytes,
                           fs_info->sectorsize) - 1;
        num_bytes = lockend - lockstart + 1;

        if (nowait) {
                struct btrfs_ordered_extent *ordered;

                if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
                        return -EAGAIN;

                ordered = btrfs_lookup_ordered_range(inode, lockstart,
                                                     num_bytes);
                if (ordered) {
                        btrfs_put_ordered_extent(ordered);
                        ret = -EAGAIN;
                        goto out_unlock;
                }
        } else {
                btrfs_lock_and_flush_ordered_range(inode, lockstart,
                                                   lockend, NULL);
        }

        ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
                               NULL, NULL, NULL, false);
        if (ret <= 0) {
                ret = 0;
                if (!nowait)
                        btrfs_drew_write_unlock(&root->snapshot_lock);
        } else {
                *write_bytes = min_t(size_t, *write_bytes,
                                     num_bytes - pos + lockstart);
        }
out_unlock:
        unlock_extent(&inode->io_tree, lockstart, lockend);

        return ret;
}

static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
                              size_t *write_bytes)
{
        return check_can_nocow(inode, pos, write_bytes, true);
}
/*
 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
 *
 * @pos:         File offset
 * @write_bytes: The length to write, will be updated to the nocow writeable
 *               range
 *
 * This function will flush ordered extents in the range to ensure proper
 * nocow checks.
 *
 * Return:
 * >0           and update @write_bytes if we can do nocow write
 *  0           if we can't do a nocow write
 * -EAGAIN      if we can't get the needed lock or there are ordered extents
 *              for the (nowait == true) case
 * <0           if another error happened
 *
 * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
 */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
                           size_t *write_bytes)
{
        return check_can_nocow(inode, pos, write_bytes, false);
}

void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
{
        btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}
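/*
 * Rough shape of the buffered write loop below (a sketch, not a spec):
 * reserve data and metadata space (falling back to a nocow check when the
 * data reservation fails), prepare and lock the pages, lock the extent
 * range, copy from the iov_iter, then mark the copied range delalloc via
 * btrfs_dirty_pages(). A short copy shrinks the batch to one page and the
 * loop retries from the faulted-in position.
 */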
static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
                                             struct iov_iter *i)
{
        struct file *file = iocb->ki_filp;
        loff_t pos = iocb->ki_pos;
        struct inode *inode = file_inode(file);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct page **pages = NULL;
        struct extent_changeset *data_reserved = NULL;
        u64 release_bytes = 0;
        u64 lockstart;
        u64 lockend;
        size_t num_written = 0;
        int nrptrs;
        int ret = 0;
        bool only_release_metadata = false;
        bool force_page_uptodate = false;

        nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
                     PAGE_SIZE / (sizeof(struct page *)));
        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
        nrptrs = max(nrptrs, 8);
        pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        while (iov_iter_count(i) > 0) {
                struct extent_state *cached_state = NULL;
                size_t offset = offset_in_page(pos);
                size_t sector_offset;
                size_t write_bytes = min(iov_iter_count(i),
                                         nrptrs * (size_t)PAGE_SIZE -
                                         offset);
                size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
                                                PAGE_SIZE);
                size_t reserve_bytes;
                size_t dirty_pages;
                size_t copied;
                size_t dirty_sectors;
                size_t num_sectors;
                int extents_locked;

                WARN_ON(num_pages > nrptrs);

                /*
                 * Fault pages before locking them in prepare_pages
                 * to avoid recursive lock
                 */
                if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
                        ret = -EFAULT;
                        break;
                }

                only_release_metadata = false;
                sector_offset = pos & (fs_info->sectorsize - 1);
                reserve_bytes = round_up(write_bytes + sector_offset,
                                         fs_info->sectorsize);

                extent_changeset_release(data_reserved);
                ret = btrfs_check_data_free_space(BTRFS_I(inode),
                                                  &data_reserved, pos,
                                                  write_bytes);
                if (ret < 0) {
                        if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
                                                   &write_bytes) > 0) {
                                /*
                                 * For nodata cow case, no need to reserve
                                 * data space.
                                 */
                                only_release_metadata = true;
                                /*
                                 * Our prealloc extent may be smaller than
                                 * write_bytes, so scale down.
                                 */
                                num_pages = DIV_ROUND_UP(write_bytes + offset,
                                                         PAGE_SIZE);
                                reserve_bytes = round_up(write_bytes +
                                                         sector_offset,
                                                         fs_info->sectorsize);
                        } else {
                                break;
                        }
                }

                WARN_ON(reserve_bytes == 0);
                ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
                                                      reserve_bytes);
                if (ret) {
                        if (!only_release_metadata)
                                btrfs_free_reserved_data_space(BTRFS_I(inode),
                                                data_reserved, pos,
                                                write_bytes);
                        else
                                btrfs_check_nocow_unlock(BTRFS_I(inode));
                        break;
                }

                release_bytes = reserve_bytes;
again:
                /*
                 * This is going to setup the pages array with the number of
                 * pages we want, so we don't really need to worry about the
                 * contents of pages from loop to loop
                 */
                ret = prepare_pages(inode, pages, num_pages,
                                    pos, write_bytes,
                                    force_page_uptodate);
                if (ret) {
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
                                                       reserve_bytes);
                        break;
                }

                extents_locked = lock_and_cleanup_extent_if_need(
                                BTRFS_I(inode), pages,
                                num_pages, pos, write_bytes, &lockstart,
                                &lockend, &cached_state);
                if (extents_locked < 0) {
                        if (extents_locked == -EAGAIN)
                                goto again;
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
                                                       reserve_bytes);
                        ret = extents_locked;
                        break;
                }

                copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

                num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
                dirty_sectors = round_up(copied + sector_offset,
                                         fs_info->sectorsize);
                dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

                /*
                 * If we have trouble faulting in the pages, fall back to
                 * one page at a time.
                 */
                if (copied < write_bytes)
                        nrptrs = 1;

                if (copied == 0) {
                        force_page_uptodate = true;
                        dirty_sectors = 0;
                        dirty_pages = 0;
                } else {
                        force_page_uptodate = false;
                        dirty_pages = DIV_ROUND_UP(copied + offset,
                                                   PAGE_SIZE);
                }

                if (num_sectors > dirty_sectors) {
                        /* release everything except the sectors we dirtied */
                        release_bytes -= dirty_sectors <<
                                                fs_info->sb->s_blocksize_bits;
                        if (only_release_metadata) {
                                btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                        release_bytes, true);
                        } else {
                                u64 __pos;

                                __pos = round_down(pos,
                                                   fs_info->sectorsize) +
                                        (dirty_pages << PAGE_SHIFT);
                                btrfs_delalloc_release_space(BTRFS_I(inode),
                                                data_reserved, __pos,
                                                release_bytes, true);
                        }
                }

                release_bytes = round_up(copied + sector_offset,
                                         fs_info->sectorsize);

                if (copied > 0)
                        ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
                                                dirty_pages, pos, copied,
                                                &cached_state);
                /*
                 * If we have not locked the extent range, because the range's
                 * start offset is >= i_size, we might still have a non-NULL
                 * cached extent state, acquired while marking the extent range
                 * as delalloc through btrfs_dirty_pages(). Therefore free any
                 * possible cached extent state to avoid a memory leak.
                 */
                if (extents_locked)
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             lockstart, lockend, &cached_state);
                else
                        free_extent_state(cached_state);

                btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
                if (ret) {
                        btrfs_drop_pages(pages, num_pages);
                        break;
                }

                release_bytes = 0;
                if (only_release_metadata)
                        btrfs_check_nocow_unlock(BTRFS_I(inode));

                if (only_release_metadata && copied > 0) {
                        lockstart = round_down(pos,
                                               fs_info->sectorsize);
                        lockend = round_up(pos + copied,
                                           fs_info->sectorsize) - 1;

                        set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
                                       lockend, EXTENT_NORESERVE, NULL,
                                       NULL, GFP_NOFS);
                }

                btrfs_drop_pages(pages, num_pages);

                cond_resched();

                balance_dirty_pages_ratelimited(inode->i_mapping);

                pos += copied;
                num_written += copied;
        }

        kfree(pages);

        if (release_bytes) {
                if (only_release_metadata) {
                        btrfs_check_nocow_unlock(BTRFS_I(inode));
                        btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                        release_bytes, true);
                } else {
                        btrfs_delalloc_release_space(BTRFS_I(inode),
                                        data_reserved,
                                        round_down(pos, fs_info->sectorsize),
                                        release_bytes, true);
                }
        }

        extent_changeset_free(data_reserved);
        return num_written ? num_written : ret;
}
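/*
 * Informal summary: issue the direct IO, and if it could only be done
 * partially (or not at all), fall back to a buffered write for the rest,
 * then flush and wait on that range so a following direct read sees the
 * buffered data.
 */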
static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        loff_t pos;
        ssize_t written;
        ssize_t written_buffered;
        loff_t endbyte;
        int err;

        written = btrfs_direct_IO(iocb, from);

        if (written < 0 || !iov_iter_count(from))
                return written;

        pos = iocb->ki_pos;
        written_buffered = btrfs_buffered_write(iocb, from);
        if (written_buffered < 0) {
                err = written_buffered;
                goto out;
        }
        /*
         * Ensure all data is persisted. We want the next direct IO read to
         * be able to read what was just written.
         */
        endbyte = pos + written_buffered - 1;
        err = btrfs_fdatawrite_range(inode, pos, endbyte);
        if (err)
                goto out;
        err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
        if (err)
                goto out;
        written += written_buffered;
        iocb->ki_pos = pos + written_buffered;
        invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
                                 endbyte >> PAGE_SHIFT);
out:
        return written ? written : err;
}

static void update_time_for_write(struct inode *inode)
{
        struct timespec64 now;

        if (IS_NOCMTIME(inode))
                return;

        now = current_time(inode);
        if (!timespec64_equal(&inode->i_mtime, &now))
                inode->i_mtime = now;

        if (!timespec64_equal(&inode->i_ctime, &now))
                inode->i_ctime = now;

        if (IS_I_VERSION(inode))
                inode_inc_iversion(inode);
}
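/*
 * Top level write handler, roughly: validate the request (including the
 * IOCB_NOWAIT constraints), update timestamps, expand any hole between the
 * old i_size and the write start via btrfs_cont_expand(), then hand off to
 * the direct or buffered path and do the O_DSYNC sync if needed.
 */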
We must always clear IOCB_DSYNC in order to not deadlock 2023 * in iomap, as it calls generic_write_sync() in this case. 2024 * 2. If we are async, we can call iomap_dio_complete() either 2025 * in 2026 * 2027 * 2.1. A worker thread from the last bio completed. In this 2028 * case we need to mark the btrfs_dio_data that it is 2029 * async in order to call generic_write_sync() properly. 2030 * This is handled by setting BTRFS_DIO_SYNC_STUB in the 2031 * current->journal_info. 2032 * 2.2 The submitter context, because all IO completed 2033 * before we exited iomap_dio_rw(). In this case we can 2034 * just re-set the IOCB_DSYNC on the iocb and we'll do 2035 * the sync below. If our ->end_io() gets called and 2036 * current->journal_info is set, then we know we're in 2037 * our current context and we will clear 2038 * current->journal_info to indicate that we need to 2039 * sync below. 2040 */ 2041 if (sync) { 2042 ASSERT(current->journal_info == NULL); 2043 iocb->ki_flags &= ~IOCB_DSYNC; 2044 current->journal_info = BTRFS_DIO_SYNC_STUB; 2045 } 2046 num_written = __btrfs_direct_write(iocb, from); 2047 2048 /* 2049 * As stated above, we cleared journal_info, so we need to do 2050 * the sync ourselves. 2051 */ 2052 if (sync && current->journal_info == NULL) 2053 iocb->ki_flags |= IOCB_DSYNC; 2054 current->journal_info = NULL; 2055 } else { 2056 num_written = btrfs_buffered_write(iocb, from); 2057 if (num_written > 0) 2058 iocb->ki_pos = pos + num_written; 2059 if (clean_page) 2060 pagecache_isize_extended(inode, oldsize, 2061 i_size_read(inode)); 2062 } 2063 2064 inode_unlock(inode); 2065 2066 /* 2067 * We also have to set last_sub_trans to the current log transid, 2068 * otherwise subsequent syncs to a file that's been synced in this 2069 * transaction will appear to have already occurred. 2070 */ 2071 spin_lock(&BTRFS_I(inode)->lock); 2072 BTRFS_I(inode)->last_sub_trans = root->log_transid; 2073 spin_unlock(&BTRFS_I(inode)->lock); 2074 if (num_written > 0) 2075 num_written = generic_write_sync(iocb, num_written); 2076 2077 if (sync) 2078 atomic_dec(&BTRFS_I(inode)->sync_writers); 2079 out: 2080 current->backing_dev_info = NULL; 2081 return num_written ? num_written : err; 2082 } 2083 2084 int btrfs_release_file(struct inode *inode, struct file *filp) 2085 { 2086 struct btrfs_file_private *private = filp->private_data; 2087 2088 if (private && private->filldir_buf) 2089 kfree(private->filldir_buf); 2090 kfree(private); 2091 filp->private_data = NULL; 2092 2093 /* 2094 * Set by setattr when we are about to truncate a file from a non-zero 2095 * size to a zero size. This tries to flush down new bytes that may 2096 * have been written if the application were using truncate to replace 2097 * a file in place. 2098 */ 2099 if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 2100 &BTRFS_I(inode)->runtime_flags)) 2101 filemap_flush(inode->i_mapping); 2102 return 0; 2103 } 2104 2105 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end) 2106 { 2107 int ret; 2108 struct blk_plug plug; 2109 2110 /* 2111 * This is only called in fsync, which would do synchronous writes, so 2112 * a plug can merge adjacent IOs as much as possible. Esp. in case of 2113 * multiple disks using raid profile, a large IO can be split to 2114 * several segments of stripe length (currently 64K). 
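 * E.g. a 256K synchronous write on a striped profile may be split into
 * four 64K bios; holding the plug below gives the block layer a chance
 * to merge adjacent bios into larger requests before dispatch.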
2115 */
2116 blk_start_plug(&plug);
2117 atomic_inc(&BTRFS_I(inode)->sync_writers);
2118 ret = btrfs_fdatawrite_range(inode, start, end);
2119 atomic_dec(&BTRFS_I(inode)->sync_writers);
2120 blk_finish_plug(&plug);
2121
2122 return ret;
2123 }
2124
2125 /*
2126 * fsync call for both files and directories. This logs the inode into
2127 * the tree log instead of forcing full commits whenever possible.
2128 *
2129 * It needs to call filemap_fdatawait so that all the ordered extent updates
2130 * in the metadata btree are up to date for copying to the log.
2131 *
2132 * It drops the inode mutex before doing the tree log commit. This is an
2133 * important optimization for directories because holding the mutex prevents
2134 * new operations on the dir while we write to disk.
2135 */
2136 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2137 {
2138 struct dentry *dentry = file_dentry(file);
2139 struct inode *inode = d_inode(dentry);
2140 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2141 struct btrfs_root *root = BTRFS_I(inode)->root;
2142 struct btrfs_trans_handle *trans;
2143 struct btrfs_log_ctx ctx;
2144 int ret = 0, err;
2145 u64 len;
2146 bool full_sync;
2147
2148 trace_btrfs_sync_file(file, datasync);
2149
2150 btrfs_init_log_ctx(&ctx, inode);
2151
2152 /*
2153 * Always set the range to a full range, otherwise we can get into
2154 * several problems, from missing file extent items to represent holes
2155 * when not using the NO_HOLES feature, to log tree corruption due to
2156 * races between hole detection during logging and completion of ordered
2157 * extents outside the range, to missing checksums due to ordered extents
2158 * for which we flushed only a subset of their pages.
2159 */
2160 start = 0;
2161 end = LLONG_MAX;
2162 len = (u64)LLONG_MAX + 1;
2163
2164 /*
2165 * We write the dirty pages in the range and wait until they complete
2166 * outside of the ->i_mutex. That way we can flush the dirty pages from
2167 * multiple tasks and improve performance. See
2168 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2169 */
2170 ret = start_ordered_ops(inode, start, end);
2171 if (ret)
2172 goto out;
2173
2174 inode_lock(inode);
2175
2176 /*
2177 * We take the dio_sem here because the tree log stuff can race with
2178 * lockless dio writes and get an extent map logged for an extent we
2179 * never waited on. We need it this high up for lockdep reasons.
2180 */
2181 down_write(&BTRFS_I(inode)->dio_sem);
2182
2183 atomic_inc(&root->log_batch);
2184
2185 /*
2186 * Always check for the full sync flag while holding the inode's lock,
2187 * to avoid races with other tasks. The flag must either be set all the
2188 * time during logging or always off while logging.
2189 */
2190 full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2191 &BTRFS_I(inode)->runtime_flags);
2192
2193 /*
2194 * Before we acquired the inode's lock, someone may have dirtied more
2195 * pages in the target range. We need to make sure that writeback for
2196 * any such pages does not start while we are logging the inode, because
2197 * if it does, any of the following might happen when we are not doing a
2198 * full inode sync:
2199 *
2200 * 1) We log an extent after its writeback finishes but before its
2201 * checksums are added to the csum tree, leading to -EIO errors
2202 * when attempting to read the extent after a log replay.
2203 *
2204 * 2) We can end up logging an extent before its writeback finishes.
2205 * Therefore after the log replay we will have a file extent item
2206 * pointing to an unwritten extent (and no data checksums as well).
2207 *
2208 * So trigger writeback for any eventual new dirty pages and then we
2209 * wait for all ordered extents to complete below.
2210 */
2211 ret = start_ordered_ops(inode, start, end);
2212 if (ret) {
2213 up_write(&BTRFS_I(inode)->dio_sem);
2214 inode_unlock(inode);
2215 goto out;
2216 }
2217
2218 /*
2219 * We have to do this here to avoid the priority inversion of waiting on
2220 * IO of a lower priority task while holding a transaction open.
2221 *
2222 * For a full fsync we wait for the ordered extents to complete while
2223 * for a fast fsync we wait just for writeback to complete, and then
2224 * attach the ordered extents to the transaction so that a transaction
2225 * commit waits for their completion, to avoid data loss if, after we
2226 * fsync, the current transaction commits before the ordered extents
2227 * complete and a power failure happens right after that.
2228 */
2229 if (full_sync) {
2230 ret = btrfs_wait_ordered_range(inode, start, len);
2231 } else {
2232 /*
2233 * Get our ordered extents as soon as possible to avoid doing
2234 * checksum lookups in the csum tree, and use instead the
2235 * checksums attached to the ordered extents.
2236 */
2237 btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
2238 &ctx.ordered_extents);
2239 ret = filemap_fdatawait_range(inode->i_mapping, start, end);
2240 }
2241
2242 if (ret)
2243 goto out_release_extents;
2244
2245 atomic_inc(&root->log_batch);
2246
2247 /*
2248 * If we are doing a fast fsync we cannot bail out if the inode's
2249 * last_trans is <= the last committed transaction, because we only
2250 * update the last_trans of the inode during ordered extent completion,
2251 * and for a fast fsync we don't wait for that, we only wait for the
2252 * writeback to complete.
2253 */
2254 smp_mb();
2255 if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
2256 (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
2257 (full_sync || list_empty(&ctx.ordered_extents)))) {
2258 /*
2259 * We've had everything committed since the last time we were
2260 * modified so clear this flag in case it was set for whatever
2261 * reason; it's no longer relevant.
2262 */
2263 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2264 &BTRFS_I(inode)->runtime_flags);
2265 /*
2266 * An ordered extent might have started before and completed
2267 * already with io errors, in which case the inode was not
2268 * updated and we end up here. So check the inode's mapping
2269 * for any errors that might have happened since we last
2270 * called fsync.
2271 */
2272 ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2273 goto out_release_extents;
2274 }
2275
2276 /*
2277 * We use start here because we will need to wait on the IO to complete
2278 * in btrfs_sync_log, which could require joining a transaction (for
2279 * example checking cross references in the nocow path). If we use join
2280 * here we could get into a situation where we're waiting on IO to
2281 * happen that is blocked on a transaction trying to commit. With start
2282 * we inc the extwriter counter, so we wait for all extwriters to exit
2283 * before we start blocking joiners. This comment is to keep somebody
2284 * from thinking they are super smart and changing this to
2285 * btrfs_join_transaction *cough*Josef*cough*.
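 * (Roughly: with start we are counted as an extent writer, so a commit
 * must wait for us to finish, while with join we could attach to a
 * transaction that already began committing and deadlock on the very
 * IO we are about to wait for.)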
2286 */ 2287 trans = btrfs_start_transaction(root, 0); 2288 if (IS_ERR(trans)) { 2289 ret = PTR_ERR(trans); 2290 goto out_release_extents; 2291 } 2292 2293 ret = btrfs_log_dentry_safe(trans, dentry, &ctx); 2294 btrfs_release_log_ctx_extents(&ctx); 2295 if (ret < 0) { 2296 /* Fallthrough and commit/free transaction. */ 2297 ret = 1; 2298 } 2299 2300 /* we've logged all the items and now have a consistent 2301 * version of the file in the log. It is possible that 2302 * someone will come in and modify the file, but that's 2303 * fine because the log is consistent on disk, and we 2304 * have references to all of the file's extents 2305 * 2306 * It is possible that someone will come in and log the 2307 * file again, but that will end up using the synchronization 2308 * inside btrfs_sync_log to keep things safe. 2309 */ 2310 up_write(&BTRFS_I(inode)->dio_sem); 2311 inode_unlock(inode); 2312 2313 if (ret != BTRFS_NO_LOG_SYNC) { 2314 if (!ret) { 2315 ret = btrfs_sync_log(trans, root, &ctx); 2316 if (!ret) { 2317 ret = btrfs_end_transaction(trans); 2318 goto out; 2319 } 2320 } 2321 if (!full_sync) { 2322 ret = btrfs_wait_ordered_range(inode, start, len); 2323 if (ret) { 2324 btrfs_end_transaction(trans); 2325 goto out; 2326 } 2327 } 2328 ret = btrfs_commit_transaction(trans); 2329 } else { 2330 ret = btrfs_end_transaction(trans); 2331 } 2332 out: 2333 ASSERT(list_empty(&ctx.list)); 2334 err = file_check_and_advance_wb_err(file); 2335 if (!ret) 2336 ret = err; 2337 return ret > 0 ? -EIO : ret; 2338 2339 out_release_extents: 2340 btrfs_release_log_ctx_extents(&ctx); 2341 up_write(&BTRFS_I(inode)->dio_sem); 2342 inode_unlock(inode); 2343 goto out; 2344 } 2345 2346 static const struct vm_operations_struct btrfs_file_vm_ops = { 2347 .fault = filemap_fault, 2348 .map_pages = filemap_map_pages, 2349 .page_mkwrite = btrfs_page_mkwrite, 2350 }; 2351 2352 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) 2353 { 2354 struct address_space *mapping = filp->f_mapping; 2355 2356 if (!mapping->a_ops->readpage) 2357 return -ENOEXEC; 2358 2359 file_accessed(filp); 2360 vma->vm_ops = &btrfs_file_vm_ops; 2361 2362 return 0; 2363 } 2364 2365 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf, 2366 int slot, u64 start, u64 end) 2367 { 2368 struct btrfs_file_extent_item *fi; 2369 struct btrfs_key key; 2370 2371 if (slot < 0 || slot >= btrfs_header_nritems(leaf)) 2372 return 0; 2373 2374 btrfs_item_key_to_cpu(leaf, &key, slot); 2375 if (key.objectid != btrfs_ino(inode) || 2376 key.type != BTRFS_EXTENT_DATA_KEY) 2377 return 0; 2378 2379 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 2380 2381 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) 2382 return 0; 2383 2384 if (btrfs_file_extent_disk_bytenr(leaf, fi)) 2385 return 0; 2386 2387 if (key.offset == end) 2388 return 1; 2389 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start) 2390 return 1; 2391 return 0; 2392 } 2393 2394 static int fill_holes(struct btrfs_trans_handle *trans, 2395 struct btrfs_inode *inode, 2396 struct btrfs_path *path, u64 offset, u64 end) 2397 { 2398 struct btrfs_fs_info *fs_info = trans->fs_info; 2399 struct btrfs_root *root = inode->root; 2400 struct extent_buffer *leaf; 2401 struct btrfs_file_extent_item *fi; 2402 struct extent_map *hole_em; 2403 struct extent_map_tree *em_tree = &inode->extent_tree; 2404 struct btrfs_key key; 2405 int ret; 2406 2407 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 2408 goto out; 2409 2410 key.objectid = btrfs_ino(inode); 2411 
key.type = BTRFS_EXTENT_DATA_KEY;
2412 key.offset = offset;
2413
2414 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2415 if (ret <= 0) {
2416 /*
2417 * We should have dropped this offset, so if we find it then
2418 * something has gone horribly wrong.
2419 */
2420 if (ret == 0)
2421 ret = -EINVAL;
2422 return ret;
2423 }
2424
2425 leaf = path->nodes[0];
2426 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2427 u64 num_bytes;
2428
2429 path->slots[0]--;
2430 fi = btrfs_item_ptr(leaf, path->slots[0],
2431 struct btrfs_file_extent_item);
2432 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2433 end - offset;
2434 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2435 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2436 btrfs_set_file_extent_offset(leaf, fi, 0);
2437 btrfs_mark_buffer_dirty(leaf);
2438 goto out;
2439 }
2440
2441 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2442 u64 num_bytes;
2443
2444 key.offset = offset;
2445 btrfs_set_item_key_safe(fs_info, path, &key);
2446 fi = btrfs_item_ptr(leaf, path->slots[0],
2447 struct btrfs_file_extent_item);
2448 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2449 offset;
2450 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2451 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2452 btrfs_set_file_extent_offset(leaf, fi, 0);
2453 btrfs_mark_buffer_dirty(leaf);
2454 goto out;
2455 }
2456 btrfs_release_path(path);
2457
2458 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2459 offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2460 if (ret)
2461 return ret;
2462
2463 out:
2464 btrfs_release_path(path);
2465
2466 hole_em = alloc_extent_map();
2467 if (!hole_em) {
2468 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2469 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2470 } else {
2471 hole_em->start = offset;
2472 hole_em->len = end - offset;
2473 hole_em->ram_bytes = hole_em->len;
2474 hole_em->orig_start = offset;
2475
2476 hole_em->block_start = EXTENT_MAP_HOLE;
2477 hole_em->block_len = 0;
2478 hole_em->orig_block_len = 0;
2479 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2480 hole_em->generation = trans->transid;
2481
2482 do {
2483 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2484 write_lock(&em_tree->lock);
2485 ret = add_extent_mapping(em_tree, hole_em, 1);
2486 write_unlock(&em_tree->lock);
2487 } while (ret == -EEXIST);
2488 free_extent_map(hole_em);
2489 if (ret)
2490 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2491 &inode->runtime_flags);
2492 }
2493
2494 return 0;
2495 }
2496
2497 /*
2498 * Find a hole extent on the given inode and change start/len to the end of
2499 * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
2500 * em->start + em->len > start).
2501 * When a hole extent is found, return 1 and modify start/len.
2502 */
2503 static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2504 {
2505 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2506 struct extent_map *em;
2507 int ret = 0;
2508
2509 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2510 round_down(*start, fs_info->sectorsize),
2511 round_up(*len, fs_info->sectorsize));
2512 if (IS_ERR(em))
2513 return PTR_ERR(em);
2514
2515 /* Hole or vacuum extent (the latter only exists in no-holes mode) */
2516 if (em->block_start == EXTENT_MAP_HOLE) {
2517 ret = 1;
2518 *len = em->start + em->len > *start + *len ?
2519 0 : *start + *len - em->start - em->len; 2520 *start = em->start + em->len; 2521 } 2522 free_extent_map(em); 2523 return ret; 2524 } 2525 2526 static int btrfs_punch_hole_lock_range(struct inode *inode, 2527 const u64 lockstart, 2528 const u64 lockend, 2529 struct extent_state **cached_state) 2530 { 2531 while (1) { 2532 struct btrfs_ordered_extent *ordered; 2533 int ret; 2534 2535 truncate_pagecache_range(inode, lockstart, lockend); 2536 2537 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 2538 cached_state); 2539 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), 2540 lockend); 2541 2542 /* 2543 * We need to make sure we have no ordered extents in this range 2544 * and nobody raced in and read a page in this range, if we did 2545 * we need to try again. 2546 */ 2547 if ((!ordered || 2548 (ordered->file_offset + ordered->num_bytes <= lockstart || 2549 ordered->file_offset > lockend)) && 2550 !filemap_range_has_page(inode->i_mapping, 2551 lockstart, lockend)) { 2552 if (ordered) 2553 btrfs_put_ordered_extent(ordered); 2554 break; 2555 } 2556 if (ordered) 2557 btrfs_put_ordered_extent(ordered); 2558 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, 2559 lockend, cached_state); 2560 ret = btrfs_wait_ordered_range(inode, lockstart, 2561 lockend - lockstart + 1); 2562 if (ret) 2563 return ret; 2564 } 2565 return 0; 2566 } 2567 2568 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans, 2569 struct inode *inode, 2570 struct btrfs_path *path, 2571 struct btrfs_replace_extent_info *extent_info, 2572 const u64 replace_len) 2573 { 2574 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2575 struct btrfs_root *root = BTRFS_I(inode)->root; 2576 struct btrfs_file_extent_item *extent; 2577 struct extent_buffer *leaf; 2578 struct btrfs_key key; 2579 int slot; 2580 struct btrfs_ref ref = { 0 }; 2581 int ret; 2582 2583 if (replace_len == 0) 2584 return 0; 2585 2586 if (extent_info->disk_offset == 0 && 2587 btrfs_fs_incompat(fs_info, NO_HOLES)) 2588 return 0; 2589 2590 key.objectid = btrfs_ino(BTRFS_I(inode)); 2591 key.type = BTRFS_EXTENT_DATA_KEY; 2592 key.offset = extent_info->file_offset; 2593 ret = btrfs_insert_empty_item(trans, root, path, &key, 2594 sizeof(struct btrfs_file_extent_item)); 2595 if (ret) 2596 return ret; 2597 leaf = path->nodes[0]; 2598 slot = path->slots[0]; 2599 write_extent_buffer(leaf, extent_info->extent_buf, 2600 btrfs_item_ptr_offset(leaf, slot), 2601 sizeof(struct btrfs_file_extent_item)); 2602 extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 2603 ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE); 2604 btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset); 2605 btrfs_set_file_extent_num_bytes(leaf, extent, replace_len); 2606 if (extent_info->is_new_extent) 2607 btrfs_set_file_extent_generation(leaf, extent, trans->transid); 2608 btrfs_mark_buffer_dirty(leaf); 2609 btrfs_release_path(path); 2610 2611 ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), 2612 extent_info->file_offset, replace_len); 2613 if (ret) 2614 return ret; 2615 2616 /* If it's a hole, nothing more needs to be done. 
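 * A hole here is a file extent item with a zero disk_bytenr (signalled
 * by a zero disk_offset in @extent_info), so there is no data extent to
 * account bytes or add backreferences for.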
*/ 2617 if (extent_info->disk_offset == 0) 2618 return 0; 2619 2620 inode_add_bytes(inode, replace_len); 2621 2622 if (extent_info->is_new_extent && extent_info->insertions == 0) { 2623 key.objectid = extent_info->disk_offset; 2624 key.type = BTRFS_EXTENT_ITEM_KEY; 2625 key.offset = extent_info->disk_len; 2626 ret = btrfs_alloc_reserved_file_extent(trans, root, 2627 btrfs_ino(BTRFS_I(inode)), 2628 extent_info->file_offset, 2629 extent_info->qgroup_reserved, 2630 &key); 2631 } else { 2632 u64 ref_offset; 2633 2634 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, 2635 extent_info->disk_offset, 2636 extent_info->disk_len, 0); 2637 ref_offset = extent_info->file_offset - extent_info->data_offset; 2638 btrfs_init_data_ref(&ref, root->root_key.objectid, 2639 btrfs_ino(BTRFS_I(inode)), ref_offset); 2640 ret = btrfs_inc_extent_ref(trans, &ref); 2641 } 2642 2643 extent_info->insertions++; 2644 2645 return ret; 2646 } 2647 2648 /* 2649 * The respective range must have been previously locked, as well as the inode. 2650 * The end offset is inclusive (last byte of the range). 2651 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing 2652 * the file range with an extent. 2653 * When not punching a hole, we don't want to end up in a state where we dropped 2654 * extents without inserting a new one, so we must abort the transaction to avoid 2655 * a corruption. 2656 */ 2657 int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path, 2658 const u64 start, const u64 end, 2659 struct btrfs_replace_extent_info *extent_info, 2660 struct btrfs_trans_handle **trans_out) 2661 { 2662 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2663 u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1); 2664 u64 ino_size = round_up(inode->i_size, fs_info->sectorsize); 2665 struct btrfs_root *root = BTRFS_I(inode)->root; 2666 struct btrfs_trans_handle *trans = NULL; 2667 struct btrfs_block_rsv *rsv; 2668 unsigned int rsv_count; 2669 u64 cur_offset; 2670 u64 drop_end; 2671 u64 len = end - start; 2672 int ret = 0; 2673 2674 if (end <= start) 2675 return -EINVAL; 2676 2677 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 2678 if (!rsv) { 2679 ret = -ENOMEM; 2680 goto out; 2681 } 2682 rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1); 2683 rsv->failfast = 1; 2684 2685 /* 2686 * 1 - update the inode 2687 * 1 - removing the extents in the range 2688 * 1 - adding the hole extent if no_holes isn't set or if we are 2689 * replacing the range with a new extent 2690 */ 2691 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info) 2692 rsv_count = 3; 2693 else 2694 rsv_count = 2; 2695 2696 trans = btrfs_start_transaction(root, rsv_count); 2697 if (IS_ERR(trans)) { 2698 ret = PTR_ERR(trans); 2699 trans = NULL; 2700 goto out_free; 2701 } 2702 2703 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 2704 min_size, false); 2705 BUG_ON(ret); 2706 trans->block_rsv = rsv; 2707 2708 cur_offset = start; 2709 while (cur_offset < end) { 2710 ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path, 2711 cur_offset, end + 1, &drop_end, 2712 1, 0, 0, NULL); 2713 if (ret != -ENOSPC) { 2714 /* 2715 * When cloning we want to avoid transaction aborts when 2716 * nothing was done and we are attempting to clone parts 2717 * of inline extents, in such cases -EOPNOTSUPP is 2718 * returned by __btrfs_drop_extents() without having 2719 * changed anything in the file. 
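 * In that case we break out below without aborting and let the
 * -EOPNOTSUPP be propagated back to the caller.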
2720 */
2721 if (extent_info && !extent_info->is_new_extent &&
2722 ret && ret != -EOPNOTSUPP)
2723 btrfs_abort_transaction(trans, ret);
2724 break;
2725 }
2726
2727 trans->block_rsv = &fs_info->trans_block_rsv;
2728
2729 if (!extent_info && cur_offset < drop_end &&
2730 cur_offset < ino_size) {
2731 ret = fill_holes(trans, BTRFS_I(inode), path,
2732 cur_offset, drop_end);
2733 if (ret) {
2734 /*
2735 * If we failed then we didn't insert our hole
2736 * entries for the area we dropped, so now the
2737 * fs is corrupted, so we must abort the
2738 * transaction.
2739 */
2740 btrfs_abort_transaction(trans, ret);
2741 break;
2742 }
2743 } else if (!extent_info && cur_offset < drop_end) {
2744 /*
2745 * We are past the i_size here, but since we didn't
2746 * insert holes we need to clear the mapped area so we
2747 * know to not set disk_i_size in this area until a new
2748 * file extent is inserted here.
2749 */
2750 ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
2751 cur_offset, drop_end - cur_offset);
2752 if (ret) {
2753 /*
2754 * We couldn't clear our area, so we could
2755 * presumably adjust disk_i_size up and corrupt
2756 * the fs, so we need to abort.
2757 */
2758 btrfs_abort_transaction(trans, ret);
2759 break;
2760 }
2761 }
2762
2763 if (extent_info && drop_end > extent_info->file_offset) {
2764 u64 replace_len = drop_end - extent_info->file_offset;
2765
2766 ret = btrfs_insert_replace_extent(trans, inode, path,
2767 extent_info, replace_len);
2768 if (ret) {
2769 btrfs_abort_transaction(trans, ret);
2770 break;
2771 }
2772 extent_info->data_len -= replace_len;
2773 extent_info->data_offset += replace_len;
2774 extent_info->file_offset += replace_len;
2775 }
2776
2777 cur_offset = drop_end;
2778
2779 ret = btrfs_update_inode(trans, root, inode);
2780 if (ret)
2781 break;
2782
2783 btrfs_end_transaction(trans);
2784 btrfs_btree_balance_dirty(fs_info);
2785
2786 trans = btrfs_start_transaction(root, rsv_count);
2787 if (IS_ERR(trans)) {
2788 ret = PTR_ERR(trans);
2789 trans = NULL;
2790 break;
2791 }
2792
2793 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2794 rsv, min_size, false);
2795 BUG_ON(ret); /* shouldn't happen */
2796 trans->block_rsv = rsv;
2797
2798 if (!extent_info) {
2799 ret = find_first_non_hole(inode, &cur_offset, &len);
2800 if (unlikely(ret < 0))
2801 break;
2802 if (ret && !len) {
2803 ret = 0;
2804 break;
2805 }
2806 }
2807 }
2808
2809 /*
2810 * If we were cloning, force the next fsync to be a full one since we
2811 * replaced (or just dropped in the case of cloning holes when
2812 * NO_HOLES is enabled) extents and extent maps.
2813 * This is for the sake of simplicity, and cloning into files larger
2814 * than 16MiB would force the full fsync anyway (when
2815 * try_release_extent_mapping() is invoked during page cache truncation).
2816 */
2817 if (extent_info && !extent_info->is_new_extent)
2818 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2819 &BTRFS_I(inode)->runtime_flags);
2820
2821 if (ret)
2822 goto out_trans;
2823
2824 trans->block_rsv = &fs_info->trans_block_rsv;
2825 /*
2826 * If we are using the NO_HOLES feature we might already have had a
2827 * hole that overlaps a part of the region [lockstart, lockend] and
2828 * ends at (or beyond) lockend.
Since we have no file extent items to 2829 * represent holes, drop_end can be less than lockend and so we must 2830 * make sure we have an extent map representing the existing hole (the 2831 * call to __btrfs_drop_extents() might have dropped the existing extent 2832 * map representing the existing hole), otherwise the fast fsync path 2833 * will not record the existence of the hole region 2834 * [existing_hole_start, lockend]. 2835 */ 2836 if (drop_end <= end) 2837 drop_end = end + 1; 2838 /* 2839 * Don't insert file hole extent item if it's for a range beyond eof 2840 * (because it's useless) or if it represents a 0 bytes range (when 2841 * cur_offset == drop_end). 2842 */ 2843 if (!extent_info && cur_offset < ino_size && cur_offset < drop_end) { 2844 ret = fill_holes(trans, BTRFS_I(inode), path, 2845 cur_offset, drop_end); 2846 if (ret) { 2847 /* Same comment as above. */ 2848 btrfs_abort_transaction(trans, ret); 2849 goto out_trans; 2850 } 2851 } else if (!extent_info && cur_offset < drop_end) { 2852 /* See the comment in the loop above for the reasoning here. */ 2853 ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode), 2854 cur_offset, drop_end - cur_offset); 2855 if (ret) { 2856 btrfs_abort_transaction(trans, ret); 2857 goto out_trans; 2858 } 2859 2860 } 2861 if (extent_info) { 2862 ret = btrfs_insert_replace_extent(trans, inode, path, extent_info, 2863 extent_info->data_len); 2864 if (ret) { 2865 btrfs_abort_transaction(trans, ret); 2866 goto out_trans; 2867 } 2868 } 2869 2870 out_trans: 2871 if (!trans) 2872 goto out_free; 2873 2874 trans->block_rsv = &fs_info->trans_block_rsv; 2875 if (ret) 2876 btrfs_end_transaction(trans); 2877 else 2878 *trans_out = trans; 2879 out_free: 2880 btrfs_free_block_rsv(fs_info, rsv); 2881 out: 2882 return ret; 2883 } 2884 2885 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) 2886 { 2887 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2888 struct btrfs_root *root = BTRFS_I(inode)->root; 2889 struct extent_state *cached_state = NULL; 2890 struct btrfs_path *path; 2891 struct btrfs_trans_handle *trans = NULL; 2892 u64 lockstart; 2893 u64 lockend; 2894 u64 tail_start; 2895 u64 tail_len; 2896 u64 orig_start = offset; 2897 int ret = 0; 2898 bool same_block; 2899 u64 ino_size; 2900 bool truncated_block = false; 2901 bool updated_inode = false; 2902 2903 ret = btrfs_wait_ordered_range(inode, offset, len); 2904 if (ret) 2905 return ret; 2906 2907 inode_lock(inode); 2908 ino_size = round_up(inode->i_size, fs_info->sectorsize); 2909 ret = find_first_non_hole(inode, &offset, &len); 2910 if (ret < 0) 2911 goto out_only_mutex; 2912 if (ret && !len) { 2913 /* Already in a large hole */ 2914 ret = 0; 2915 goto out_only_mutex; 2916 } 2917 2918 lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode))); 2919 lockend = round_down(offset + len, 2920 btrfs_inode_sectorsize(BTRFS_I(inode))) - 1; 2921 same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset)) 2922 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)); 2923 /* 2924 * We needn't truncate any block which is beyond the end of the file 2925 * because we are sure there is no data there. 2926 */ 2927 /* 2928 * Only do this if we are in the same block and we aren't doing the 2929 * entire block. 
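 * E.g. with a 4K sector size, punching 512 bytes at offset 1024 stays
 * within a single block, so zeroing the range via btrfs_truncate_block()
 * below is enough and no file extent items need to be dropped.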
2930 */
2931 if (same_block && len < fs_info->sectorsize) {
2932 if (offset < ino_size) {
2933 truncated_block = true;
2934 ret = btrfs_truncate_block(inode, offset, len, 0);
2935 } else {
2936 ret = 0;
2937 }
2938 goto out_only_mutex;
2939 }
2940
2941 /* zero back part of the first block */
2942 if (offset < ino_size) {
2943 truncated_block = true;
2944 ret = btrfs_truncate_block(inode, offset, 0, 0);
2945 if (ret) {
2946 inode_unlock(inode);
2947 return ret;
2948 }
2949 }
2950
2951 /* Check the aligned pages after the first unaligned page.
2952 * If offset != orig_start, the first unaligned page and
2953 * possibly several following pages are already in holes,
2954 * so the extra check can be skipped. */
2955 if (offset == orig_start) {
2956 /* After truncating the page, check for a hole again */
2957 len = offset + len - lockstart;
2958 offset = lockstart;
2959 ret = find_first_non_hole(inode, &offset, &len);
2960 if (ret < 0)
2961 goto out_only_mutex;
2962 if (ret && !len) {
2963 ret = 0;
2964 goto out_only_mutex;
2965 }
2966 lockstart = offset;
2967 }
2968
2969 /* Check whether the tail unaligned part is in a hole */
2970 tail_start = lockend + 1;
2971 tail_len = offset + len - tail_start;
2972 if (tail_len) {
2973 ret = find_first_non_hole(inode, &tail_start, &tail_len);
2974 if (unlikely(ret < 0))
2975 goto out_only_mutex;
2976 if (!ret) {
2977 /* zero the front end of the last page */
2978 if (tail_start + tail_len < ino_size) {
2979 truncated_block = true;
2980 ret = btrfs_truncate_block(inode,
2981 tail_start + tail_len,
2982 0, 1);
2983 if (ret)
2984 goto out_only_mutex;
2985 }
2986 }
2987 }
2988
2989 if (lockend < lockstart) {
2990 ret = 0;
2991 goto out_only_mutex;
2992 }
2993
2994 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2995 &cached_state);
2996 if (ret)
2997 goto out_only_mutex;
2998
2999 path = btrfs_alloc_path();
3000 if (!path) {
3001 ret = -ENOMEM;
3002 goto out;
3003 }
3004
3005 ret = btrfs_replace_file_extents(inode, path, lockstart, lockend, NULL,
3006 &trans);
3007 btrfs_free_path(path);
3008 if (ret)
3009 goto out;
3010
3011 ASSERT(trans != NULL);
3012 inode_inc_iversion(inode);
3013 inode->i_mtime = inode->i_ctime = current_time(inode);
3014 ret = btrfs_update_inode(trans, root, inode);
3015 updated_inode = true;
3016 btrfs_end_transaction(trans);
3017 btrfs_btree_balance_dirty(fs_info);
3018 out:
3019 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3020 &cached_state);
3021 out_only_mutex:
3022 if (!updated_inode && truncated_block && !ret) {
3023 /*
3024 * If we only end up zeroing part of a page, we still need to
3025 * update the inode item, so that all the time fields are
3026 * updated as well as the necessary btrfs inode in memory fields
3027 * for detecting, at fsync time, if the inode isn't yet in the
3028 * log tree or it's there but not up to date.
3029 */
3030 struct timespec64 now = current_time(inode);
3031
3032 inode_inc_iversion(inode);
3033 inode->i_mtime = now;
3034 inode->i_ctime = now;
3035 trans = btrfs_start_transaction(root, 1);
3036 if (IS_ERR(trans)) {
3037 ret = PTR_ERR(trans);
3038 } else {
3039 int ret2;
3040
3041 ret = btrfs_update_inode(trans, root, inode);
3042 ret2 = btrfs_end_transaction(trans);
3043 if (!ret)
3044 ret = ret2;
3045 }
3046 }
3047 inode_unlock(inode);
3048 return ret;
3049 }
3050
3051 /* Helper structure to record which range is already reserved */
3052 struct falloc_range {
3053 struct list_head list;
3054 u64 start;
3055 u64 len;
3056 };
3057
3058 /*
3059 * Helper function to add a falloc range
3060 *
3061 * The caller should have locked the larger extent range containing
3062 * [start, len)
3063 */
3064 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
3065 {
3066 struct falloc_range *prev = NULL;
3067 struct falloc_range *range = NULL;
3068
3069 if (list_empty(head))
3070 goto insert;
3071
3072 /*
3073 * As fallocate iterates in bytenr order, we only need to check
3074 * the last range.
3075 */
3076 prev = list_entry(head->prev, struct falloc_range, list);
3077 if (prev->start + prev->len == start) {
3078 prev->len += len;
3079 return 0;
3080 }
3081 insert:
3082 range = kmalloc(sizeof(*range), GFP_KERNEL);
3083 if (!range)
3084 return -ENOMEM;
3085 range->start = start;
3086 range->len = len;
3087 list_add_tail(&range->list, head);
3088 return 0;
3089 }
3090
3091 static int btrfs_fallocate_update_isize(struct inode *inode,
3092 const u64 end,
3093 const int mode)
3094 {
3095 struct btrfs_trans_handle *trans;
3096 struct btrfs_root *root = BTRFS_I(inode)->root;
3097 int ret;
3098 int ret2;
3099
3100 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
3101 return 0;
3102
3103 trans = btrfs_start_transaction(root, 1);
3104 if (IS_ERR(trans))
3105 return PTR_ERR(trans);
3106
3107 inode->i_ctime = current_time(inode);
3108 i_size_write(inode, end);
3109 btrfs_inode_safe_disk_i_size_write(inode, 0);
3110 ret = btrfs_update_inode(trans, root, inode);
3111 ret2 = btrfs_end_transaction(trans);
3112
3113 return ret ?
ret : ret2;
3114 }
3115
3116 enum {
3117 RANGE_BOUNDARY_WRITTEN_EXTENT,
3118 RANGE_BOUNDARY_PREALLOC_EXTENT,
3119 RANGE_BOUNDARY_HOLE,
3120 };
3121
3122 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
3123 u64 offset)
3124 {
3125 const u64 sectorsize = btrfs_inode_sectorsize(inode);
3126 struct extent_map *em;
3127 int ret;
3128
3129 offset = round_down(offset, sectorsize);
3130 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
3131 if (IS_ERR(em))
3132 return PTR_ERR(em);
3133
3134 if (em->block_start == EXTENT_MAP_HOLE)
3135 ret = RANGE_BOUNDARY_HOLE;
3136 else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3137 ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
3138 else
3139 ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
3140
3141 free_extent_map(em);
3142 return ret;
3143 }
3144
3145 static int btrfs_zero_range(struct inode *inode,
3146 loff_t offset,
3147 loff_t len,
3148 const int mode)
3149 {
3150 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3151 struct extent_map *em;
3152 struct extent_changeset *data_reserved = NULL;
3153 int ret;
3154 u64 alloc_hint = 0;
3155 const u64 sectorsize = btrfs_inode_sectorsize(BTRFS_I(inode));
3156 u64 alloc_start = round_down(offset, sectorsize);
3157 u64 alloc_end = round_up(offset + len, sectorsize);
3158 u64 bytes_to_reserve = 0;
3159 bool space_reserved = false;
3160
3161 inode_dio_wait(inode);
3162
3163 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3164 alloc_end - alloc_start);
3165 if (IS_ERR(em)) {
3166 ret = PTR_ERR(em);
3167 goto out;
3168 }
3169
3170 /*
3171 * Avoid hole punching and extent allocation for some cases. More cases
3172 * could be considered, but they are unlikely to be common and we keep
3173 * things as simple as possible for now. Also, intentionally, if the target
3174 * range contains one or more prealloc extents together with regular
3175 * extents and holes, we drop all the existing extents and allocate a
3176 * new prealloc extent, so that we get a larger contiguous disk extent.
3177 */
3178 if (em->start <= alloc_start &&
3179 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3180 const u64 em_end = em->start + em->len;
3181
3182 if (em_end >= offset + len) {
3183 /*
3184 * The whole range is already a prealloc extent,
3185 * do nothing except update the inode's i_size if
3186 * needed.
3187 */
3188 free_extent_map(em);
3189 ret = btrfs_fallocate_update_isize(inode, offset + len,
3190 mode);
3191 goto out;
3192 }
3193 /*
3194 * Part of the range is already a prealloc extent, so operate
3195 * only on the remaining part of the range.
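 * E.g. if [0, 64K) is already preallocated and the request is to zero
 * [32K, 128K), only [64K, 128K) still needs an allocation, and the end
 * of the existing extent is used as the allocation hint below.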
3196 */ 3197 alloc_start = em_end; 3198 ASSERT(IS_ALIGNED(alloc_start, sectorsize)); 3199 len = offset + len - alloc_start; 3200 offset = alloc_start; 3201 alloc_hint = em->block_start + em->len; 3202 } 3203 free_extent_map(em); 3204 3205 if (BTRFS_BYTES_TO_BLKS(fs_info, offset) == 3206 BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) { 3207 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start, 3208 sectorsize); 3209 if (IS_ERR(em)) { 3210 ret = PTR_ERR(em); 3211 goto out; 3212 } 3213 3214 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3215 free_extent_map(em); 3216 ret = btrfs_fallocate_update_isize(inode, offset + len, 3217 mode); 3218 goto out; 3219 } 3220 if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) { 3221 free_extent_map(em); 3222 ret = btrfs_truncate_block(inode, offset, len, 0); 3223 if (!ret) 3224 ret = btrfs_fallocate_update_isize(inode, 3225 offset + len, 3226 mode); 3227 return ret; 3228 } 3229 free_extent_map(em); 3230 alloc_start = round_down(offset, sectorsize); 3231 alloc_end = alloc_start + sectorsize; 3232 goto reserve_space; 3233 } 3234 3235 alloc_start = round_up(offset, sectorsize); 3236 alloc_end = round_down(offset + len, sectorsize); 3237 3238 /* 3239 * For unaligned ranges, check the pages at the boundaries, they might 3240 * map to an extent, in which case we need to partially zero them, or 3241 * they might map to a hole, in which case we need our allocation range 3242 * to cover them. 3243 */ 3244 if (!IS_ALIGNED(offset, sectorsize)) { 3245 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode), 3246 offset); 3247 if (ret < 0) 3248 goto out; 3249 if (ret == RANGE_BOUNDARY_HOLE) { 3250 alloc_start = round_down(offset, sectorsize); 3251 ret = 0; 3252 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) { 3253 ret = btrfs_truncate_block(inode, offset, 0, 0); 3254 if (ret) 3255 goto out; 3256 } else { 3257 ret = 0; 3258 } 3259 } 3260 3261 if (!IS_ALIGNED(offset + len, sectorsize)) { 3262 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode), 3263 offset + len); 3264 if (ret < 0) 3265 goto out; 3266 if (ret == RANGE_BOUNDARY_HOLE) { 3267 alloc_end = round_up(offset + len, sectorsize); 3268 ret = 0; 3269 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) { 3270 ret = btrfs_truncate_block(inode, offset + len, 0, 1); 3271 if (ret) 3272 goto out; 3273 } else { 3274 ret = 0; 3275 } 3276 } 3277 3278 reserve_space: 3279 if (alloc_start < alloc_end) { 3280 struct extent_state *cached_state = NULL; 3281 const u64 lockstart = alloc_start; 3282 const u64 lockend = alloc_end - 1; 3283 3284 bytes_to_reserve = alloc_end - alloc_start; 3285 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), 3286 bytes_to_reserve); 3287 if (ret < 0) 3288 goto out; 3289 space_reserved = true; 3290 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend, 3291 &cached_state); 3292 if (ret) 3293 goto out; 3294 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved, 3295 alloc_start, bytes_to_reserve); 3296 if (ret) 3297 goto out; 3298 ret = btrfs_prealloc_file_range(inode, mode, alloc_start, 3299 alloc_end - alloc_start, 3300 i_blocksize(inode), 3301 offset + len, &alloc_hint); 3302 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, 3303 lockend, &cached_state); 3304 /* btrfs_prealloc_file_range releases reserved space on error */ 3305 if (ret) { 3306 space_reserved = false; 3307 goto out; 3308 } 3309 } 3310 ret = btrfs_fallocate_update_isize(inode, offset + len, mode); 3311 out: 3312 if (ret && space_reserved) 3313 
btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3314 alloc_start, bytes_to_reserve);
3315 extent_changeset_free(data_reserved);
3316
3317 return ret;
3318 }
3319
3320 static long btrfs_fallocate(struct file *file, int mode,
3321 loff_t offset, loff_t len)
3322 {
3323 struct inode *inode = file_inode(file);
3324 struct extent_state *cached_state = NULL;
3325 struct extent_changeset *data_reserved = NULL;
3326 struct falloc_range *range;
3327 struct falloc_range *tmp;
3328 struct list_head reserve_list;
3329 u64 cur_offset;
3330 u64 last_byte;
3331 u64 alloc_start;
3332 u64 alloc_end;
3333 u64 alloc_hint = 0;
3334 u64 locked_end;
3335 u64 actual_end = 0;
3336 struct extent_map *em;
3337 int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode));
3338 int ret;
3339
3340 alloc_start = round_down(offset, blocksize);
3341 alloc_end = round_up(offset + len, blocksize);
3342 cur_offset = alloc_start;
3343
3344 /* Make sure we aren't being given some crap mode */
3345 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3346 FALLOC_FL_ZERO_RANGE))
3347 return -EOPNOTSUPP;
3348
3349 if (mode & FALLOC_FL_PUNCH_HOLE)
3350 return btrfs_punch_hole(inode, offset, len);
3351
3352 /*
3353 * Only trigger disk allocation, don't trigger qgroup reservation.
3354 *
3355 * For qgroup space, it will be checked later.
3356 */
3357 if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3358 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3359 alloc_end - alloc_start);
3360 if (ret < 0)
3361 return ret;
3362 }
3363
3364 inode_lock(inode);
3365
3366 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3367 ret = inode_newsize_ok(inode, offset + len);
3368 if (ret)
3369 goto out;
3370 }
3371
3372 /*
3373 * TODO: Move these two operations after we have checked for
3374 * accurately reserved space, or fallocate can still fail, but
3375 * with the page truncated or the size expanded.
3376 *
3377 * But that's a minor problem and won't do much harm.
3378 */
3379 if (alloc_start > inode->i_size) {
3380 ret = btrfs_cont_expand(inode, i_size_read(inode),
3381 alloc_start);
3382 if (ret)
3383 goto out;
3384 } else if (offset + len > inode->i_size) {
3385 /*
3386 * If we are fallocating from the end of the file onward we
3387 * need to zero out the end of the block if i_size lands in the
3388 * middle of a block.
3389 */
3390 ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
3391 if (ret)
3392 goto out;
3393 }
3394
3395 /*
3396 * Wait for ordered IO before we have any locks. We'll loop again
3397 * below with the locks held.
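 * Waiting here, without the extent range locked, is cheaper; the loop
 * below re-checks under the lock and, if a new ordered extent raced in,
 * drops the lock and waits again.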
3398 */
3399 ret = btrfs_wait_ordered_range(inode, alloc_start,
3400 alloc_end - alloc_start);
3401 if (ret)
3402 goto out;
3403
3404 if (mode & FALLOC_FL_ZERO_RANGE) {
3405 ret = btrfs_zero_range(inode, offset, len, mode);
3406 inode_unlock(inode);
3407 return ret;
3408 }
3409
3410 locked_end = alloc_end - 1;
3411 while (1) {
3412 struct btrfs_ordered_extent *ordered;
3413
3414 /* The extent lock is ordered inside the running
3415 * transaction
3416 */
3417 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3418 locked_end, &cached_state);
3419 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
3420 locked_end);
3421
3422 if (ordered &&
3423 ordered->file_offset + ordered->num_bytes > alloc_start &&
3424 ordered->file_offset < alloc_end) {
3425 btrfs_put_ordered_extent(ordered);
3426 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3427 alloc_start, locked_end,
3428 &cached_state);
3429 /*
3430 * We can't wait on the range with the transaction
3431 * running or with the extent lock held
3432 */
3433 ret = btrfs_wait_ordered_range(inode, alloc_start,
3434 alloc_end - alloc_start);
3435 if (ret)
3436 goto out;
3437 } else {
3438 if (ordered)
3439 btrfs_put_ordered_extent(ordered);
3440 break;
3441 }
3442 }
3443
3444 /* First, check if we exceed the qgroup limit */
3445 INIT_LIST_HEAD(&reserve_list);
3446 while (cur_offset < alloc_end) {
3447 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3448 alloc_end - cur_offset);
3449 if (IS_ERR(em)) {
3450 ret = PTR_ERR(em);
3451 break;
3452 }
3453 last_byte = min(extent_map_end(em), alloc_end);
3454 actual_end = min_t(u64, extent_map_end(em), offset + len);
3455 last_byte = ALIGN(last_byte, blocksize);
3456 if (em->block_start == EXTENT_MAP_HOLE ||
3457 (cur_offset >= inode->i_size &&
3458 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3459 ret = add_falloc_range(&reserve_list, cur_offset,
3460 last_byte - cur_offset);
3461 if (ret < 0) {
3462 free_extent_map(em);
3463 break;
3464 }
3465 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3466 &data_reserved, cur_offset,
3467 last_byte - cur_offset);
3468 if (ret < 0) {
3469 cur_offset = last_byte;
3470 free_extent_map(em);
3471 break;
3472 }
3473 } else {
3474 /*
3475 * We don't need to reserve an unwritten extent for this
3476 * range; free the reserved data space first, otherwise
3477 * it'll result in a false ENOSPC error.
3478 */
3479 btrfs_free_reserved_data_space(BTRFS_I(inode),
3480 data_reserved, cur_offset,
3481 last_byte - cur_offset);
3482 }
3483 free_extent_map(em);
3484 cur_offset = last_byte;
3485 }
3486
3487 /*
3488 * If ret is still 0, it means we're OK to fallocate.
3489 * Otherwise just clean up the list and exit.
3490 */
3491 list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3492 if (!ret)
3493 ret = btrfs_prealloc_file_range(inode, mode,
3494 range->start,
3495 range->len, i_blocksize(inode),
3496 offset + len, &alloc_hint);
3497 else
3498 btrfs_free_reserved_data_space(BTRFS_I(inode),
3499 data_reserved, range->start,
3500 range->len);
3501 list_del(&range->list);
3502 kfree(range);
3503 }
3504 if (ret < 0)
3505 goto out_unlock;
3506
3507 /*
3508 * We didn't need to allocate any more space, but we still extended the
3509 * size of the file so we need to update i_size and the inode item.
3510 */
3511 ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3512 out_unlock:
3513 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3514 &cached_state);
3515 out:
3516 inode_unlock(inode);
3517 /* Let go of our reservation.
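 * i.e. release whatever part of the data space reserved up front (only
 * done in the non-ZERO_RANGE case) was not consumed; the ZERO_RANGE
 * path handles its own reservations.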
*/ 3518 if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE)) 3519 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved, 3520 cur_offset, alloc_end - cur_offset); 3521 extent_changeset_free(data_reserved); 3522 return ret; 3523 } 3524 3525 static loff_t find_desired_extent(struct inode *inode, loff_t offset, 3526 int whence) 3527 { 3528 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3529 struct extent_map *em = NULL; 3530 struct extent_state *cached_state = NULL; 3531 loff_t i_size = inode->i_size; 3532 u64 lockstart; 3533 u64 lockend; 3534 u64 start; 3535 u64 len; 3536 int ret = 0; 3537 3538 if (i_size == 0 || offset >= i_size) 3539 return -ENXIO; 3540 3541 /* 3542 * offset can be negative, in this case we start finding DATA/HOLE from 3543 * the very start of the file. 3544 */ 3545 start = max_t(loff_t, 0, offset); 3546 3547 lockstart = round_down(start, fs_info->sectorsize); 3548 lockend = round_up(i_size, fs_info->sectorsize); 3549 if (lockend <= lockstart) 3550 lockend = lockstart + fs_info->sectorsize; 3551 lockend--; 3552 len = lockend - lockstart + 1; 3553 3554 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 3555 &cached_state); 3556 3557 while (start < i_size) { 3558 em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len); 3559 if (IS_ERR(em)) { 3560 ret = PTR_ERR(em); 3561 em = NULL; 3562 break; 3563 } 3564 3565 if (whence == SEEK_HOLE && 3566 (em->block_start == EXTENT_MAP_HOLE || 3567 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) 3568 break; 3569 else if (whence == SEEK_DATA && 3570 (em->block_start != EXTENT_MAP_HOLE && 3571 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) 3572 break; 3573 3574 start = em->start + em->len; 3575 free_extent_map(em); 3576 em = NULL; 3577 cond_resched(); 3578 } 3579 free_extent_map(em); 3580 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 3581 &cached_state); 3582 if (ret) { 3583 offset = ret; 3584 } else { 3585 if (whence == SEEK_DATA && start >= i_size) 3586 offset = -ENXIO; 3587 else 3588 offset = min_t(loff_t, start, i_size); 3589 } 3590 3591 return offset; 3592 } 3593 3594 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) 3595 { 3596 struct inode *inode = file->f_mapping->host; 3597 3598 switch (whence) { 3599 default: 3600 return generic_file_llseek(file, offset, whence); 3601 case SEEK_DATA: 3602 case SEEK_HOLE: 3603 inode_lock_shared(inode); 3604 offset = find_desired_extent(inode, offset, whence); 3605 inode_unlock_shared(inode); 3606 break; 3607 } 3608 3609 if (offset < 0) 3610 return offset; 3611 3612 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); 3613 } 3614 3615 static int btrfs_file_open(struct inode *inode, struct file *filp) 3616 { 3617 filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; 3618 return generic_file_open(inode, filp); 3619 } 3620 3621 static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 3622 { 3623 ssize_t ret = 0; 3624 3625 if (iocb->ki_flags & IOCB_DIRECT) { 3626 struct inode *inode = file_inode(iocb->ki_filp); 3627 3628 inode_lock_shared(inode); 3629 ret = btrfs_direct_IO(iocb, to); 3630 inode_unlock_shared(inode); 3631 if (ret < 0) 3632 return ret; 3633 } 3634 3635 return generic_file_buffered_read(iocb, to, ret); 3636 } 3637 3638 const struct file_operations btrfs_file_operations = { 3639 .llseek = btrfs_file_llseek, 3640 .read_iter = btrfs_file_read_iter, 3641 .splice_read = generic_file_splice_read, 3642 .write_iter = btrfs_file_write_iter, 3643 .splice_write = iter_file_splice_write, 3644 .mmap = 
btrfs_file_mmap, 3645 .open = btrfs_file_open, 3646 .release = btrfs_release_file, 3647 .fsync = btrfs_sync_file, 3648 .fallocate = btrfs_fallocate, 3649 .unlocked_ioctl = btrfs_ioctl, 3650 #ifdef CONFIG_COMPAT 3651 .compat_ioctl = btrfs_compat_ioctl, 3652 #endif 3653 .remap_file_range = btrfs_remap_file_range, 3654 }; 3655 3656 void __cold btrfs_auto_defrag_exit(void) 3657 { 3658 kmem_cache_destroy(btrfs_inode_defrag_cachep); 3659 } 3660 3661 int __init btrfs_auto_defrag_init(void) 3662 { 3663 btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag", 3664 sizeof(struct inode_defrag), 0, 3665 SLAB_MEM_SPREAD, 3666 NULL); 3667 if (!btrfs_inode_defrag_cachep) 3668 return -ENOMEM; 3669 3670 return 0; 3671 } 3672 3673 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end) 3674 { 3675 int ret; 3676 3677 /* 3678 * So with compression we will find and lock a dirty page and clear the 3679 * first one as dirty, setup an async extent, and immediately return 3680 * with the entire range locked but with nobody actually marked with 3681 * writeback. So we can't just filemap_write_and_wait_range() and 3682 * expect it to work since it will just kick off a thread to do the 3683 * actual work. So we need to call filemap_fdatawrite_range _again_ 3684 * since it will wait on the page lock, which won't be unlocked until 3685 * after the pages have been marked as writeback and so we're good to go 3686 * from there. We have to do this otherwise we'll miss the ordered 3687 * extents and that results in badness. Please Josef, do not think you 3688 * know better and pull this out at some point in the future, it is 3689 * right and you are wrong. 3690 */ 3691 ret = filemap_fdatawrite_range(inode->i_mapping, start, end); 3692 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 3693 &BTRFS_I(inode)->runtime_flags)) 3694 ret = filemap_fdatawrite_range(inode->i_mapping, start, end); 3695 3696 return ret; 3697 } 3698