// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for extents newer
	 * than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

/*
 * Insert a record for an inode into the defrag tree.  The lock must be
 * held already.
 *
 * If you're inserting a record for an older transid than an existing
 * record, the transid already in the tree is lowered.
 *
 * If an existing record is found the defrag item you pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/*
			 * If we're reinserting an entry for an old defrag
			 * run, make sure to lower the transid of our
			 * existing record.
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}
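/*
 * Example for the defrag tree above: entries are ordered by (root, ino),
 * so (root 5, ino 257) sorts before (root 5, ino 260), which sorts before
 * (root 7, ino 100).  A second insert for the same (root, ino) merges the
 * lower transid and the higher last_offset into the existing record and
 * returns -EEXIST, after which the caller frees the duplicate it passed in.
 */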
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}

/*
 * Insert a defrag record for this inode if auto defrag is enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read this inode, this new inode
		 * doesn't have IN_DEFRAG set.  In that case we may find an
		 * existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Requeue the defrag object.  If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret;

	if (!__need_auto_defrag(fs_info))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * them together.
	 */
	spin_lock(&fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
/*
 * Pick the defragable inode that we want; if it doesn't exist, we will
 * get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}
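/*
 * Example for btrfs_pick_defrag_inode() above: with records for
 * (root 5, ino 257) and (root 5, ino 300) in the tree, a lookup for
 * (root 5, ino 258) finds no exact match, so the search lands on a
 * neighbouring node and, if that node sorts below the lookup key,
 * rb_next() advances to (root 5, ino 300).  Whichever record is picked is
 * erased from the tree and handed to the caller.
 */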
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int ret;

	/* get the inode */
	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
	btrfs_put_root(inode_root);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * If we filled the whole defrag batch, there must be more work to
	 * do.  Queue this defrag again.
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * We didn't fill our defrag batch, but we didn't start at
		 * zero.  Make sure we loop around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * Run through the list of inodes in the FS that need defragging.
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * During unmount, we use the transaction_wait queue to wait for the
	 * defragger to stop.
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/*
 * Simple helper to fault in pages and copy.  This should go away and be
 * replaced with calls into generic code.
 */
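/*
 * Example of the copy loop below: for a write at pos = 6144 with 4KiB
 * pages, offset_in_page(pos) = 2048, so the first iteration copies at most
 * 2048 bytes into page 0.  A full copy of that remainder resets offset to
 * 0 and advances to page 1, while a short copy only bumps offset and
 * retries the same page; a copy of 0 bytes breaks out so the caller can
 * fault the page in.
 */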
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * If we get a partial write, we can end up with partially
		 * up to date pages.  These add a lot of complexity, so make
		 * sure they don't happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall back to
		 * page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_write_iter to fault page */
		if (unlikely(copied == 0))
			break;

		if (copied < PAGE_SIZE - offset) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * Unlocks pages after btrfs_file_write is done with them.
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/*
		 * PageChecked is some magic around finding pages that have
		 * been modified without going through btrfs_set_page_dirty;
		 * clear it here.  There should be no need to mark the pages
		 * accessed as prepare_pages should have marked them accessed
		 * in prepare_pages via find_or_create_page().
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}

/*
 * After btrfs_copy_from_user(), update the following things for delalloc:
 * - Mark newly dirtied pages as DELALLOC in the io tree.
 *   Used to advise which range is to be written back.
 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
 * - Update inode size for past EOF write
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached, bool noreserve)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(&inode->vfs_inode);
	unsigned int extra_bits = 0;

	if (write_bytes == 0)
		return 0;

	if (noreserve)
		extra_bits |= EXTENT_NORESERVE;

	start_pos = round_down(pos, fs_info->sectorsize);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly
	 */
	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, cached);

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * We've only changed i_size in ram, and we haven't updated the disk
	 * i_size.  There is no need to log the inode at this time.
	 */
	if (end_pos > isize)
		i_size_write(&inode->vfs_inode, end_pos);
	return 0;
}
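/*
 * Example for btrfs_dirty_pages() above: with a 4KiB sectorsize, a write
 * of write_bytes = 2048 at pos = 6144 gives start_pos = 4096,
 * num_bytes = round_up(2048 + 6144 - 4096, 4096) = 4096 and
 * end_of_last_block = 8191, i.e. delalloc is always accounted in whole
 * sectors even for a sub-sector write.
 */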
/*
 * This drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start + diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				ret = add_extent_mapping(em_tree, split,
							 modified);
				ASSERT(ret == 0); /* Logic error */
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em))
			remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}
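/*
 * Example for btrfs_drop_extent_cache() above: dropping the range
 * [8K, 12K) from a cached extent map covering [4K, 16K) replaces it with a
 * front piece [4K, 8K) and a tail piece [12K, 16K), which is why up to two
 * pre-allocated extent maps (split and split2) may be consumed per
 * iteration.
 */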
/*
 * This is very complex, but the basic idea is to drop all extents in the
 * range described by args->start and args->end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
 * to deal with that.  We set the field 'bytes_found' of the arguments
 * structure with the number of allocated bytes found in the target range,
 * so that the caller can update the inode's number of bytes in an atomic
 * way when replacing extents in a range to avoid races with stat(2).
 */
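/*
 * Example: dropping [2K, 6K) from a file extent item covering [0K, 8K)
 * first duplicates the item and trims the front copy to [0K, 2K) (first
 * case in the loop below), then trims the remaining item to [6K, 8K) by
 * moving its key offset and extent_offset forward to args->end (second
 * case below).
 */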
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_inode *inode,
		       struct btrfs_drop_extents_args *args)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = args->start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = args->start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	int leafs_visited = 0;
	struct btrfs_path *path = args->path;

	args->bytes_found = 0;
	args->extent_inserted = false;

	/* Must always have a path if ->replace_extent is true */
	ASSERT(!(args->replace_extent && !args->path));

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (args->drop_cache)
		btrfs_drop_extent_cache(inode, args->start, args->end - 1, 0);

	if (args->start >= inode->disk_i_size && !args->replace_extent)
		modify_tree = 0;

	update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
		       root == fs_info->tree_root);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths.  They
		 * used to be created (bug) if while punching holes we hit
		 * -ENOSPC condition.  So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, args->start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += args->start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						args->start - extent_offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = args->start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (args->start <= key.offset && args->end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			extent_offset += args->end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += args->end - key.offset;
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += extent_end - args->start;
			if (args->end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}
		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (args->start <= key.offset && args->end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				args->bytes_found += extent_end - key.offset;
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset);
				ret = btrfs_free_extent(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
				args->bytes_found += extent_end - key.offset;
			}

			if (args->end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are moved off from our leaf to its immediate left
		 * or right neighbor leafs, we end up with a correct and
		 * adjusted path->slots[0] for our insertion (if
		 * args->replace_extent).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && args->replace_extent && leafs_visited == 1 &&
	    path->locks[0] == BTRFS_WRITE_LOCK &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + args->extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = args->start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key,
				       &args->extent_item_size, 1);
		args->extent_inserted = true;
	}

	if (!args->path)
		btrfs_free_path(path);
	else if (!args->extent_inserted)
		btrfs_release_path(path);
out:
	args->drop_end = found ? min(args->end, last_end) : args->end;

	return ret;
}
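/*
 * Helper for btrfs_mark_extent_written(): check whether the file extent
 * item in the given leaf slot refers to the same physical extent (bytenr)
 * with a contiguous logical/physical mapping (derived from orig_offset),
 * so the two items can be merged.  *start/*end act as in/out parameters:
 * non-zero values passed in must match the candidate's boundaries exactly,
 * and on success they are set to the candidate's range.
 */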
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
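/*
 * Example: writing [2K, 6K) into a pre-allocated extent covering [0K, 8K)
 * splits it into three items: prealloc [0K, 2K), regular [2K, 6K) and
 * prealloc [6K, 8K).  If the written part touches either edge, or a
 * neighbouring regular item from the same physical extent can absorb it,
 * fewer items result.
 */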
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;
	ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}
	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
				       num_bytes, 0);
		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
				    orig_offset);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0);
	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * On error we return an unlocked page and the error value; on success we
 * return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}
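/*
 * Note that prepare_pages() below only passes the first and the last page
 * of the range through prepare_uptodate_page(): a write that does not
 * start or end on a page boundary must merge with existing data there,
 * while all pages in between are fully overwritten by the copy and don't
 * need to be read in first.
 */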
/*
 * This just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		err = set_page_extent_mapped(pages[i]);
		if (err < 0) {
			faili = i;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;

}

/*
 * This function locks the extent and properly waits for data=ordered
 * extents to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - the pages need to be re-prepared
 * any other value < 0 - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
				 cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent_cached(&inode->io_tree, start_pos,
					     last_pos, cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(ordered, 1);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * We should be called after prepare_pages() which should have locked
	 * all pages in the range.
	 */
	for (i = 0; i < num_pages; i++)
		WARN_ON(!PageLocked(pages[i]));

	return ret;
}

static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes, bool nowait)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return 0;

	if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;
	num_bytes = lockend - lockstart + 1;

	if (nowait) {
		struct btrfs_ordered_extent *ordered;

		if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
			return -EAGAIN;

		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     num_bytes);
		if (ordered) {
			btrfs_put_ordered_extent(ordered);
			ret = -EAGAIN;
			goto out_unlock;
		}
	} else {
		btrfs_lock_and_flush_ordered_range(inode, lockstart,
						   lockend, NULL);
	}

	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			       NULL, NULL, NULL, false);
	if (ret <= 0) {
		ret = 0;
		if (!nowait)
			btrfs_drew_write_unlock(&root->snapshot_lock);
	} else {
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	}
out_unlock:
	unlock_extent(&inode->io_tree, lockstart, lockend);

	return ret;
}
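/*
 * Example of the clamp in check_can_nocow() above: with a 4KiB sectorsize,
 * pos = 10000 and *write_bytes = 3000 give lockstart = 8192 and
 * num_bytes = 8192.  If can_nocow_extent() trims num_bytes to 4096 (only
 * the first block is NOCOW-able), *write_bytes becomes
 * min(3000, 4096 - 10000 + 8192) = 2288, i.e. exactly up to the end of the
 * NOCOW-able block.
 */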
static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
			      size_t *write_bytes)
{
	return check_can_nocow(inode, pos, write_bytes, true);
}

/*
 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
 *
 * @pos:         File offset
 * @write_bytes: The length to write, will be updated to the nocow writeable
 *               range
 *
 * This function will flush ordered extents in the range to ensure proper
 * nocow checks.
 *
 * Return:
 * > 0          and update @write_bytes if we can do nocow write
 *   0          if we can't do nocow write
 * -EAGAIN      if we can't get the needed lock or there are ordered extents,
 *              in the (nowait == true) case
 * < 0          if other error happened
 *
 * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
 */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes)
{
	return check_can_nocow(inode, pos, write_bytes, false);
}

void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
{
	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}

static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	if (!timespec64_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec64_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}
static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
			     size_t count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	loff_t pos = iocb->ki_pos;
	int ret;
	loff_t oldsize;
	loff_t start_pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		size_t nocow_bytes = count;

		/* We will allocate space in case nodatacow is not set, so bail */
		if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes) <= 0)
			return -EAGAIN;
		/*
		 * There are holes in the range or parts of the range that must
		 * be COWed (shared extents, RO block groups, etc), so just bail
		 * out.
		 */
		if (nocow_bytes < count)
			return -EAGAIN;
	}

	current->backing_dev_info = inode_to_bdi(inode);
	ret = file_remove_privs(file);
	if (ret)
		return ret;

	/*
	 * We reserve space for updating the inode when we reserve space for
	 * the extent we are going to write, so we will enospc out there.  We
	 * don't need to start yet another transaction to update the inode as
	 * we will update the inode when we finish writing whatever data we
	 * write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);

		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
		if (ret) {
			current->backing_dev_info = NULL;
			return ret;
		}
	}

	return 0;
}

static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
					     struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	ssize_t ret;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;
	loff_t old_isize = i_size_read(inode);
	unsigned int ilock_flags = 0;

	if (iocb->ki_flags & IOCB_NOWAIT)
		ilock_flags |= BTRFS_ILOCK_TRY;

	ret = btrfs_inode_lock(inode, ilock_flags);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, i);
	if (ret <= 0)
		goto out;

	ret = btrfs_write_check(iocb, i, ret);
	if (ret < 0)
		goto out;

	pos = iocb->ki_pos;
	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
		     PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}
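	/*
	 * Example: a 64KiB write with 4KiB pages needs 16 page pointers,
	 * well below the cap of one page worth of pointers (512 on 64-bit).
	 * nrptrs is further limited by the task's dirty-page allowance and
	 * floored at 8 so that each batch still makes reasonable progress.
	 */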
	while (iov_iter_count(i) > 0) {
		struct extent_state *cached_state = NULL;
		size_t offset = offset_in_page(pos);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages;
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		only_release_metadata = false;
		sector_offset = pos & (fs_info->sectorsize - 1);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &data_reserved, pos,
						  write_bytes);
		if (ret < 0) {
			/*
			 * If we don't have to COW at the offset, reserve
			 * metadata only.  write_bytes may get smaller than
			 * requested here.
			 */
			if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
						   &write_bytes) > 0)
				only_release_metadata = true;
			else
				break;
		}

		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
		WARN_ON(num_pages > nrptrs);
		reserve_bytes = round_up(write_bytes + sector_offset,
					 fs_info->sectorsize);
		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
						data_reserved, pos,
						write_bytes);
			else
				btrfs_check_nocow_unlock(BTRFS_I(inode));
			break;
		}

		release_bytes = reserve_bytes;
again:
		/*
		 * This is going to set up the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, &cached_state);
		if (extents_locked < 0) {
			if (extents_locked == -EAGAIN)
				goto again;
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					 fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * If we have trouble faulting in the pages, fall back to one
		 * page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(BTRFS_I(inode),
						data_reserved, __pos,
						release_bytes, true);
			}
		}
		release_bytes = round_up(copied + sector_offset,
					 fs_info->sectorsize);

		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
					dirty_pages, pos, copied,
					&cached_state, only_release_metadata);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages().  Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_check_nocow_unlock(BTRFS_I(inode));

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_check_nocow_unlock(BTRFS_I(inode));
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					release_bytes, true);
		} else {
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	if (num_written > 0) {
		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
		iocb->ki_pos += num_written;
	}
out:
	btrfs_inode_unlock(inode, ilock_flags);
	return num_written ? num_written : ret;
}

static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
			       const struct iov_iter *iter, loff_t offset)
{
	const u32 blocksize_mask = fs_info->sectorsize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
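/*
 * Example: with a 4KiB sectorsize, a direct IO at offset 512, or one whose
 * iovec base addresses or lengths are not 4KiB aligned, fails
 * check_direct_IO() above and btrfs_direct_write() below falls back to the
 * buffered path.
 */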
static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	loff_t pos;
	ssize_t written = 0;
	ssize_t written_buffered;
	loff_t endbyte;
	ssize_t err;
	unsigned int ilock_flags = 0;
	struct iomap_dio *dio = NULL;

	if (iocb->ki_flags & IOCB_NOWAIT)
		ilock_flags |= BTRFS_ILOCK_TRY;

	/* If the write DIO is within EOF, use a shared lock */
	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
		ilock_flags |= BTRFS_ILOCK_SHARED;

relock:
	err = btrfs_inode_lock(inode, ilock_flags);
	if (err < 0)
		return err;

	err = generic_write_checks(iocb, from);
	if (err <= 0) {
		btrfs_inode_unlock(inode, ilock_flags);
		return err;
	}

	err = btrfs_write_check(iocb, from, err);
	if (err < 0) {
		btrfs_inode_unlock(inode, ilock_flags);
		goto out;
	}

	pos = iocb->ki_pos;
	/*
	 * Re-check since the file size may have changed just before taking
	 * the lock, or pos may have changed because of O_APPEND in
	 * generic_write_checks().
	 */
	if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
	    pos + iov_iter_count(from) > i_size_read(inode)) {
		btrfs_inode_unlock(inode, ilock_flags);
		ilock_flags &= ~BTRFS_ILOCK_SHARED;
		goto relock;
	}

	if (check_direct_IO(fs_info, from, pos)) {
		btrfs_inode_unlock(inode, ilock_flags);
		goto buffered;
	}

	dio = __iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			     0);

	btrfs_inode_unlock(inode, ilock_flags);

	if (IS_ERR_OR_NULL(dio)) {
		err = PTR_ERR_OR_ZERO(dio);
		if (err < 0 && err != -ENOTBLK)
			goto out;
	} else {
		written = iomap_dio_complete(dio);
	}

	if (written < 0 || !iov_iter_count(from)) {
		err = written;
		goto out;
	}

buffered:
	pos = iocb->ki_pos;
	written_buffered = btrfs_buffered_write(iocb, from);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted.  We want the next direct IO read to
	 * be able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return written ? written : err;
}
2066 */ 2067 blk_start_plug(&plug); 2068 atomic_inc(&BTRFS_I(inode)->sync_writers); 2069 ret = btrfs_fdatawrite_range(inode, start, end); 2070 atomic_dec(&BTRFS_I(inode)->sync_writers); 2071 blk_finish_plug(&plug); 2072 2073 return ret; 2074 } 2075 2076 /* 2077 * fsync call for both files and directories. This logs the inode into 2078 * the tree log instead of forcing full commits whenever possible. 2079 * 2080 * It needs to call filemap_fdatawait so that all the ordered extent updates 2081 * in the metadata btree are up to date for copying to the log. 2082 * 2083 * It drops the inode mutex before doing the tree log commit. This is an 2084 * important optimization for directories because holding the mutex prevents 2085 * new operations on the dir while we write to disk. 2086 */ 2087 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) 2088 { 2089 struct dentry *dentry = file_dentry(file); 2090 struct inode *inode = d_inode(dentry); 2091 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2092 struct btrfs_root *root = BTRFS_I(inode)->root; 2093 struct btrfs_trans_handle *trans; 2094 struct btrfs_log_ctx ctx; 2095 int ret = 0, err; 2096 u64 len; 2097 bool full_sync; 2098 2099 trace_btrfs_sync_file(file, datasync); 2100 2101 btrfs_init_log_ctx(&ctx, inode); 2102 2103 /* 2104 * Always set the range to a full range, otherwise we can get into 2105 * several problems, from missing file extent items to represent holes 2106 * when not using the NO_HOLES feature, to log tree corruption due to 2107 * races between hole detection during logging and completion of ordered 2108 * extents outside the range, to missing checksums due to ordered extents 2109 * for which we flushed only a subset of their pages. 2110 */ 2111 start = 0; 2112 end = LLONG_MAX; 2113 len = (u64)LLONG_MAX + 1; 2114 2115 /* 2116 * We write the dirty pages in the range and wait until they complete 2117 * outside of the ->i_mutex, so that the dirty pages can be flushed by 2118 * multiple tasks and performance improves. See 2119 * btrfs_wait_ordered_range for an explanation of the ASYNC check. 2120 */ 2121 ret = start_ordered_ops(inode, start, end); 2122 if (ret) 2123 goto out; 2124 2125 inode_lock(inode); 2126 2127 atomic_inc(&root->log_batch); 2128 2129 /* 2130 * Always check for the full sync flag while holding the inode's lock, 2131 * to avoid races with other tasks. The flag must be either set all the 2132 * time during logging or off all the time while logging. 2133 */ 2134 full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 2135 &BTRFS_I(inode)->runtime_flags); 2136 2137 /* 2138 * Before we acquired the inode's lock, someone may have dirtied more 2139 * pages in the target range. We need to make sure that writeback for 2140 * any such pages does not start while we are logging the inode, because 2141 * if it does, any of the following might happen when we are not doing a 2142 * full inode sync: 2143 * 2144 * 1) We log an extent after its writeback finishes but before its 2145 * checksums are added to the csum tree, leading to -EIO errors 2146 * when attempting to read the extent after a log replay. 2147 * 2148 * 2) We can end up logging an extent before its writeback finishes. 2149 * Therefore after the log replay we will have a file extent item 2150 * pointing to an unwritten extent (and no data checksums as well). 2151 * 2152 * So trigger writeback for any eventual new dirty pages and then we 2153 * wait for all ordered extents to complete below.
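* * A compact, hypothetical interleaving for case 1) above: writeback of extent E finishes; a fast fsync logs E before the ordered extent completion inserts E's checksums into the csum tree; power fails; after log replay the file extent item points to E but no checksums exist, so reading E returns -EIO.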
2154 */ 2155 ret = start_ordered_ops(inode, start, end); 2156 if (ret) { 2157 inode_unlock(inode); 2158 goto out; 2159 } 2160 2161 /* 2162 * We have to do this here to avoid the priority inversion of waiting on 2163 * IO of a lower priority task while holding a transaction open. 2164 * 2165 * For a full fsync we wait for the ordered extents to complete while 2166 * for a fast fsync we wait just for writeback to complete, and then 2167 * attach the ordered extents to the transaction so that a transaction 2168 * commit waits for their completion, to avoid data loss if we fsync, 2169 * the current transaction commits before the ordered extents complete, 2170 * and a power failure happens right after that. 2171 * 2172 * For zoned filesystems, if a write IO uses a ZONE_APPEND command, the 2173 * logical address recorded in the ordered extent may change. We need 2174 * to wait for the IO to stabilize the logical address. 2175 */ 2176 if (full_sync || btrfs_is_zoned(fs_info)) { 2177 ret = btrfs_wait_ordered_range(inode, start, len); 2178 } else { 2179 /* 2180 * Get our ordered extents as soon as possible to avoid doing 2181 * checksum lookups in the csum tree, and use instead the 2182 * checksums attached to the ordered extents. 2183 */ 2184 btrfs_get_ordered_extents_for_logging(BTRFS_I(inode), 2185 &ctx.ordered_extents); 2186 ret = filemap_fdatawait_range(inode->i_mapping, start, end); 2187 } 2188 2189 if (ret) 2190 goto out_release_extents; 2191 2192 atomic_inc(&root->log_batch); 2193 2194 /* 2195 * If we are doing a fast fsync we cannot bail out if the inode's 2196 * last_trans is <= the last committed transaction, because we only 2197 * update the last_trans of the inode during ordered extent completion, 2198 * and for a fast fsync we don't wait for that, we only wait for the 2199 * writeback to complete. 2200 */ 2201 smp_mb(); 2202 if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) || 2203 (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed && 2204 (full_sync || list_empty(&ctx.ordered_extents)))) { 2205 /* 2206 * We've had everything committed since the last time we were 2207 * modified so clear this flag in case it was set for whatever 2208 * reason; it's no longer relevant. 2209 */ 2210 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 2211 &BTRFS_I(inode)->runtime_flags); 2212 /* 2213 * An ordered extent might have started before and completed 2214 * already with io errors, in which case the inode was not 2215 * updated and we end up here. So check the inode's mapping 2216 * for any errors that might have happened since we last 2217 * called fsync. 2218 */ 2219 ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err); 2220 goto out_release_extents; 2221 } 2222 2223 /* 2224 * We use start here because we will need to wait on the IO to complete 2225 * in btrfs_sync_log, which could require joining a transaction (for 2226 * example checking cross references in the nocow path). If we use join 2227 * here we could get into a situation where we're waiting on IO to 2228 * happen that is blocked on a transaction trying to commit. With start 2229 * we inc the extwriter counter, so we wait for all extwriters to exit 2230 * before we start blocking joiners. This comment is to keep somebody 2231 * from thinking they are super smart and changing this to 2232 * btrfs_join_transaction *cough*Josef*cough*.
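* * (Illustrative failure chain if join were used: fsync joins transaction T, then waits for IO; completing that IO needs to join T as well, but T has started committing and blocks new joiners, while the commit itself waits for fsync's handle to be released: a circular wait.)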
2233 */ 2234 trans = btrfs_start_transaction(root, 0); 2235 if (IS_ERR(trans)) { 2236 ret = PTR_ERR(trans); 2237 goto out_release_extents; 2238 } 2239 trans->in_fsync = true; 2240 2241 ret = btrfs_log_dentry_safe(trans, dentry, &ctx); 2242 btrfs_release_log_ctx_extents(&ctx); 2243 if (ret < 0) { 2244 /* Fallthrough and commit/free transaction. */ 2245 ret = 1; 2246 } 2247 2248 /* we've logged all the items and now have a consistent 2249 * version of the file in the log. It is possible that 2250 * someone will come in and modify the file, but that's 2251 * fine because the log is consistent on disk, and we 2252 * have references to all of the file's extents 2253 * 2254 * It is possible that someone will come in and log the 2255 * file again, but that will end up using the synchronization 2256 * inside btrfs_sync_log to keep things safe. 2257 */ 2258 inode_unlock(inode); 2259 2260 if (ret != BTRFS_NO_LOG_SYNC) { 2261 if (!ret) { 2262 ret = btrfs_sync_log(trans, root, &ctx); 2263 if (!ret) { 2264 ret = btrfs_end_transaction(trans); 2265 goto out; 2266 } 2267 } 2268 if (!full_sync) { 2269 ret = btrfs_wait_ordered_range(inode, start, len); 2270 if (ret) { 2271 btrfs_end_transaction(trans); 2272 goto out; 2273 } 2274 } 2275 ret = btrfs_commit_transaction(trans); 2276 } else { 2277 ret = btrfs_end_transaction(trans); 2278 } 2279 out: 2280 ASSERT(list_empty(&ctx.list)); 2281 err = file_check_and_advance_wb_err(file); 2282 if (!ret) 2283 ret = err; 2284 return ret > 0 ? -EIO : ret; 2285 2286 out_release_extents: 2287 btrfs_release_log_ctx_extents(&ctx); 2288 inode_unlock(inode); 2289 goto out; 2290 } 2291 2292 static const struct vm_operations_struct btrfs_file_vm_ops = { 2293 .fault = filemap_fault, 2294 .map_pages = filemap_map_pages, 2295 .page_mkwrite = btrfs_page_mkwrite, 2296 }; 2297 2298 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) 2299 { 2300 struct address_space *mapping = filp->f_mapping; 2301 2302 if (!mapping->a_ops->readpage) 2303 return -ENOEXEC; 2304 2305 file_accessed(filp); 2306 vma->vm_ops = &btrfs_file_vm_ops; 2307 2308 return 0; 2309 } 2310 2311 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf, 2312 int slot, u64 start, u64 end) 2313 { 2314 struct btrfs_file_extent_item *fi; 2315 struct btrfs_key key; 2316 2317 if (slot < 0 || slot >= btrfs_header_nritems(leaf)) 2318 return 0; 2319 2320 btrfs_item_key_to_cpu(leaf, &key, slot); 2321 if (key.objectid != btrfs_ino(inode) || 2322 key.type != BTRFS_EXTENT_DATA_KEY) 2323 return 0; 2324 2325 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 2326 2327 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) 2328 return 0; 2329 2330 if (btrfs_file_extent_disk_bytenr(leaf, fi)) 2331 return 0; 2332 2333 if (key.offset == end) 2334 return 1; 2335 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start) 2336 return 1; 2337 return 0; 2338 } 2339 2340 static int fill_holes(struct btrfs_trans_handle *trans, 2341 struct btrfs_inode *inode, 2342 struct btrfs_path *path, u64 offset, u64 end) 2343 { 2344 struct btrfs_fs_info *fs_info = trans->fs_info; 2345 struct btrfs_root *root = inode->root; 2346 struct extent_buffer *leaf; 2347 struct btrfs_file_extent_item *fi; 2348 struct extent_map *hole_em; 2349 struct extent_map_tree *em_tree = &inode->extent_tree; 2350 struct btrfs_key key; 2351 int ret; 2352 2353 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 2354 goto out; 2355 2356 key.objectid = btrfs_ino(inode); 2357 key.type = BTRFS_EXTENT_DATA_KEY; 2358 key.offset = 
offset; 2359 2360 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2361 if (ret <= 0) { 2362 /* 2363 * We should have dropped this offset, so if we find it then 2364 * something has gone horribly wrong. 2365 */ 2366 if (ret == 0) 2367 ret = -EINVAL; 2368 return ret; 2369 } 2370 2371 leaf = path->nodes[0]; 2372 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) { 2373 u64 num_bytes; 2374 2375 path->slots[0]--; 2376 fi = btrfs_item_ptr(leaf, path->slots[0], 2377 struct btrfs_file_extent_item); 2378 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + 2379 end - offset; 2380 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); 2381 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes); 2382 btrfs_set_file_extent_offset(leaf, fi, 0); 2383 btrfs_mark_buffer_dirty(leaf); 2384 goto out; 2385 } 2386 2387 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) { 2388 u64 num_bytes; 2389 2390 key.offset = offset; 2391 btrfs_set_item_key_safe(fs_info, path, &key); 2392 fi = btrfs_item_ptr(leaf, path->slots[0], 2393 struct btrfs_file_extent_item); 2394 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end - 2395 offset; 2396 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); 2397 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes); 2398 btrfs_set_file_extent_offset(leaf, fi, 0); 2399 btrfs_mark_buffer_dirty(leaf); 2400 goto out; 2401 } 2402 btrfs_release_path(path); 2403 2404 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), 2405 offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0); 2406 if (ret) 2407 return ret; 2408 2409 out: 2410 btrfs_release_path(path); 2411 2412 hole_em = alloc_extent_map(); 2413 if (!hole_em) { 2414 btrfs_drop_extent_cache(inode, offset, end - 1, 0); 2415 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); 2416 } else { 2417 hole_em->start = offset; 2418 hole_em->len = end - offset; 2419 hole_em->ram_bytes = hole_em->len; 2420 hole_em->orig_start = offset; 2421 2422 hole_em->block_start = EXTENT_MAP_HOLE; 2423 hole_em->block_len = 0; 2424 hole_em->orig_block_len = 0; 2425 hole_em->compress_type = BTRFS_COMPRESS_NONE; 2426 hole_em->generation = trans->transid; 2427 2428 do { 2429 btrfs_drop_extent_cache(inode, offset, end - 1, 0); 2430 write_lock(&em_tree->lock); 2431 ret = add_extent_mapping(em_tree, hole_em, 1); 2432 write_unlock(&em_tree->lock); 2433 } while (ret == -EEXIST); 2434 free_extent_map(hole_em); 2435 if (ret) 2436 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 2437 &inode->runtime_flags); 2438 } 2439 2440 return 0; 2441 } 2442 2443 /* 2444 * Find a hole extent on the given inode and change start/len to the end of 2445 * the hole extent (a hole/vacuum extent is one whose em->start <= start && 2446 * em->start + em->len > start). 2447 * When a hole extent is found, return 1 and modify start/len. 2448 */ 2449 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len) 2450 { 2451 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2452 struct extent_map *em; 2453 int ret = 0; 2454 2455 em = btrfs_get_extent(inode, NULL, 0, 2456 round_down(*start, fs_info->sectorsize), 2457 round_up(*len, fs_info->sectorsize)); 2458 if (IS_ERR(em)) 2459 return PTR_ERR(em); 2460 2461 /* Hole or vacuum extent (the latter only exists in no-holes mode) */ 2462 if (em->block_start == EXTENT_MAP_HOLE) { 2463 ret = 1; 2464 *len = em->start + em->len > *start + *len ? 
2465 0 : *start + *len - em->start - em->len; 2466 *start = em->start + em->len; 2467 } 2468 free_extent_map(em); 2469 return ret; 2470 } 2471 2472 static int btrfs_punch_hole_lock_range(struct inode *inode, 2473 const u64 lockstart, 2474 const u64 lockend, 2475 struct extent_state **cached_state) 2476 { 2477 while (1) { 2478 struct btrfs_ordered_extent *ordered; 2479 int ret; 2480 2481 truncate_pagecache_range(inode, lockstart, lockend); 2482 2483 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 2484 cached_state); 2485 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), 2486 lockend); 2487 2488 /* 2489 * We need to make sure we have no ordered extents in this range, 2490 * and that nobody raced in and read a page in this range; if they 2491 * did, we need to try again. 2492 */ 2493 if ((!ordered || 2494 (ordered->file_offset + ordered->num_bytes <= lockstart || 2495 ordered->file_offset > lockend)) && 2496 !filemap_range_has_page(inode->i_mapping, 2497 lockstart, lockend)) { 2498 if (ordered) 2499 btrfs_put_ordered_extent(ordered); 2500 break; 2501 } 2502 if (ordered) 2503 btrfs_put_ordered_extent(ordered); 2504 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, 2505 lockend, cached_state); 2506 ret = btrfs_wait_ordered_range(inode, lockstart, 2507 lockend - lockstart + 1); 2508 if (ret) 2509 return ret; 2510 } 2511 return 0; 2512 } 2513 2514 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans, 2515 struct btrfs_inode *inode, 2516 struct btrfs_path *path, 2517 struct btrfs_replace_extent_info *extent_info, 2518 const u64 replace_len, 2519 const u64 bytes_to_drop) 2520 { 2521 struct btrfs_fs_info *fs_info = trans->fs_info; 2522 struct btrfs_root *root = inode->root; 2523 struct btrfs_file_extent_item *extent; 2524 struct extent_buffer *leaf; 2525 struct btrfs_key key; 2526 int slot; 2527 struct btrfs_ref ref = { 0 }; 2528 int ret; 2529 2530 if (replace_len == 0) 2531 return 0; 2532 2533 if (extent_info->disk_offset == 0 && 2534 btrfs_fs_incompat(fs_info, NO_HOLES)) { 2535 btrfs_update_inode_bytes(inode, 0, bytes_to_drop); 2536 return 0; 2537 } 2538 2539 key.objectid = btrfs_ino(inode); 2540 key.type = BTRFS_EXTENT_DATA_KEY; 2541 key.offset = extent_info->file_offset; 2542 ret = btrfs_insert_empty_item(trans, root, path, &key, 2543 sizeof(struct btrfs_file_extent_item)); 2544 if (ret) 2545 return ret; 2546 leaf = path->nodes[0]; 2547 slot = path->slots[0]; 2548 write_extent_buffer(leaf, extent_info->extent_buf, 2549 btrfs_item_ptr_offset(leaf, slot), 2550 sizeof(struct btrfs_file_extent_item)); 2551 extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 2552 ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE); 2553 btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset); 2554 btrfs_set_file_extent_num_bytes(leaf, extent, replace_len); 2555 if (extent_info->is_new_extent) 2556 btrfs_set_file_extent_generation(leaf, extent, trans->transid); 2557 btrfs_mark_buffer_dirty(leaf); 2558 btrfs_release_path(path); 2559 2560 ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset, 2561 replace_len); 2562 if (ret) 2563 return ret; 2564 2565 /* If it's a hole, nothing more needs to be done. 
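* A hole is encoded by extent_info->disk_offset == 0: there is no backing disk extent to add a reference to, so only the inode's byte accounting is updated below.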
*/ 2566 if (extent_info->disk_offset == 0) { 2567 btrfs_update_inode_bytes(inode, 0, bytes_to_drop); 2568 return 0; 2569 } 2570 2571 btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop); 2572 2573 if (extent_info->is_new_extent && extent_info->insertions == 0) { 2574 key.objectid = extent_info->disk_offset; 2575 key.type = BTRFS_EXTENT_ITEM_KEY; 2576 key.offset = extent_info->disk_len; 2577 ret = btrfs_alloc_reserved_file_extent(trans, root, 2578 btrfs_ino(inode), 2579 extent_info->file_offset, 2580 extent_info->qgroup_reserved, 2581 &key); 2582 } else { 2583 u64 ref_offset; 2584 2585 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, 2586 extent_info->disk_offset, 2587 extent_info->disk_len, 0); 2588 ref_offset = extent_info->file_offset - extent_info->data_offset; 2589 btrfs_init_data_ref(&ref, root->root_key.objectid, 2590 btrfs_ino(inode), ref_offset); 2591 ret = btrfs_inc_extent_ref(trans, &ref); 2592 } 2593 2594 extent_info->insertions++; 2595 2596 return ret; 2597 } 2598 2599 /* 2600 * The respective range must have been previously locked, as well as the inode. 2601 * The end offset is inclusive (last byte of the range). 2602 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing 2603 * the file range with an extent. 2604 * When not punching a hole, we don't want to end up in a state where we dropped 2605 * extents without inserting a new one, so we must abort the transaction to avoid 2606 * a corruption. 2607 */ 2608 int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path, 2609 const u64 start, const u64 end, 2610 struct btrfs_replace_extent_info *extent_info, 2611 struct btrfs_trans_handle **trans_out) 2612 { 2613 struct btrfs_drop_extents_args drop_args = { 0 }; 2614 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2615 u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1); 2616 u64 ino_size = round_up(inode->i_size, fs_info->sectorsize); 2617 struct btrfs_root *root = BTRFS_I(inode)->root; 2618 struct btrfs_trans_handle *trans = NULL; 2619 struct btrfs_block_rsv *rsv; 2620 unsigned int rsv_count; 2621 u64 cur_offset; 2622 u64 len = end - start; 2623 int ret = 0; 2624 2625 if (end <= start) 2626 return -EINVAL; 2627 2628 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 2629 if (!rsv) { 2630 ret = -ENOMEM; 2631 goto out; 2632 } 2633 rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1); 2634 rsv->failfast = 1; 2635 2636 /* 2637 * 1 - update the inode 2638 * 1 - removing the extents in the range 2639 * 1 - adding the hole extent if no_holes isn't set or if we are 2640 * replacing the range with a new extent 2641 */ 2642 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info) 2643 rsv_count = 3; 2644 else 2645 rsv_count = 2; 2646 2647 trans = btrfs_start_transaction(root, rsv_count); 2648 if (IS_ERR(trans)) { 2649 ret = PTR_ERR(trans); 2650 trans = NULL; 2651 goto out_free; 2652 } 2653 2654 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 2655 min_size, false); 2656 BUG_ON(ret); 2657 trans->block_rsv = rsv; 2658 2659 cur_offset = start; 2660 drop_args.path = path; 2661 drop_args.end = end + 1; 2662 drop_args.drop_cache = true; 2663 while (cur_offset < end) { 2664 drop_args.start = cur_offset; 2665 ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args); 2666 /* If we are punching a hole decrement the inode's byte count */ 2667 if (!extent_info) 2668 btrfs_update_inode_bytes(BTRFS_I(inode), 0, 2669 drop_args.bytes_found); 2670 if (ret != -ENOSPC) { 2671 /* 2672 * When cloning we 
want to avoid transaction aborts when 2673 * nothing was done and we are attempting to clone parts 2674 * of inline extents; in such cases -EOPNOTSUPP is 2675 * returned by __btrfs_drop_extents() without having 2676 * changed anything in the file. 2677 */ 2678 if (extent_info && !extent_info->is_new_extent && 2679 ret && ret != -EOPNOTSUPP) 2680 btrfs_abort_transaction(trans, ret); 2681 break; 2682 } 2683 2684 trans->block_rsv = &fs_info->trans_block_rsv; 2685 2686 if (!extent_info && cur_offset < drop_args.drop_end && 2687 cur_offset < ino_size) { 2688 ret = fill_holes(trans, BTRFS_I(inode), path, 2689 cur_offset, drop_args.drop_end); 2690 if (ret) { 2691 /* 2692 * If we failed then we didn't insert our hole 2693 * entries for the area we dropped, so now the 2694 * fs is corrupted and we must abort the 2695 * transaction. 2696 */ 2697 btrfs_abort_transaction(trans, ret); 2698 break; 2699 } 2700 } else if (!extent_info && cur_offset < drop_args.drop_end) { 2701 /* 2702 * We are past the i_size here, but since we didn't 2703 * insert holes we need to clear the mapped area so we 2704 * know to not set disk_i_size in this area until a new 2705 * file extent is inserted here. 2706 */ 2707 ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode), 2708 cur_offset, 2709 drop_args.drop_end - cur_offset); 2710 if (ret) { 2711 /* 2712 * We couldn't clear our area, so we could 2713 * presumably adjust up and corrupt the fs, so 2714 * we need to abort. 2715 */ 2716 btrfs_abort_transaction(trans, ret); 2717 break; 2718 } 2719 } 2720 2721 if (extent_info && 2722 drop_args.drop_end > extent_info->file_offset) { 2723 u64 replace_len = drop_args.drop_end - 2724 extent_info->file_offset; 2725 2726 ret = btrfs_insert_replace_extent(trans, BTRFS_I(inode), 2727 path, extent_info, replace_len, 2728 drop_args.bytes_found); 2729 if (ret) { 2730 btrfs_abort_transaction(trans, ret); 2731 break; 2732 } 2733 extent_info->data_len -= replace_len; 2734 extent_info->data_offset += replace_len; 2735 extent_info->file_offset += replace_len; 2736 } 2737 2738 cur_offset = drop_args.drop_end; 2739 2740 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 2741 if (ret) 2742 break; 2743 2744 btrfs_end_transaction(trans); 2745 btrfs_btree_balance_dirty(fs_info); 2746 2747 trans = btrfs_start_transaction(root, rsv_count); 2748 if (IS_ERR(trans)) { 2749 ret = PTR_ERR(trans); 2750 trans = NULL; 2751 break; 2752 } 2753 2754 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 2755 rsv, min_size, false); 2756 BUG_ON(ret); /* shouldn't happen */ 2757 trans->block_rsv = rsv; 2758 2759 if (!extent_info) { 2760 ret = find_first_non_hole(BTRFS_I(inode), &cur_offset, 2761 &len); 2762 if (unlikely(ret < 0)) 2763 break; 2764 if (ret && !len) { 2765 ret = 0; 2766 break; 2767 } 2768 } 2769 } 2770 2771 /* 2772 * If we were cloning, force the next fsync to be a full one since we 2773 * replaced (or just dropped in the case of cloning holes when 2774 * NO_HOLES is enabled) extents and extent maps. 2775 * This is for the sake of simplicity, and cloning into files larger 2776 * than 16Mb would force the full fsync anyway (when 2777 * try_release_extent_mapping() is invoked during page cache truncation).
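* * (Setting BTRFS_INODE_NEEDS_FULL_SYNC right below is what forces that full fsync: btrfs_sync_file() reads the bit into full_sync and clears it once everything is committed.)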
2778 */ 2779 if (extent_info && !extent_info->is_new_extent) 2780 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 2781 &BTRFS_I(inode)->runtime_flags); 2782 2783 if (ret) 2784 goto out_trans; 2785 2786 trans->block_rsv = &fs_info->trans_block_rsv; 2787 /* 2788 * If we are using the NO_HOLES feature we might already have had a 2789 * hole that overlaps a part of the region [lockstart, lockend] and 2790 * ends at (or beyond) lockend. Since we have no file extent items to 2791 * represent holes, drop_end can be less than lockend and so we must 2792 * make sure we have an extent map representing the existing hole (the 2793 * call to __btrfs_drop_extents() might have dropped the existing extent 2794 * map representing the existing hole), otherwise the fast fsync path 2795 * will not record the existence of the hole region 2796 * [existing_hole_start, lockend]. 2797 */ 2798 if (drop_args.drop_end <= end) 2799 drop_args.drop_end = end + 1; 2800 /* 2801 * Don't insert file hole extent item if it's for a range beyond eof 2802 * (because it's useless) or if it represents a 0 bytes range (when 2803 * cur_offset == drop_end). 2804 */ 2805 if (!extent_info && cur_offset < ino_size && 2806 cur_offset < drop_args.drop_end) { 2807 ret = fill_holes(trans, BTRFS_I(inode), path, 2808 cur_offset, drop_args.drop_end); 2809 if (ret) { 2810 /* Same comment as above. */ 2811 btrfs_abort_transaction(trans, ret); 2812 goto out_trans; 2813 } 2814 } else if (!extent_info && cur_offset < drop_args.drop_end) { 2815 /* See the comment in the loop above for the reasoning here. */ 2816 ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode), 2817 cur_offset, drop_args.drop_end - cur_offset); 2818 if (ret) { 2819 btrfs_abort_transaction(trans, ret); 2820 goto out_trans; 2821 } 2822 2823 } 2824 if (extent_info) { 2825 ret = btrfs_insert_replace_extent(trans, BTRFS_I(inode), path, 2826 extent_info, extent_info->data_len, 2827 drop_args.bytes_found); 2828 if (ret) { 2829 btrfs_abort_transaction(trans, ret); 2830 goto out_trans; 2831 } 2832 } 2833 2834 out_trans: 2835 if (!trans) 2836 goto out_free; 2837 2838 trans->block_rsv = &fs_info->trans_block_rsv; 2839 if (ret) 2840 btrfs_end_transaction(trans); 2841 else 2842 *trans_out = trans; 2843 out_free: 2844 btrfs_free_block_rsv(fs_info, rsv); 2845 out: 2846 return ret; 2847 } 2848 2849 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) 2850 { 2851 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2852 struct btrfs_root *root = BTRFS_I(inode)->root; 2853 struct extent_state *cached_state = NULL; 2854 struct btrfs_path *path; 2855 struct btrfs_trans_handle *trans = NULL; 2856 u64 lockstart; 2857 u64 lockend; 2858 u64 tail_start; 2859 u64 tail_len; 2860 u64 orig_start = offset; 2861 int ret = 0; 2862 bool same_block; 2863 u64 ino_size; 2864 bool truncated_block = false; 2865 bool updated_inode = false; 2866 2867 ret = btrfs_wait_ordered_range(inode, offset, len); 2868 if (ret) 2869 return ret; 2870 2871 inode_lock(inode); 2872 ino_size = round_up(inode->i_size, fs_info->sectorsize); 2873 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len); 2874 if (ret < 0) 2875 goto out_only_mutex; 2876 if (ret && !len) { 2877 /* Already in a large hole */ 2878 ret = 0; 2879 goto out_only_mutex; 2880 } 2881 2882 lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode))); 2883 lockend = round_down(offset + len, 2884 btrfs_inode_sectorsize(BTRFS_I(inode))) - 1; 2885 same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset)) 2886 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + 
len - 1)); 2887 /* 2888 * We needn't truncate any block which is beyond the end of the file 2889 * because we are sure there is no data there. 2890 */ 2891 /* 2892 * Only do this if we are in the same block and we aren't doing the 2893 * entire block. 2894 */ 2895 if (same_block && len < fs_info->sectorsize) { 2896 if (offset < ino_size) { 2897 truncated_block = true; 2898 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len, 2899 0); 2900 } else { 2901 ret = 0; 2902 } 2903 goto out_only_mutex; 2904 } 2905 2906 /* zero back part of the first block */ 2907 if (offset < ino_size) { 2908 truncated_block = true; 2909 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0); 2910 if (ret) { 2911 inode_unlock(inode); 2912 return ret; 2913 } 2914 } 2915 2916 /* Check the aligned pages after the first unaligned page. 2917 * If offset != orig_start, the first unaligned page and 2918 * possibly several following pages are already holes, so 2919 * the extra check can be skipped. */ 2920 if (offset == orig_start) { 2921 /* after truncating the page, check for holes again */ 2922 len = offset + len - lockstart; 2923 offset = lockstart; 2924 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len); 2925 if (ret < 0) 2926 goto out_only_mutex; 2927 if (ret && !len) { 2928 ret = 0; 2929 goto out_only_mutex; 2930 } 2931 lockstart = offset; 2932 } 2933 2934 /* Check if the tail unaligned part is in a hole */ 2935 tail_start = lockend + 1; 2936 tail_len = offset + len - tail_start; 2937 if (tail_len) { 2938 ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len); 2939 if (unlikely(ret < 0)) 2940 goto out_only_mutex; 2941 if (!ret) { 2942 /* zero the front end of the last page */ 2943 if (tail_start + tail_len < ino_size) { 2944 truncated_block = true; 2945 ret = btrfs_truncate_block(BTRFS_I(inode), 2946 tail_start + tail_len, 2947 0, 1); 2948 if (ret) 2949 goto out_only_mutex; 2950 } 2951 } 2952 } 2953 2954 if (lockend < lockstart) { 2955 ret = 0; 2956 goto out_only_mutex; 2957 } 2958 2959 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend, 2960 &cached_state); 2961 if (ret) 2962 goto out_only_mutex; 2963 2964 path = btrfs_alloc_path(); 2965 if (!path) { 2966 ret = -ENOMEM; 2967 goto out; 2968 } 2969 2970 ret = btrfs_replace_file_extents(inode, path, lockstart, lockend, NULL, 2971 &trans); 2972 btrfs_free_path(path); 2973 if (ret) 2974 goto out; 2975 2976 ASSERT(trans != NULL); 2977 inode_inc_iversion(inode); 2978 inode->i_mtime = inode->i_ctime = current_time(inode); 2979 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 2980 updated_inode = true; 2981 btrfs_end_transaction(trans); 2982 btrfs_btree_balance_dirty(fs_info); 2983 out: 2984 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 2985 &cached_state); 2986 out_only_mutex: 2987 if (!updated_inode && truncated_block && !ret) { 2988 /* 2989 * If we only end up zeroing part of a page, we still need to 2990 * update the inode item, so that all the time fields are 2991 * updated as well as the necessary btrfs inode in-memory fields 2992 * needed to detect, at fsync time, whether the inode is not yet 2993 * in the log tree or is there but not up to date.
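* * For example (illustrative, assuming a 4K sector size): punching a 100 byte range that sits inside a single block only zeroes part of that block in the page cache and drops no extents, yet mtime/ctime changed, so the inode item still has to go through a transaction below.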
2994 */ 2995 struct timespec64 now = current_time(inode); 2996 2997 inode_inc_iversion(inode); 2998 inode->i_mtime = now; 2999 inode->i_ctime = now; 3000 trans = btrfs_start_transaction(root, 1); 3001 if (IS_ERR(trans)) { 3002 ret = PTR_ERR(trans); 3003 } else { 3004 int ret2; 3005 3006 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 3007 ret2 = btrfs_end_transaction(trans); 3008 if (!ret) 3009 ret = ret2; 3010 } 3011 } 3012 inode_unlock(inode); 3013 return ret; 3014 } 3015 3016 /* Helper structure to record which range is already reserved */ 3017 struct falloc_range { 3018 struct list_head list; 3019 u64 start; 3020 u64 len; 3021 }; 3022 3023 /* 3024 * Helper function to add a falloc range 3025 * 3026 * The caller should have locked the larger extent range containing 3027 * [start, len) 3028 */ 3029 static int add_falloc_range(struct list_head *head, u64 start, u64 len) 3030 { 3031 struct falloc_range *prev = NULL; 3032 struct falloc_range *range = NULL; 3033 3034 if (list_empty(head)) 3035 goto insert; 3036 3037 /* 3038 * As fallocate iterates in bytenr order, we only need to check 3039 * the last range. 3040 */ 3041 prev = list_entry(head->prev, struct falloc_range, list); 3042 if (prev->start + prev->len == start) { 3043 prev->len += len; 3044 return 0; 3045 } 3046 insert: 3047 range = kmalloc(sizeof(*range), GFP_KERNEL); 3048 if (!range) 3049 return -ENOMEM; 3050 range->start = start; 3051 range->len = len; 3052 list_add_tail(&range->list, head); 3053 return 0; 3054 } 3055 3056 static int btrfs_fallocate_update_isize(struct inode *inode, 3057 const u64 end, 3058 const int mode) 3059 { 3060 struct btrfs_trans_handle *trans; 3061 struct btrfs_root *root = BTRFS_I(inode)->root; 3062 int ret; 3063 int ret2; 3064 3065 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode)) 3066 return 0; 3067 3068 trans = btrfs_start_transaction(root, 1); 3069 if (IS_ERR(trans)) 3070 return PTR_ERR(trans); 3071 3072 inode->i_ctime = current_time(inode); 3073 i_size_write(inode, end); 3074 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 3075 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 3076 ret2 = btrfs_end_transaction(trans); 3077 3078 return ret ? 
ret : ret2; 3079 } 3080 3081 enum { 3082 RANGE_BOUNDARY_WRITTEN_EXTENT, 3083 RANGE_BOUNDARY_PREALLOC_EXTENT, 3084 RANGE_BOUNDARY_HOLE, 3085 }; 3086 3087 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode, 3088 u64 offset) 3089 { 3090 const u64 sectorsize = btrfs_inode_sectorsize(inode); 3091 struct extent_map *em; 3092 int ret; 3093 3094 offset = round_down(offset, sectorsize); 3095 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize); 3096 if (IS_ERR(em)) 3097 return PTR_ERR(em); 3098 3099 if (em->block_start == EXTENT_MAP_HOLE) 3100 ret = RANGE_BOUNDARY_HOLE; 3101 else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 3102 ret = RANGE_BOUNDARY_PREALLOC_EXTENT; 3103 else 3104 ret = RANGE_BOUNDARY_WRITTEN_EXTENT; 3105 3106 free_extent_map(em); 3107 return ret; 3108 } 3109 3110 static int btrfs_zero_range(struct inode *inode, 3111 loff_t offset, 3112 loff_t len, 3113 const int mode) 3114 { 3115 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 3116 struct extent_map *em; 3117 struct extent_changeset *data_reserved = NULL; 3118 int ret; 3119 u64 alloc_hint = 0; 3120 const u64 sectorsize = btrfs_inode_sectorsize(BTRFS_I(inode)); 3121 u64 alloc_start = round_down(offset, sectorsize); 3122 u64 alloc_end = round_up(offset + len, sectorsize); 3123 u64 bytes_to_reserve = 0; 3124 bool space_reserved = false; 3125 3126 inode_dio_wait(inode); 3127 3128 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start, 3129 alloc_end - alloc_start); 3130 if (IS_ERR(em)) { 3131 ret = PTR_ERR(em); 3132 goto out; 3133 } 3134 3135 /* 3136 * Avoid hole punching and extent allocation for some cases. More cases 3137 * could be considered, but those are unlikely to be common and we keep 3138 * things as simple as possible for now. Also, intentionally, if the target 3139 * range contains one or more prealloc extents together with regular 3140 * extents and holes, we drop all the existing extents and allocate a 3141 * new prealloc extent, so that we get a larger contiguous disk extent. 3142 */ 3143 if (em->start <= alloc_start && 3144 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3145 const u64 em_end = em->start + em->len; 3146 3147 if (em_end >= offset + len) { 3148 /* 3149 * The whole range is already a prealloc extent, 3150 * do nothing except updating the inode's i_size if 3151 * needed. 3152 */ 3153 free_extent_map(em); 3154 ret = btrfs_fallocate_update_isize(inode, offset + len, 3155 mode); 3156 goto out; 3157 } 3158 /* 3159 * Part of the range is already a prealloc extent, so operate 3160 * only on the remaining part of the range. 
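* Illustrative example (hypothetical sizes, assuming a 4K sector size): with a prealloc extent covering [0, 64K) and a zero range request for [32K, 128K), alloc_start is bumped to 64K and only [64K, 128K) is processed below.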
3161 */ 3162 alloc_start = em_end; 3163 ASSERT(IS_ALIGNED(alloc_start, sectorsize)); 3164 len = offset + len - alloc_start; 3165 offset = alloc_start; 3166 alloc_hint = em->block_start + em->len; 3167 } 3168 free_extent_map(em); 3169 3170 if (BTRFS_BYTES_TO_BLKS(fs_info, offset) == 3171 BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) { 3172 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start, 3173 sectorsize); 3174 if (IS_ERR(em)) { 3175 ret = PTR_ERR(em); 3176 goto out; 3177 } 3178 3179 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3180 free_extent_map(em); 3181 ret = btrfs_fallocate_update_isize(inode, offset + len, 3182 mode); 3183 goto out; 3184 } 3185 if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) { 3186 free_extent_map(em); 3187 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len, 3188 0); 3189 if (!ret) 3190 ret = btrfs_fallocate_update_isize(inode, 3191 offset + len, 3192 mode); 3193 return ret; 3194 } 3195 free_extent_map(em); 3196 alloc_start = round_down(offset, sectorsize); 3197 alloc_end = alloc_start + sectorsize; 3198 goto reserve_space; 3199 } 3200 3201 alloc_start = round_up(offset, sectorsize); 3202 alloc_end = round_down(offset + len, sectorsize); 3203 3204 /* 3205 * For unaligned ranges, check the pages at the boundaries: they might 3206 * map to an extent, in which case we need to partially zero them, or 3207 * they might map to a hole, in which case we need our allocation range 3208 * to cover them. 3209 */ 3210 if (!IS_ALIGNED(offset, sectorsize)) { 3211 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode), 3212 offset); 3213 if (ret < 0) 3214 goto out; 3215 if (ret == RANGE_BOUNDARY_HOLE) { 3216 alloc_start = round_down(offset, sectorsize); 3217 ret = 0; 3218 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) { 3219 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0); 3220 if (ret) 3221 goto out; 3222 } else { 3223 ret = 0; 3224 } 3225 } 3226 3227 if (!IS_ALIGNED(offset + len, sectorsize)) { 3228 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode), 3229 offset + len); 3230 if (ret < 0) 3231 goto out; 3232 if (ret == RANGE_BOUNDARY_HOLE) { 3233 alloc_end = round_up(offset + len, sectorsize); 3234 ret = 0; 3235 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) { 3236 ret = btrfs_truncate_block(BTRFS_I(inode), offset + len, 3237 0, 1); 3238 if (ret) 3239 goto out; 3240 } else { 3241 ret = 0; 3242 } 3243 } 3244 3245 reserve_space: 3246 if (alloc_start < alloc_end) { 3247 struct extent_state *cached_state = NULL; 3248 const u64 lockstart = alloc_start; 3249 const u64 lockend = alloc_end - 1; 3250 3251 bytes_to_reserve = alloc_end - alloc_start; 3252 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), 3253 bytes_to_reserve); 3254 if (ret < 0) 3255 goto out; 3256 space_reserved = true; 3257 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend, 3258 &cached_state); 3259 if (ret) 3260 goto out; 3261 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved, 3262 alloc_start, bytes_to_reserve); 3263 if (ret) 3264 goto out; 3265 ret = btrfs_prealloc_file_range(inode, mode, alloc_start, 3266 alloc_end - alloc_start, 3267 i_blocksize(inode), 3268 offset + len, &alloc_hint); 3269 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, 3270 lockend, &cached_state); 3271 /* btrfs_prealloc_file_range releases reserved space on error */ 3272 if (ret) { 3273 space_reserved = false; 3274 goto out; 3275 } 3276 } 3277 ret = btrfs_fallocate_update_isize(inode, offset + len, mode); 3278 out: 3279 if (ret && space_reserved) 3280 
btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved, 3281 alloc_start, bytes_to_reserve); 3282 extent_changeset_free(data_reserved); 3283 3284 return ret; 3285 } 3286 3287 static long btrfs_fallocate(struct file *file, int mode, 3288 loff_t offset, loff_t len) 3289 { 3290 struct inode *inode = file_inode(file); 3291 struct extent_state *cached_state = NULL; 3292 struct extent_changeset *data_reserved = NULL; 3293 struct falloc_range *range; 3294 struct falloc_range *tmp; 3295 struct list_head reserve_list; 3296 u64 cur_offset; 3297 u64 last_byte; 3298 u64 alloc_start; 3299 u64 alloc_end; 3300 u64 alloc_hint = 0; 3301 u64 locked_end; 3302 u64 actual_end = 0; 3303 struct extent_map *em; 3304 int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode)); 3305 int ret; 3306 3307 /* Do not allow fallocate in ZONED mode */ 3308 if (btrfs_is_zoned(btrfs_sb(inode->i_sb))) 3309 return -EOPNOTSUPP; 3310 3311 alloc_start = round_down(offset, blocksize); 3312 alloc_end = round_up(offset + len, blocksize); 3313 cur_offset = alloc_start; 3314 3315 /* Make sure we aren't being given some crap mode */ 3316 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 3317 FALLOC_FL_ZERO_RANGE)) 3318 return -EOPNOTSUPP; 3319 3320 if (mode & FALLOC_FL_PUNCH_HOLE) 3321 return btrfs_punch_hole(inode, offset, len); 3322 3323 /* 3324 * Only trigger disk allocation, don't trigger qgroup reserve 3325 * 3326 * For qgroup space, it will be checked later. 3327 */ 3328 if (!(mode & FALLOC_FL_ZERO_RANGE)) { 3329 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), 3330 alloc_end - alloc_start); 3331 if (ret < 0) 3332 return ret; 3333 } 3334 3335 btrfs_inode_lock(inode, 0); 3336 3337 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) { 3338 ret = inode_newsize_ok(inode, offset + len); 3339 if (ret) 3340 goto out; 3341 } 3342 3343 /* 3344 * TODO: Move these two operations after we have checked 3345 * accurate reserved space, or fallocate can still fail but 3346 * with the page truncated or the size expanded. 3347 * 3348 * But that's a minor problem and won't do much harm BTW. 3349 */ 3350 if (alloc_start > inode->i_size) { 3351 ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode), 3352 alloc_start); 3353 if (ret) 3354 goto out; 3355 } else if (offset + len > inode->i_size) { 3356 /* 3357 * If we are fallocating from the end of the file onward we 3358 * need to zero out the end of the block if i_size lands in the 3359 * middle of a block. 3360 */ 3361 ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0); 3362 if (ret) 3363 goto out; 3364 } 3365 3366 /* 3367 * wait for ordered IO before we have any locks. We'll loop again 3368 * below with the locks held. 
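* * (Any ordered extent that appears after this wait but before the extent range is locked is caught by the lock/check/retry loop further below.)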
3369 */ 3370 ret = btrfs_wait_ordered_range(inode, alloc_start, 3371 alloc_end - alloc_start); 3372 if (ret) 3373 goto out; 3374 3375 if (mode & FALLOC_FL_ZERO_RANGE) { 3376 ret = btrfs_zero_range(inode, offset, len, mode); 3377 inode_unlock(inode); 3378 return ret; 3379 } 3380 3381 locked_end = alloc_end - 1; 3382 while (1) { 3383 struct btrfs_ordered_extent *ordered; 3384 3385 /* the extent lock is ordered inside the running 3386 * transaction 3387 */ 3388 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, 3389 locked_end, &cached_state); 3390 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), 3391 locked_end); 3392 3393 if (ordered && 3394 ordered->file_offset + ordered->num_bytes > alloc_start && 3395 ordered->file_offset < alloc_end) { 3396 btrfs_put_ordered_extent(ordered); 3397 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 3398 alloc_start, locked_end, 3399 &cached_state); 3400 /* 3401 * we can't wait on the range with the transaction 3402 * running or with the extent lock held 3403 */ 3404 ret = btrfs_wait_ordered_range(inode, alloc_start, 3405 alloc_end - alloc_start); 3406 if (ret) 3407 goto out; 3408 } else { 3409 if (ordered) 3410 btrfs_put_ordered_extent(ordered); 3411 break; 3412 } 3413 } 3414 3415 /* First, check if we exceed the qgroup limit */ 3416 INIT_LIST_HEAD(&reserve_list); 3417 while (cur_offset < alloc_end) { 3418 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, 3419 alloc_end - cur_offset); 3420 if (IS_ERR(em)) { 3421 ret = PTR_ERR(em); 3422 break; 3423 } 3424 last_byte = min(extent_map_end(em), alloc_end); 3425 actual_end = min_t(u64, extent_map_end(em), offset + len); 3426 last_byte = ALIGN(last_byte, blocksize); 3427 if (em->block_start == EXTENT_MAP_HOLE || 3428 (cur_offset >= inode->i_size && 3429 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { 3430 ret = add_falloc_range(&reserve_list, cur_offset, 3431 last_byte - cur_offset); 3432 if (ret < 0) { 3433 free_extent_map(em); 3434 break; 3435 } 3436 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), 3437 &data_reserved, cur_offset, 3438 last_byte - cur_offset); 3439 if (ret < 0) { 3440 cur_offset = last_byte; 3441 free_extent_map(em); 3442 break; 3443 } 3444 } else { 3445 /* 3446 * No need to reserve an unwritten extent for this 3447 * range; free the reserved data space first, otherwise 3448 * it'll result in a false ENOSPC error. 3449 */ 3450 btrfs_free_reserved_data_space(BTRFS_I(inode), 3451 data_reserved, cur_offset, 3452 last_byte - cur_offset); 3453 } 3454 free_extent_map(em); 3455 cur_offset = last_byte; 3456 } 3457 3458 /* 3459 * If ret is still 0, we're OK to fallocate. 3460 * Otherwise just clean up the list and exit. 3461 */ 3462 list_for_each_entry_safe(range, tmp, &reserve_list, list) { 3463 if (!ret) 3464 ret = btrfs_prealloc_file_range(inode, mode, 3465 range->start, 3466 range->len, i_blocksize(inode), 3467 offset + len, &alloc_hint); 3468 else 3469 btrfs_free_reserved_data_space(BTRFS_I(inode), 3470 data_reserved, range->start, 3471 range->len); 3472 list_del(&range->list); 3473 kfree(range); 3474 } 3475 if (ret < 0) 3476 goto out_unlock; 3477 3478 /* 3479 * We didn't need to allocate any more space, but we still extended the 3480 * size of the file so we need to update i_size and the inode item. 3481 */ 3482 ret = btrfs_fallocate_update_isize(inode, actual_end, mode); 3483 out_unlock: 3484 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, 3485 &cached_state); 3486 out: 3487 inode_unlock(inode); 3488 /* Let go of our reservation. 
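* On error, only the tail of the data space reserved up front that was not consumed by btrfs_prealloc_file_range(), i.e. [cur_offset, alloc_end), is released here; the ZERO_RANGE path does its own space accounting in btrfs_zero_range().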
*/ 3489 if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE)) 3490 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved, 3491 cur_offset, alloc_end - cur_offset); 3492 extent_changeset_free(data_reserved); 3493 return ret; 3494 } 3495 3496 static loff_t find_desired_extent(struct inode *inode, loff_t offset, 3497 int whence) 3498 { 3499 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3500 struct extent_map *em = NULL; 3501 struct extent_state *cached_state = NULL; 3502 loff_t i_size = inode->i_size; 3503 u64 lockstart; 3504 u64 lockend; 3505 u64 start; 3506 u64 len; 3507 int ret = 0; 3508 3509 if (i_size == 0 || offset >= i_size) 3510 return -ENXIO; 3511 3512 /* 3513 * offset can be negative; in this case we start finding DATA/HOLE from 3514 * the very start of the file. 3515 */ 3516 start = max_t(loff_t, 0, offset); 3517 3518 lockstart = round_down(start, fs_info->sectorsize); 3519 lockend = round_up(i_size, fs_info->sectorsize); 3520 if (lockend <= lockstart) 3521 lockend = lockstart + fs_info->sectorsize; 3522 lockend--; 3523 len = lockend - lockstart + 1; 3524 3525 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 3526 &cached_state); 3527 3528 while (start < i_size) { 3529 em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len); 3530 if (IS_ERR(em)) { 3531 ret = PTR_ERR(em); 3532 em = NULL; 3533 break; 3534 } 3535 3536 if (whence == SEEK_HOLE && 3537 (em->block_start == EXTENT_MAP_HOLE || 3538 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) 3539 break; 3540 else if (whence == SEEK_DATA && 3541 (em->block_start != EXTENT_MAP_HOLE && 3542 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) 3543 break; 3544 3545 start = em->start + em->len; 3546 free_extent_map(em); 3547 em = NULL; 3548 cond_resched(); 3549 } 3550 free_extent_map(em); 3551 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 3552 &cached_state); 3553 if (ret) { 3554 offset = ret; 3555 } else { 3556 if (whence == SEEK_DATA && start >= i_size) 3557 offset = -ENXIO; 3558 else 3559 offset = min_t(loff_t, start, i_size); 3560 } 3561 3562 return offset; 3563 } 3564 3565 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) 3566 { 3567 struct inode *inode = file->f_mapping->host; 3568 3569 switch (whence) { 3570 default: 3571 return generic_file_llseek(file, offset, whence); 3572 case SEEK_DATA: 3573 case SEEK_HOLE: 3574 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED); 3575 offset = find_desired_extent(inode, offset, whence); 3576 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 3577 break; 3578 } 3579 3580 if (offset < 0) 3581 return offset; 3582 3583 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); 3584 } 3585 3586 static int btrfs_file_open(struct inode *inode, struct file *filp) 3587 { 3588 filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; 3589 return generic_file_open(inode, filp); 3590 } 3591 3592 static int check_direct_read(struct btrfs_fs_info *fs_info, 3593 const struct iov_iter *iter, loff_t offset) 3594 { 3595 int ret; 3596 int i, seg; 3597 3598 ret = check_direct_IO(fs_info, iter, offset); 3599 if (ret < 0) 3600 return ret; 3601 3602 if (!iter_is_iovec(iter)) 3603 return 0; 3604 3605 for (seg = 0; seg < iter->nr_segs; seg++) 3606 for (i = seg + 1; i < iter->nr_segs; i++) 3607 if (iter->iov[seg].iov_base == iter->iov[i].iov_base) 3608 return -EINVAL; 3609 return 0; 3610 } 3611 3612 static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to) 3613 { 3614 struct inode *inode = file_inode(iocb->ki_filp); 3615 ssize_t ret; 3616 3617 if 
(check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos)) 3618 return 0; 3619 3620 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED); 3621 ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 0); 3622 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 3623 return ret; 3624 } 3625 3626 static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 3627 { 3628 ssize_t ret = 0; 3629 3630 if (iocb->ki_flags & IOCB_DIRECT) { 3631 ret = btrfs_direct_read(iocb, to); 3632 if (ret < 0 || !iov_iter_count(to) || 3633 iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp))) 3634 return ret; 3635 } 3636 3637 return generic_file_buffered_read(iocb, to, ret); 3638 } 3639 3640 const struct file_operations btrfs_file_operations = { 3641 .llseek = btrfs_file_llseek, 3642 .read_iter = btrfs_file_read_iter, 3643 .splice_read = generic_file_splice_read, 3644 .write_iter = btrfs_file_write_iter, 3645 .splice_write = iter_file_splice_write, 3646 .mmap = btrfs_file_mmap, 3647 .open = btrfs_file_open, 3648 .release = btrfs_release_file, 3649 .fsync = btrfs_sync_file, 3650 .fallocate = btrfs_fallocate, 3651 .unlocked_ioctl = btrfs_ioctl, 3652 #ifdef CONFIG_COMPAT 3653 .compat_ioctl = btrfs_compat_ioctl, 3654 #endif 3655 .remap_file_range = btrfs_remap_file_range, 3656 }; 3657 3658 void __cold btrfs_auto_defrag_exit(void) 3659 { 3660 kmem_cache_destroy(btrfs_inode_defrag_cachep); 3661 } 3662 3663 int __init btrfs_auto_defrag_init(void) 3664 { 3665 btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag", 3666 sizeof(struct inode_defrag), 0, 3667 SLAB_MEM_SPREAD, 3668 NULL); 3669 if (!btrfs_inode_defrag_cachep) 3670 return -ENOMEM; 3671 3672 return 0; 3673 } 3674 3675 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end) 3676 { 3677 int ret; 3678 3679 /* 3680 * So with compression we will find and lock a dirty page and clear the 3681 * first one as dirty, setup an async extent, and immediately return 3682 * with the entire range locked but with nobody actually marked with 3683 * writeback. So we can't just filemap_write_and_wait_range() and 3684 * expect it to work since it will just kick off a thread to do the 3685 * actual work. So we need to call filemap_fdatawrite_range _again_ 3686 * since it will wait on the page lock, which won't be unlocked until 3687 * after the pages have been marked as writeback and so we're good to go 3688 * from there. We have to do this otherwise we'll miss the ordered 3689 * extents and that results in badness. Please Josef, do not think you 3690 * know better and pull this out at some point in the future, it is 3691 * right and you are wrong. 3692 */ 3693 ret = filemap_fdatawrite_range(inode->i_mapping, start, end); 3694 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 3695 &BTRFS_I(inode)->runtime_flags)) 3696 ret = filemap_fdatawrite_range(inode->i_mapping, start, end); 3697 3698 return ret; 3699 } 3700
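/* Illustrative userspace view of the main entry points above (hypothetical example, not part of this file): int fd = open("foo", O_RDWR); fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192) dispatches to btrfs_fallocate() and then btrfs_punch_hole(); lseek(fd, 0, SEEK_HOLE) goes through btrfs_file_llseek() and find_desired_extent(); fsync(fd) ends up in btrfs_sync_file(). */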