// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * Transid where the defrag was added, we search for extents newer
	 * than this.
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

/*
 * Insert a record for an inode into the defrag tree.  The lock must be held
 * already.
 *
 * If you're inserting a record for an older transid than an existing record,
 * the transid already in the tree is lowered.
 *
 * If an existing record is found the defrag item you pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/*
			 * If we're reinserting an entry for an old defrag run,
			 * make sure to lower the transid of our existing
			 * record.
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}
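/*
 * Note: on -EEXIST above, the existing rbtree node has already been updated
 * in place (transid lowered, last_offset raised), so callers are expected to
 * free the inode_defrag they tried to insert.
 */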
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}

/*
 * Insert a defrag record for this inode if auto defrag is enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read this inode, this new inode
		 * doesn't have IN_DEFRAG set.  In that case we may find an
		 * existing defrag record.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Requeue the defrag object.  If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret;

	if (!__need_auto_defrag(fs_info))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * the records together.
	 */
	spin_lock(&fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
/*
 * Pick the defraggable inode that we want; if it doesn't exist, we will get
 * the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH	1024
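/*
 * Note: BTRFS_DEFRAG_BATCH caps how much btrfs_defrag_file() is asked to
 * process per pass below, so one large inode cannot monopolize the defrag
 * worker; filling the whole batch is taken as a hint that more work remains
 * and the inode is requeued.
 */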
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int ret;

	/* get the inode */
	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
	btrfs_put_root(inode_root);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * If we filled the whole defrag batch, there must be more work to
	 * do.  Queue this defrag again.
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * We didn't fill our defrag batch, but we didn't start at
		 * zero.  Make sure we loop around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * Run through the list of inodes in the FS that need defragging.
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * During unmount, we use the transaction_wait queue to wait for the
	 * defragger to stop.
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/*
 * Simple helper to fault in pages and copy.  This should go away and be
 * replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * If we get a partial write, we can end up with partially
		 * uptodate pages.  These add a lot of complexity, so make
		 * sure they don't happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall back to
		 * page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_write_iter to fault page */
		if (unlikely(copied == 0))
			break;

		if (copied < PAGE_SIZE - offset) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
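/*
 * Note: a short return from btrfs_copy_from_user() (including 0) is handled
 * by the caller, btrfs_buffered_write(), which falls back to copying a
 * single page at a time and re-faults the user buffer before retrying.
 */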
/*
 * Unlocks pages after btrfs_file_write is done with them.
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/*
		 * PageChecked is some magic around finding pages that have
		 * been modified without going through btrfs_set_page_dirty;
		 * clear it here.  There should be no need to mark the pages
		 * accessed as prepare_pages should have marked them accessed
		 * in prepare_pages via find_or_create_page().
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}

/*
 * After copy_from_user, pages need to be dirtied and we need to make sure
 * holes are created between the current EOF and the start of any next
 * extents (if required).
 *
 * This also makes the decision about creating an inline extent vs doing real
 * data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(&inode->vfs_inode);
	unsigned int extra_bits = 0;

	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly.
	 */
	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, cached);

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * We've only changed i_size in ram, and we haven't updated the disk
	 * i_size.  There is no need to log the inode at this time.
	 */
	if (end_pos > isize)
		i_size_write(&inode->vfs_inode, end_pos);
	return 0;
}
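/*
 * Note: start_pos and end_of_last_block above round the written byte range
 * out to sector boundaries, since delalloc and extent state are tracked at
 * sector granularity rather than byte granularity.
 */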
/*
 * This drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
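		/*
		 * Second split case: the extent map extends past the end of
		 * the dropped range, so keep a tail mapping for
		 * [start + len, em->start + em->len).
		 */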
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
						+ diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				ret = add_extent_mapping(em_tree, split,
							 modified);
				ASSERT(ret == 0); /* Logic error */
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em))
			remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}

/*
 * This is very complex, but the basic idea is to drop all extents in the
 * range start - end.  hint_block is filled in with a block number that
 * would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range it
 * is either truncated or split.  Anything entirely inside the range is
 * deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct btrfs_inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	struct inode *vfs_inode = &inode->vfs_inode;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= inode->disk_i_size && !replace_extent)
		modify_tree = 0;

	update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
		       root == fs_info->tree_root);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths.  They
		 * used to be created (bug) if while punching holes we hit
		 * -ENOSPC condition.  So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 * | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(vfs_inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 * | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(vfs_inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(vfs_inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset);
				ret = btrfs_free_extent(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(vfs_inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are moved off from our leaf to its immediate left
		 * or right neighbor leafs, we end up with a correct and
		 * adjusted path->slots[0] for our insertion (if
		 * replace_extent != 0).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}
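	/*
	 * Note: the block below only inserts the new file extent key inline
	 * when this search never left a single leaf (leafs_visited == 1) and
	 * we still hold a write lock on it with enough free space; otherwise
	 * the caller has to do its own insertion after we return.
	 */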
	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check that path->locks[0]
	 * matches a write lock.
	 */
	if (!ret && replace_extent && leafs_visited == 1 &&
	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
	     path->locks[0] == BTRFS_WRITE_LOCK) &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key, &extent_item_size, 1);
		*key_inserted = 1;
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, last_end) : end;
	return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path, start,
				   end, NULL, drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
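/*
 * Note: extent_mergeable() above only reports a neighbor as mergeable when
 * it is a regular (non-compressed, non-encrypted) extent item backed by the
 * same disk bytenr and contiguous within the same original extent, which is
 * what makes it safe for btrfs_mark_extent_written() below to coalesce the
 * items.
 */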
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}
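	/*
	 * Symmetric case: the written range ends exactly at the extent end,
	 * so try to fold it into a mergeable extent item to the right.
	 */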
	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
				       num_bytes, 0);
		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
				    orig_offset);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0);
	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * On error we return an unlocked page and the error value; on success we
 * return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}

/*
 * This just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;
}
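/*
 * Note: the -EAGAIN handling above covers the case where
 * prepare_uptodate_page() found that the page was dropped from the inode's
 * mapping while it was unlocked for read-in; prepare_pages() simply retries
 * the lookup via the 'again' label.
 */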
/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - the pages need to be re-prepared
 * any other < 0 value - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
				 cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent_cached(&inode->io_tree, start_pos,
					     last_pos, cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(ordered, 1);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * It's possible the pages are dirty right now, but we don't want
	 * to clean them yet because copy_from_user may catch a page fault
	 * and we might have to fall back to one page at a time.  If that
	 * happens, we'll unlock these pages and we'd have a window where
	 * reclaim could sneak in and drop the once-dirty page on the floor
	 * without writing it.
	 *
	 * We have the pages locked and the extent range locked, so there's
	 * no way someone can start IO on any dirty pages in this range.
	 *
	 * We'll call btrfs_dirty_pages() later on, and that will flip around
	 * delalloc bits and dirty the pages as required.
	 */
	for (i = 0; i < num_pages; i++) {
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}

static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes, bool nowait)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return 0;

	if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;
	num_bytes = lockend - lockstart + 1;

	if (nowait) {
		struct btrfs_ordered_extent *ordered;

		if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
			return -EAGAIN;

		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     num_bytes);
		if (ordered) {
			btrfs_put_ordered_extent(ordered);
			ret = -EAGAIN;
			goto out_unlock;
		}
	} else {
		btrfs_lock_and_flush_ordered_range(inode, lockstart,
						   lockend, NULL);
	}

	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			       NULL, NULL, NULL, false);
	if (ret <= 0) {
		ret = 0;
		if (!nowait)
			btrfs_drew_write_unlock(&root->snapshot_lock);
	} else {
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	}
out_unlock:
	unlock_extent(&inode->io_tree, lockstart, lockend);

	return ret;
}

static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
			      size_t *write_bytes)
{
	return check_can_nocow(inode, pos, write_bytes, true);
}

/*
 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
 *
 * @pos:         File offset
 * @write_bytes: The length to write, will be updated to the nocow writeable
 *               range
 *
 * This function will flush ordered extents in the range to ensure proper
 * nocow checks.
 *
 * Return:
 * > 0          and update @write_bytes if we can do nocow write
 *   0          if we can't do a nocow write
 * -EAGAIN      if we can't get the needed lock or there are ordered extents
 *              (only in the nowait == true case)
 * < 0          if another error happened
 *
 * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
 */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes)
{
	return check_can_nocow(inode, pos, write_bytes, false);
}

void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
{
	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}

static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
					     struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;

	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
		     PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	while (iov_iter_count(i) > 0) {
		struct extent_state *cached_state = NULL;
		size_t offset = offset_in_page(pos);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
						PAGE_SIZE);
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		only_release_metadata = false;
		sector_offset = pos & (fs_info->sectorsize - 1);
		reserve_bytes = round_up(write_bytes + sector_offset,
					 fs_info->sectorsize);
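		/*
		 * Try to reserve data space for this chunk first; if that
		 * fails, fall back to checking whether the range can be
		 * written NOCOW, in which case only metadata needs to be
		 * reserved.
		 */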
		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &data_reserved, pos,
						  write_bytes);
		if (ret < 0) {
			if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
						   &write_bytes) > 0) {
				/*
				 * For the nodatacow case, no need to reserve
				 * data space.
				 */
				only_release_metadata = true;
				/*
				 * Our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = DIV_ROUND_UP(write_bytes + offset,
							 PAGE_SIZE);
				reserve_bytes = round_up(write_bytes +
							 sector_offset,
							 fs_info->sectorsize);
			} else {
				break;
			}
		}

		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
						data_reserved, pos,
						write_bytes);
			else
				btrfs_check_nocow_unlock(BTRFS_I(inode));
			break;
		}

		release_bytes = reserve_bytes;
again:
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop.
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, &cached_state);
		if (extents_locked < 0) {
			if (extents_locked == -EAGAIN)
				goto again;
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					 fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * If we have trouble faulting in the pages, fall back to one
		 * page at a time.
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors <<
						fs_info->sb->s_blocksize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(BTRFS_I(inode),
						data_reserved, __pos,
						release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					 fs_info->sectorsize);

		if (copied > 0)
			ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
						dirty_pages, pos, copied,
						&cached_state);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages().  Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_check_nocow_unlock(BTRFS_I(inode));

		if (only_release_metadata && copied > 0) {
			lockstart = round_down(pos,
					       fs_info->sectorsize);
			lockend = round_up(pos + copied,
					   fs_info->sectorsize) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_check_nocow_unlock(BTRFS_I(inode));
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					release_bytes, true);
		} else {
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	return num_written ? num_written : ret;
}

static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t pos;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = btrfs_direct_IO(iocb, from);

	if (written < 0 || !iov_iter_count(from))
		return written;

	pos = iocb->ki_pos;
	written_buffered = btrfs_buffered_write(iocb, from);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted.  We want the next direct IO read to
	 * be able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
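	/*
	 * Note: the invalidate_mapping_pages() call above drops the
	 * now-written-back page cache pages for the buffered fallback range,
	 * which keeps the page cache from aliasing a range the caller
	 * expects to be handled by direct IO.
	 */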
out:
	return written ? written : err;
}

static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	if (!timespec64_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec64_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}

static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
				     struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start_pos;
	u64 end_pos;
	ssize_t num_written = 0;
	const bool sync = iocb->ki_flags & IOCB_DSYNC;
	ssize_t err;
	loff_t pos;
	size_t count;
	loff_t oldsize;
	int clean_page = 0;

	if (!(iocb->ki_flags & IOCB_DIRECT) &&
	    (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0) {
		inode_unlock(inode);
		return err;
	}

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	if (iocb->ki_flags & IOCB_NOWAIT) {
		size_t nocow_bytes = count;

		/*
		 * We will allocate space in case nodatacow is not set, so
		 * bail.
		 */
		if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes)
		    <= 0) {
			inode_unlock(inode);
			return -EAGAIN;
		}
		/*
		 * There are holes in the range or parts of the range that must
		 * be COWed (shared extents, RO block groups, etc), so just bail
		 * out.
		 */
		if (nocow_bytes < count) {
			inode_unlock(inode);
			return -EAGAIN;
		}
	}

	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err) {
		inode_unlock(inode);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), although we
	 * have opened a file as writable, we have to stop this write
	 * operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		inode_unlock(inode);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for
	 * the extent we are going to write, so we will enospc out there.  We
	 * don't need to start yet another transaction to update the inode as
	 * we will update the inode when we finish writing whatever data we
	 * write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		end_pos = round_up(pos + count,
				   fs_info->sectorsize);
		err = btrfs_cont_expand(inode, oldsize, end_pos);
		if (err) {
			inode_unlock(inode);
			goto out;
		}
		if (start_pos > round_up(oldsize, fs_info->sectorsize))
			clean_page = 1;
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);
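	/*
	 * Note: sync_writers is bumped here so that, for the duration of an
	 * O_DSYNC write, lower layers can tell a synchronous writer is
	 * active (the bio submission path appears to use this to avoid
	 * async checksum offload and cut latency); it is dropped again after
	 * the write below.
	 */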
	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * 1. We must always clear IOCB_DSYNC in order to not deadlock
		 *    in iomap, as it calls generic_write_sync() in this case.
		 * 2. If we are async, we can call iomap_dio_complete() either
		 *    in
		 *
		 *    2.1. A worker thread from the last bio completed.  In
		 *         this case we need to mark the btrfs_dio_data that
		 *         it is async in order to call generic_write_sync()
		 *         properly.  This is handled by setting
		 *         BTRFS_DIO_SYNC_STUB in the current->journal_info.
		 *    2.2  The submitter context, because all IO completed
		 *         before we exited iomap_dio_rw().  In this case we
		 *         can just re-set the IOCB_DSYNC on the iocb and
		 *         we'll do the sync below.  If our ->end_io() gets
		 *         called and current->journal_info is set, then we
		 *         know we're in our current context and we will
		 *         clear current->journal_info to indicate that we
		 *         need to sync below.
		 */
		if (sync) {
			ASSERT(current->journal_info == NULL);
			iocb->ki_flags &= ~IOCB_DSYNC;
			current->journal_info = BTRFS_DIO_SYNC_STUB;
		}
		num_written = __btrfs_direct_write(iocb, from);

		/*
		 * As stated above, we cleared journal_info, so we need to do
		 * the sync ourselves.
		 */
		if (sync && current->journal_info == NULL)
			iocb->ki_flags |= IOCB_DSYNC;
		current->journal_info = NULL;
	} else {
		num_written = btrfs_buffered_write(iocb, from);
		if (num_written > 0)
			iocb->ki_pos = pos + num_written;
		if (clean_page)
			pagecache_isize_extended(inode, oldsize,
						 i_size_read(inode));
	}

	inode_unlock(inode);

	/*
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	spin_unlock(&BTRFS_I(inode)->lock);
	if (num_written > 0)
		num_written = generic_write_sync(iocb, num_written);

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private && private->filldir_buf)
		kfree(private->filldir_buf);
	kfree(private);
	filp->private_data = NULL;

	/*
	 * Set by setattr when we are about to truncate a file from a non-zero
	 * size to a zero size.  This tries to flush down new bytes that may
	 * have been written if the application were using truncate to replace
	 * a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}

static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
	 * multiple disks using raid profile, a large IO can be split to
	 * several segments of stripe length (currently 64K).
	 */
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private && private->filldir_buf)
		kfree(private->filldir_buf);
	kfree(private);
	filp->private_data = NULL;

	/*
	 * Set by setattr when we are about to truncate a file from a non-zero
	 * size to a zero size.  This tries to flush down new bytes that may
	 * have been written if the application were using truncate to replace
	 * a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}

static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible.  Especially in
	 * case of multiple disks using a raid profile, a large IO can be split
	 * into several segments of stripe length (currently 64K).
	 */
	blk_start_plug(&plug);
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = btrfs_fdatawrite_range(inode, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	blk_finish_plug(&plug);

	return ret;
}
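/*
 * Illustrative note (not from the original source): both fsync(2) and
 * fdatasync(2) arrive at btrfs_sync_file() below; for fdatasync the VFS
 * passes datasync == 1, which merely relaxes which inode fields must be
 * made durable.  A typical caller looks like:
 *
 *	if (fsync(fd) == -1)
 *		err(1, "fsync");
 */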
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates in
 * the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_log_ctx ctx;
	int ret = 0, err;
	u64 len;
	bool full_sync;

	trace_btrfs_sync_file(file, datasync);

	btrfs_init_log_ctx(&ctx, inode);

	/*
	 * Always set the range to a full range, otherwise we can get into
	 * several problems, from missing file extent items to represent holes
	 * when not using the NO_HOLES feature, to log tree corruption due to
	 * races between hole detection during logging and completion of ordered
	 * extents outside the range, to missing checksums due to ordered extents
	 * for which we flushed only a subset of their pages.
	 */
	start = 0;
	end = LLONG_MAX;
	len = (u64)LLONG_MAX + 1;

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so the flushing can be done by multiple
	 * tasks, which improves performance.  See btrfs_wait_ordered_range
	 * for an explanation of the ASYNC check.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret)
		goto out;

	inode_lock(inode);

	/*
	 * We take the dio_sem here because the tree log stuff can race with
	 * lockless dio writes and get an extent map logged for an extent we
	 * never waited on.  We need it this high up for lockdep reasons.
	 */
	down_write(&BTRFS_I(inode)->dio_sem);

	atomic_inc(&root->log_batch);

	/*
	 * Always check for the full sync flag while holding the inode's lock,
	 * to avoid races with other tasks.  The flag must be either set all the
	 * time during logging or always off all the time while logging.
	 */
	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &BTRFS_I(inode)->runtime_flags);

	/*
	 * Before we acquired the inode's lock, someone may have dirtied more
	 * pages in the target range.  We need to make sure that writeback for
	 * any such pages does not start while we are logging the inode, because
	 * if it does, any of the following might happen when we are not doing a
	 * full inode sync:
	 *
	 * 1) We log an extent after its writeback finishes but before its
	 *    checksums are added to the csum tree, leading to -EIO errors
	 *    when attempting to read the extent after a log replay.
	 *
	 * 2) We can end up logging an extent before its writeback finishes.
	 *    Therefore after the log replay we will have a file extent item
	 *    pointing to an unwritten extent (and no data checksums as well).
	 *
	 * So trigger writeback for any eventual new dirty pages and then we
	 * wait for all ordered extents to complete below.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret) {
		up_write(&BTRFS_I(inode)->dio_sem);
		inode_unlock(inode);
		goto out;
	}

	/*
	 * We have to do this here to avoid the priority inversion of waiting on
	 * IO of a lower priority task while holding a transaction open.
	 *
	 * For a full fsync we wait for the ordered extents to complete while
	 * for a fast fsync we wait just for writeback to complete, and then
	 * attach the ordered extents to the transaction so that a transaction
	 * commit waits for their completion, to avoid data loss if we fsync,
	 * the current transaction commits before the ordered extents complete,
	 * and a power failure happens right after that.
	 */
	if (full_sync) {
		ret = btrfs_wait_ordered_range(inode, start, len);
	} else {
		/*
		 * Get our ordered extents as soon as possible to avoid doing
		 * checksum lookups in the csum tree, and use instead the
		 * checksums attached to the ordered extents.
		 */
		btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
						      &ctx.ordered_extents);
		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
	}

	if (ret)
		goto out_release_extents;

	atomic_inc(&root->log_batch);

	/*
	 * If we are doing a fast fsync we can not bail out if the inode's
	 * last_trans is <= the last committed transaction, because we only
	 * update the last_trans of the inode during ordered extent completion,
	 * and for a fast fsync we don't wait for that, we only wait for the
	 * writeback to complete.
	 */
	smp_mb();
	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
	    (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
	     (full_sync || list_empty(&ctx.ordered_extents)))) {
		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		/*
		 * An ordered extent might have started before and completed
		 * already with io errors, in which case the inode was not
		 * updated and we end up here.  So check the inode's mapping
		 * for any errors that might have happened since fsync was
		 * last called.
		 */
		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
		goto out_release_extents;
	}

	/*
	 * We use start here because we will need to wait on the IO to complete
	 * in btrfs_sync_log, which could require joining a transaction (for
	 * example checking cross references in the nocow path).  If we use join
	 * here we could get into a situation where we're waiting on IO to
	 * happen that is blocked on a transaction trying to commit.  With start
	 * we inc the extwriter counter, so we wait for all extwriters to exit
	 * before we start blocking joiners.  This comment is to keep somebody
	 * from thinking they are super smart and changing this to
	 * btrfs_join_transaction *cough*Josef*cough*.
	 */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release_extents;
	}

	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
	btrfs_release_log_ctx_extents(&ctx);
	if (ret < 0) {
		/* Fallthrough and commit/free transaction. */
		ret = 1;
	}

	/*
	 * We've logged all the items and now have a consistent version of the
	 * file in the log.  It is possible that someone will come in and
	 * modify the file, but that's fine because the log is consistent on
	 * disk, and we have references to all of the file's extents.
	 *
	 * It is possible that someone will come in and log the file again,
	 * but that will end up using the synchronization inside
	 * btrfs_sync_log to keep things safe.
	 */
	up_write(&BTRFS_I(inode)->dio_sem);
	inode_unlock(inode);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (!ret) {
			ret = btrfs_sync_log(trans, root, &ctx);
			if (!ret) {
				ret = btrfs_end_transaction(trans);
				goto out;
			}
		}
		if (!full_sync) {
			ret = btrfs_wait_ordered_range(inode, start, len);
			if (ret) {
				btrfs_end_transaction(trans);
				goto out;
			}
		}
		ret = btrfs_commit_transaction(trans);
	} else {
		ret = btrfs_end_transaction(trans);
	}
out:
	ASSERT(list_empty(&ctx.list));
	err = file_check_and_advance_wb_err(file);
	if (!ret)
		ret = err;
	return ret > 0 ? -EIO : ret;

out_release_extents:
	btrfs_release_log_ctx_extents(&ctx);
	up_write(&BTRFS_I(inode)->dio_sem);
	inode_unlock(inode);
	goto out;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}

static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}
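/*
 * Illustrative note (not from the original source): without the NO_HOLES
 * feature every hole in a file must be represented on disk.  fill_holes()
 * below does that after a range was dropped: e.g. punching out [4K, 8K) of
 * a fully written file either extends an adjacent existing hole extent item
 * (the hole_mergeable() cases above) or inserts a new file extent item with
 * a disk_bytenr of 0 covering [4K, 8K).
 */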
static int fill_holes(struct btrfs_trans_handle *trans,
		      struct btrfs_inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct btrfs_key key;
	int ret;

	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		goto out;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret <= 0) {
		/*
		 * We should have dropped this offset, so if we find it then
		 * something has gone horribly wrong.
		 */
		if (ret == 0)
			ret = -EINVAL;
		return ret;
	}

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
		u64 num_bytes;

		key.offset = offset;
		btrfs_set_item_key_safe(fs_info, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->ram_bytes = hole_em->len;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->orig_block_len = 0;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em, 1);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&inode->runtime_flags);
	}

	return 0;
}
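/*
 * Worked example for find_first_non_hole() below (illustrative, not from the
 * original source): with 4K sectors, assume the cached hole extent map covers
 * [0, 16K) and the caller passes *start = 4K, *len = 64K.  The function
 * returns 1 and adjusts *start to 16K and *len to 52K, i.e. it skips the
 * leading hole so callers don't operate on what is already a hole.
 */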
/*
 * Find a hole extent on the given inode and change start/len to the end of
 * the hole extent (a hole or "vacuum" extent whose em->start <= start and
 * em->start + em->len > start).
 * When a hole extent is found, return 1 and modify start/len.
 */
static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	int ret = 0;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
			      round_down(*start, fs_info->sectorsize),
			      round_up(*len, fs_info->sectorsize));
	if (IS_ERR(em))
		return PTR_ERR(em);

	/* Hole or vacuum extent (the latter only exists in NO_HOLES mode) */
	if (em->block_start == EXTENT_MAP_HOLE) {
		ret = 1;
		*len = em->start + em->len > *start + *len ?
		       0 : *start + *len - em->start - em->len;
		*start = em->start + em->len;
	}
	free_extent_map(em);
	return ret;
}

static int btrfs_punch_hole_lock_range(struct inode *inode,
				       const u64 lockstart,
				       const u64 lockend,
				       struct extent_state **cached_state)
{
	while (1) {
		struct btrfs_ordered_extent *ordered;
		int ret;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 cached_state);
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
							    lockend);

		/*
		 * We need to make sure we have no ordered extents in this range
		 * and nobody raced in and read a page in this range, if we did
		 * we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->num_bytes <= lockstart ||
		     ordered->file_offset > lockend)) &&
		     !filemap_range_has_page(inode->i_mapping,
					     lockstart, lockend)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, cached_state);
		ret = btrfs_wait_ordered_range(inode, lockstart,
					       lockend - lockstart + 1);
		if (ret)
			return ret;
	}
	return 0;
}
static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode,
				       struct btrfs_path *path,
				       struct btrfs_replace_extent_info *extent_info,
				       const u64 replace_len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int slot;
	struct btrfs_ref ref = { 0 };
	int ret;

	if (replace_len == 0)
		return 0;

	if (extent_info->disk_offset == 0 &&
	    btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = extent_info->file_offset;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_file_extent_item));
	if (ret)
		return ret;
	leaf = path->nodes[0];
	slot = path->slots[0];
	write_extent_buffer(leaf, extent_info->extent_buf,
			    btrfs_item_ptr_offset(leaf, slot),
			    sizeof(struct btrfs_file_extent_item));
	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
	if (extent_info->is_new_extent)
		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
			extent_info->file_offset, replace_len);
	if (ret)
		return ret;

	/* If it's a hole, nothing more needs to be done. */
	if (extent_info->disk_offset == 0)
		return 0;

	inode_add_bytes(inode, replace_len);

	if (extent_info->is_new_extent && extent_info->insertions == 0) {
		key.objectid = extent_info->disk_offset;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = extent_info->disk_len;
		ret = btrfs_alloc_reserved_file_extent(trans, root,
						       btrfs_ino(BTRFS_I(inode)),
						       extent_info->file_offset,
						       extent_info->qgroup_reserved,
						       &key);
	} else {
		u64 ref_offset;

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
				       extent_info->disk_offset,
				       extent_info->disk_len, 0);
		ref_offset = extent_info->file_offset - extent_info->data_offset;
		btrfs_init_data_ref(&ref, root->root_key.objectid,
				    btrfs_ino(BTRFS_I(inode)), ref_offset);
		ret = btrfs_inc_extent_ref(trans, &ref);
	}

	extent_info->insertions++;

	return ret;
}

/*
 * The respective range must have been previously locked, as well as the inode.
 * The end offset is inclusive (last byte of the range).
 * @extent_info is NULL for fallocate's hole punching and non-NULL when
 * replacing the file range with an extent.
 * When not punching a hole, we don't want to end up in a state where we
 * dropped extents without inserting a new one, so we must abort the
 * transaction to avoid a corruption.
 */
int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
			       const u64 start, const u64 end,
			       struct btrfs_replace_extent_info *extent_info,
			       struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
	u64 ino_size = round_up(inode->i_size, fs_info->sectorsize);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_block_rsv *rsv;
	unsigned int rsv_count;
	u64 cur_offset;
	u64 drop_end;
	u64 len = end - start;
	int ret = 0;

	if (end <= start)
		return -EINVAL;

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out;
	}
	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent if no_holes isn't set or if we are
	 *     replacing the range with a new extent
	 */
	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
		rsv_count = 3;
	else
		rsv_count = 2;

	trans = btrfs_start_transaction(root, rsv_count);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, false);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	cur_offset = start;
	while (cur_offset < end) {
		ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path,
					   cur_offset, end + 1, &drop_end,
					   1, 0, 0, NULL);
		if (ret != -ENOSPC) {
			/*
			 * When cloning we want to avoid transaction aborts when
			 * nothing was done and we are attempting to clone parts
			 * of inline extents, in such cases -EOPNOTSUPP is
			 * returned by __btrfs_drop_extents() without having
			 * changed anything in the file.
2663 */ 2664 if (extent_info && !extent_info->is_new_extent && 2665 ret && ret != -EOPNOTSUPP) 2666 btrfs_abort_transaction(trans, ret); 2667 break; 2668 } 2669 2670 trans->block_rsv = &fs_info->trans_block_rsv; 2671 2672 if (!extent_info && cur_offset < drop_end && 2673 cur_offset < ino_size) { 2674 ret = fill_holes(trans, BTRFS_I(inode), path, 2675 cur_offset, drop_end); 2676 if (ret) { 2677 /* 2678 * If we failed then we didn't insert our hole 2679 * entries for the area we dropped, so now the 2680 * fs is corrupted, so we must abort the 2681 * transaction. 2682 */ 2683 btrfs_abort_transaction(trans, ret); 2684 break; 2685 } 2686 } else if (!extent_info && cur_offset < drop_end) { 2687 /* 2688 * We are past the i_size here, but since we didn't 2689 * insert holes we need to clear the mapped area so we 2690 * know to not set disk_i_size in this area until a new 2691 * file extent is inserted here. 2692 */ 2693 ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode), 2694 cur_offset, drop_end - cur_offset); 2695 if (ret) { 2696 /* 2697 * We couldn't clear our area, so we could 2698 * presumably adjust up and corrupt the fs, so 2699 * we need to abort. 2700 */ 2701 btrfs_abort_transaction(trans, ret); 2702 break; 2703 } 2704 } 2705 2706 if (extent_info && drop_end > extent_info->file_offset) { 2707 u64 replace_len = drop_end - extent_info->file_offset; 2708 2709 ret = btrfs_insert_replace_extent(trans, inode, path, 2710 extent_info, replace_len); 2711 if (ret) { 2712 btrfs_abort_transaction(trans, ret); 2713 break; 2714 } 2715 extent_info->data_len -= replace_len; 2716 extent_info->data_offset += replace_len; 2717 extent_info->file_offset += replace_len; 2718 } 2719 2720 cur_offset = drop_end; 2721 2722 ret = btrfs_update_inode(trans, root, inode); 2723 if (ret) 2724 break; 2725 2726 btrfs_end_transaction(trans); 2727 btrfs_btree_balance_dirty(fs_info); 2728 2729 trans = btrfs_start_transaction(root, rsv_count); 2730 if (IS_ERR(trans)) { 2731 ret = PTR_ERR(trans); 2732 trans = NULL; 2733 break; 2734 } 2735 2736 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 2737 rsv, min_size, false); 2738 BUG_ON(ret); /* shouldn't happen */ 2739 trans->block_rsv = rsv; 2740 2741 if (!extent_info) { 2742 ret = find_first_non_hole(inode, &cur_offset, &len); 2743 if (unlikely(ret < 0)) 2744 break; 2745 if (ret && !len) { 2746 ret = 0; 2747 break; 2748 } 2749 } 2750 } 2751 2752 /* 2753 * If we were cloning, force the next fsync to be a full one since we 2754 * we replaced (or just dropped in the case of cloning holes when 2755 * NO_HOLES is enabled) extents and extent maps. 2756 * This is for the sake of simplicity, and cloning into files larger 2757 * than 16Mb would force the full fsync any way (when 2758 * try_release_extent_mapping() is invoked during page cache truncation. 2759 */ 2760 if (extent_info && !extent_info->is_new_extent) 2761 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 2762 &BTRFS_I(inode)->runtime_flags); 2763 2764 if (ret) 2765 goto out_trans; 2766 2767 trans->block_rsv = &fs_info->trans_block_rsv; 2768 /* 2769 * If we are using the NO_HOLES feature we might have had already an 2770 * hole that overlaps a part of the region [lockstart, lockend] and 2771 * ends at (or beyond) lockend. 
	if (drop_end <= end)
		drop_end = end + 1;
	/*
	 * Don't insert file hole extent item if it's for a range beyond eof
	 * (because it's useless) or if it represents a 0 bytes range (when
	 * cur_offset == drop_end).
	 */
	if (!extent_info && cur_offset < ino_size && cur_offset < drop_end) {
		ret = fill_holes(trans, BTRFS_I(inode), path,
				 cur_offset, drop_end);
		if (ret) {
			/* Same comment as above. */
			btrfs_abort_transaction(trans, ret);
			goto out_trans;
		}
	} else if (!extent_info && cur_offset < drop_end) {
		/* See the comment in the loop above for the reasoning here. */
		ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
					cur_offset, drop_end - cur_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_trans;
		}

	}
	if (extent_info) {
		ret = btrfs_insert_replace_extent(trans, inode, path, extent_info,
						  extent_info->data_len);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_trans;
		}
	}

out_trans:
	if (!trans)
		goto out_free;

	trans->block_rsv = &fs_info->trans_block_rsv;
	if (ret)
		btrfs_end_transaction(trans);
	else
		*trans_out = trans;
out_free:
	btrfs_free_block_rsv(fs_info, rsv);
out:
	return ret;
}
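/*
 * Illustrative note (not from the original source): hole punching must be
 * requested together with FALLOC_FL_KEEP_SIZE, e.g.:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192);
 *
 * which zeroes [4096, 12288) and, where the range covers whole blocks,
 * releases the underlying extents.  Partial blocks at either end are zeroed
 * in place by btrfs_truncate_block() in the function below.
 */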
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	u64 lockstart;
	u64 lockend;
	u64 tail_start;
	u64 tail_len;
	u64 orig_start = offset;
	int ret = 0;
	bool same_block;
	u64 ino_size;
	bool truncated_block = false;
	bool updated_inode = false;

	ret = btrfs_wait_ordered_range(inode, offset, len);
	if (ret)
		return ret;

	inode_lock(inode);
	ino_size = round_up(inode->i_size, fs_info->sectorsize);
	ret = find_first_non_hole(inode, &offset, &len);
	if (ret < 0)
		goto out_only_mutex;
	if (ret && !len) {
		/* Already in a large hole */
		ret = 0;
		goto out_only_mutex;
	}

	lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
	lockend = round_down(offset + len,
			     btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
	/*
	 * We needn't truncate any block which is beyond the end of the file
	 * because we are sure there is no data there.
	 */
	/*
	 * Only do this if we are in the same block and we aren't doing the
	 * entire block.
	 */
	if (same_block && len < fs_info->sectorsize) {
		if (offset < ino_size) {
			truncated_block = true;
			ret = btrfs_truncate_block(inode, offset, len, 0);
		} else {
			ret = 0;
		}
		goto out_only_mutex;
	}

	/* zero back part of the first block */
	if (offset < ino_size) {
		truncated_block = true;
		ret = btrfs_truncate_block(inode, offset, 0, 0);
		if (ret) {
			inode_unlock(inode);
			return ret;
		}
	}

	/*
	 * Check the aligned pages after the first unaligned page.  If
	 * offset != orig_start, the first unaligned page and several
	 * following pages are already in holes, so the extra check can
	 * be skipped.
	 */
	if (offset == orig_start) {
		/* after truncating the first page, check for holes again */
		len = offset + len - lockstart;
		offset = lockstart;
		ret = find_first_non_hole(inode, &offset, &len);
		if (ret < 0)
			goto out_only_mutex;
		if (ret && !len) {
			ret = 0;
			goto out_only_mutex;
		}
		lockstart = offset;
	}

	/* Check if the unaligned tail part is in a hole */
	tail_start = lockend + 1;
	tail_len = offset + len - tail_start;
	if (tail_len) {
		ret = find_first_non_hole(inode, &tail_start, &tail_len);
		if (unlikely(ret < 0))
			goto out_only_mutex;
		if (!ret) {
			/* zero the front end of the last page */
			if (tail_start + tail_len < ino_size) {
				truncated_block = true;
				ret = btrfs_truncate_block(inode,
							   tail_start + tail_len,
							   0, 1);
				if (ret)
					goto out_only_mutex;
			}
		}
	}

	if (lockend < lockstart) {
		ret = 0;
		goto out_only_mutex;
	}

	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
					  &cached_state);
	if (ret)
		goto out_only_mutex;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_replace_file_extents(inode, path, lockstart, lockend, NULL,
					 &trans);
	btrfs_free_path(path);
	if (ret)
		goto out;

	ASSERT(trans != NULL);
	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, inode);
	updated_inode = true;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state);
out_only_mutex:
	if (!updated_inode && truncated_block && !ret) {
		/*
		 * If we only end up zeroing part of a page, we still need to
		 * update the inode item, so that all the time fields are
		 * updated as well as the necessary btrfs inode in memory fields
		 * for detecting, at fsync time, if the inode isn't yet in the
		 * log tree or it's there but not up to date.
		 */
		struct timespec64 now = current_time(inode);

		inode_inc_iversion(inode);
		inode->i_mtime = now;
		inode->i_ctime = now;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			int ret2;

			ret = btrfs_update_inode(trans, root, inode);
			ret2 = btrfs_end_transaction(trans);
			if (!ret)
				ret = ret2;
		}
	}
	inode_unlock(inode);
	return ret;
}

/* Helper structure to record which range is already reserved */
struct falloc_range {
	struct list_head list;
	u64 start;
	u64 len;
};

/*
 * Helper function to add a falloc range
 *
 * Caller should have locked the larger range of extents containing
 * [start, len)
 */
static int add_falloc_range(struct list_head *head, u64 start, u64 len)
{
	struct falloc_range *prev = NULL;
	struct falloc_range *range = NULL;

	if (list_empty(head))
		goto insert;

	/*
	 * As fallocate iterates by bytenr order, we only need to check
	 * the last range.
	 */
	prev = list_entry(head->prev, struct falloc_range, list);
	if (prev->start + prev->len == start) {
		prev->len += len;
		return 0;
	}
insert:
	range = kmalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;
	range->start = start;
	range->len = len;
	list_add_tail(&range->list, head);
	return 0;
}
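/*
 * Worked example for add_falloc_range() above (illustrative, not from the
 * original source): after add_falloc_range(head, 0, 4096), a second call
 * add_falloc_range(head, 4096, 8192) allocates nothing; it just extends the
 * previous entry to cover [0, 12288), because fallocate walks the file in
 * increasing offset order and only the last range can be adjacent.
 */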
static int btrfs_fallocate_update_isize(struct inode *inode,
					const u64 end,
					const int mode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	int ret2;

	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
		return 0;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	inode->i_ctime = current_time(inode);
	i_size_write(inode, end);
	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode(trans, root, inode);
	ret2 = btrfs_end_transaction(trans);

	return ret ? ret : ret2;
}

enum {
	RANGE_BOUNDARY_WRITTEN_EXTENT,
	RANGE_BOUNDARY_PREALLOC_EXTENT,
	RANGE_BOUNDARY_HOLE,
};

static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
						 u64 offset)
{
	const u64 sectorsize = btrfs_inode_sectorsize(inode);
	struct extent_map *em;
	int ret;

	offset = round_down(offset, sectorsize);
	em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
	if (IS_ERR(em))
		return PTR_ERR(em);

	if (em->block_start == EXTENT_MAP_HOLE)
		ret = RANGE_BOUNDARY_HOLE;
	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
	else
		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;

	free_extent_map(em);
	return ret;
}
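/*
 * Worked example for the boundary checks below (illustrative, not from the
 * original source): for a zero-range request starting at offset 6K with 4K
 * sectors, the boundary block is [4K, 8K).  If that block maps to a written
 * extent we must zero [6K, 8K) in place via btrfs_truncate_block(); if it is
 * a hole we simply extend the allocation range down to 4K; if it is a
 * prealloc extent there is nothing to do for that block.
 */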
static int btrfs_zero_range(struct inode *inode,
			    loff_t offset,
			    loff_t len,
			    const int mode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct extent_map *em;
	struct extent_changeset *data_reserved = NULL;
	int ret;
	u64 alloc_hint = 0;
	const u64 sectorsize = btrfs_inode_sectorsize(BTRFS_I(inode));
	u64 alloc_start = round_down(offset, sectorsize);
	u64 alloc_end = round_up(offset + len, sectorsize);
	u64 bytes_to_reserve = 0;
	bool space_reserved = false;

	inode_dio_wait(inode);

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
			      alloc_end - alloc_start);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}

	/*
	 * Avoid hole punching and extent allocation for some cases.  More
	 * cases could be considered, but these are unlikely to be common and
	 * we keep things as simple as possible for now.  Also, intentionally,
	 * if the target range contains one or more prealloc extents together
	 * with regular extents and holes, we drop all the existing extents
	 * and allocate a new prealloc extent, so that we get a larger
	 * contiguous disk extent.
	 */
	if (em->start <= alloc_start &&
	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
		const u64 em_end = em->start + em->len;

		if (em_end >= offset + len) {
			/*
			 * The whole range is already a prealloc extent,
			 * do nothing except updating the inode's i_size if
			 * needed.
			 */
			free_extent_map(em);
			ret = btrfs_fallocate_update_isize(inode, offset + len,
							   mode);
			goto out;
		}
		/*
		 * Part of the range is already a prealloc extent, so operate
		 * only on the remaining part of the range.
		 */
		alloc_start = em_end;
		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
		len = offset + len - alloc_start;
		offset = alloc_start;
		alloc_hint = em->block_start + em->len;
	}
	free_extent_map(em);

	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
				      sectorsize);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			free_extent_map(em);
			ret = btrfs_fallocate_update_isize(inode, offset + len,
							   mode);
			goto out;
		}
		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
			free_extent_map(em);
			ret = btrfs_truncate_block(inode, offset, len, 0);
			if (!ret)
				ret = btrfs_fallocate_update_isize(inode,
								   offset + len,
								   mode);
			return ret;
		}
		free_extent_map(em);
		alloc_start = round_down(offset, sectorsize);
		alloc_end = alloc_start + sectorsize;
		goto reserve_space;
	}

	alloc_start = round_up(offset, sectorsize);
	alloc_end = round_down(offset + len, sectorsize);

	/*
	 * For unaligned ranges, check the pages at the boundaries, they might
	 * map to an extent, in which case we need to partially zero them, or
	 * they might map to a hole, in which case we need our allocation range
	 * to cover them.
	 */
	if (!IS_ALIGNED(offset, sectorsize)) {
		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
							    offset);
		if (ret < 0)
			goto out;
		if (ret == RANGE_BOUNDARY_HOLE) {
			alloc_start = round_down(offset, sectorsize);
			ret = 0;
		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
			ret = btrfs_truncate_block(inode, offset, 0, 0);
			if (ret)
				goto out;
		} else {
			ret = 0;
		}
	}

	if (!IS_ALIGNED(offset + len, sectorsize)) {
		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
							    offset + len);
		if (ret < 0)
			goto out;
		if (ret == RANGE_BOUNDARY_HOLE) {
			alloc_end = round_up(offset + len, sectorsize);
			ret = 0;
		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
			ret = btrfs_truncate_block(inode, offset + len, 0, 1);
			if (ret)
				goto out;
		} else {
			ret = 0;
		}
	}

reserve_space:
	if (alloc_start < alloc_end) {
		struct extent_state *cached_state = NULL;
		const u64 lockstart = alloc_start;
		const u64 lockend = alloc_end - 1;

		bytes_to_reserve = alloc_end - alloc_start;
		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
						      bytes_to_reserve);
		if (ret < 0)
			goto out;
		space_reserved = true;
		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
						  &cached_state);
		if (ret)
			goto out;
		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
						alloc_start, bytes_to_reserve);
		if (ret)
			goto out;
		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
						alloc_end - alloc_start,
						i_blocksize(inode),
						offset + len, &alloc_hint);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state);
		/* btrfs_prealloc_file_range releases reserved space on error */
		if (ret) {
			space_reserved = false;
			goto out;
		}
	}
	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
out:
	if (ret && space_reserved)
		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
					       alloc_start, bytes_to_reserve);
	extent_changeset_free(data_reserved);

	return ret;
}
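/*
 * Illustrative note (not from the original source): typical fallocate(2)
 * calls served by btrfs_fallocate() below:
 *
 *	fallocate(fd, 0, 0, 1 << 20);                     - preallocate 1MiB
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, 4096, 8192);  - zero a subrange
 *
 * Plain preallocation extends i_size when the range goes past EOF, unless
 * FALLOC_FL_KEEP_SIZE is also given.
 */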
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct falloc_range *range;
	struct falloc_range *tmp;
	struct list_head reserve_list;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 actual_end = 0;
	struct extent_map *em;
	int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode));
	int ret;

	alloc_start = round_down(offset, blocksize);
	alloc_end = round_up(offset + len, blocksize);
	cur_offset = alloc_start;

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Only trigger disk allocation, don't trigger qgroup reserve.
	 *
	 * For qgroup space, it will be checked later.
	 */
	if (!(mode & FALLOC_FL_ZERO_RANGE)) {
		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
						      alloc_end - alloc_start);
		if (ret < 0)
			return ret;
	}

	inode_lock(inode);

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out;
	}

	/*
	 * TODO: Move these two operations after we have checked
	 * accurate reserved space, or fallocate can still fail but
	 * with page truncated or size expanded.
	 *
	 * But that's a minor problem and won't do much harm BTW.
	 */
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	} else if (offset + len > inode->i_size) {
		/*
		 * If we are fallocating from the end of the file onward we
		 * need to zero out the end of the block if i_size lands in the
		 * middle of a block.
		 */
		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
		if (ret)
			goto out;
	}

	/*
	 * Wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	ret = btrfs_wait_ordered_range(inode, alloc_start,
				       alloc_end - alloc_start);
	if (ret)
		goto out;

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = btrfs_zero_range(inode, offset, len, mode);
		inode_unlock(inode);
		return ret;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/*
		 * The extent lock is ordered inside the running
		 * transaction.
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
							    locked_end);

		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state);
			/*
			 * We can't wait on the range with the transaction
			 * running or with the extent lock held.
			 */
			ret = btrfs_wait_ordered_range(inode, alloc_start,
						       alloc_end - alloc_start);
			if (ret)
				goto out;
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	/* First, check if we exceed the qgroup limit */
	INIT_LIST_HEAD(&reserve_list);
	while (cur_offset < alloc_end) {
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
				      alloc_end - cur_offset);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = ALIGN(last_byte, blocksize);
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = add_falloc_range(&reserve_list, cur_offset,
					       last_byte - cur_offset);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
					&data_reserved, cur_offset,
					last_byte - cur_offset);
			if (ret < 0) {
				cur_offset = last_byte;
				free_extent_map(em);
				break;
			}
		} else {
			/*
			 * Do not need to reserve unwritten extent for this
			 * range, free reserved data space first, otherwise
			 * it'll result in false ENOSPC error.
			 */
			btrfs_free_reserved_data_space(BTRFS_I(inode),
					data_reserved, cur_offset,
					last_byte - cur_offset);
		}
		free_extent_map(em);
		cur_offset = last_byte;
	}

	/*
	 * If ret is still 0 we're OK to fallocate; otherwise just clean up
	 * the list and exit.
	 */
	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
		if (!ret)
			ret = btrfs_prealloc_file_range(inode, mode,
					range->start,
					range->len, i_blocksize(inode),
					offset + len, &alloc_hint);
		else
			btrfs_free_reserved_data_space(BTRFS_I(inode),
					data_reserved, range->start,
					range->len);
		list_del(&range->list);
		kfree(range);
	}
	if (ret < 0)
		goto out_unlock;

	/*
	 * We didn't need to allocate any more space, but we still extended the
	 * size of the file so we need to update i_size and the inode item.
	 */
	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state);
out:
	inode_unlock(inode);
	/* Let go of our reservation. */
	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
				cur_offset, alloc_end - cur_offset);
	extent_changeset_free(data_reserved);
	return ret;
}

static loff_t find_desired_extent(struct inode *inode, loff_t offset,
				  int whence)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	loff_t i_size = inode->i_size;
	u64 lockstart;
	u64 lockend;
	u64 start;
	u64 len;
	int ret = 0;

	if (i_size == 0 || offset >= i_size)
		return -ENXIO;

	/*
	 * offset can be negative; in this case we start finding DATA/HOLE from
	 * the very start of the file.
	 */
	start = max_t(loff_t, 0, offset);

	lockstart = round_down(start, fs_info->sectorsize);
	lockend = round_up(i_size, fs_info->sectorsize);
	if (lockend <= lockstart)
		lockend = lockstart + fs_info->sectorsize;
	lockend--;
	len = lockend - lockstart + 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			 &cached_state);

	while (start < i_size) {
		em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}

		if (whence == SEEK_HOLE &&
		    (em->block_start == EXTENT_MAP_HOLE ||
		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;
		else if (whence == SEEK_DATA &&
			 (em->block_start != EXTENT_MAP_HOLE &&
			  !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;

		start = em->start + em->len;
		free_extent_map(em);
		em = NULL;
		cond_resched();
	}
	free_extent_map(em);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state);
	if (ret) {
		offset = ret;
	} else {
		if (whence == SEEK_DATA && start >= i_size)
			offset = -ENXIO;
		else
			offset = min_t(loff_t, start, i_size);
	}

	return offset;
}

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = find_desired_extent(inode, offset, whence);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;

	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

static int btrfs_file_open(struct inode *inode, struct file *filp)
{
	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return generic_file_open(inode, filp);
}

static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret = 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct inode *inode = file_inode(iocb->ki_filp);

		inode_lock_shared(inode);
		ret = btrfs_direct_IO(iocb, to);
		inode_unlock_shared(inode);
		if (ret < 0 || !iov_iter_count(to) ||
		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
			return ret;
	}

	return generic_file_buffered_read(iocb, to, ret);
}
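/*
 * Illustrative note (not from the original source): the SEEK_DATA/SEEK_HOLE
 * support implemented by find_desired_extent() above lets tools skip holes
 * when copying sparse files, e.g.:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * copy only [data, hole), then repeat from "hole" until lseek fails with
 * ENXIO (no more data before EOF).
 */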
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read_iter	= btrfs_file_read_iter,
	.splice_read	= generic_file_splice_read,
	.write_iter	= btrfs_file_write_iter,
	.splice_write	= iter_file_splice_write,
	.mmap		= btrfs_file_mmap,
	.open		= btrfs_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.remap_file_range = btrfs_remap_file_range,
};

void __cold btrfs_auto_defrag_exit(void)
{
	kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int __init btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}

int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
{
	int ret;

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);

	return ret;
}