// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	bool data_space_reserved;
	bool nocow_done;
};

struct btrfs_dio_private {
	struct btrfs_inode *inode;

	/*
	 * Since DIO can use anonymous page, we cannot use page_offset() to
	 * grab the file offset, thus need a dedicated member for file offset.
	 */
	u64 file_offset;
	/* Used for bio::bi_size */
	u32 bytes;

	/*
	 * References to this structure. There is one reference per in-flight
	 * bio plus one while we're still setting up.
	 */
	refcount_t refs;

	/* Array of checksums */
	u8 *csums;

	/* This must be last */
	struct bio bio;
};

static struct bio_set btrfs_dio_bioset;
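/*
 * Note: embedding "struct bio" as the final member lets a DIO private be
 * allocated together with its bio from btrfs_dio_bioset. A minimal sketch of
 * how such a bioset is typically initialized (the pool size and flags shown
 * here are assumptions for illustration, not necessarily what btrfs uses):
 *
 *	ret = bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
 *			  offsetof(struct btrfs_dio_private, bio),
 *			  BIOSET_NEED_BVECS);
 *
 * bio_alloc_bioset() then hands back the embedded bio, and container_of() on
 * that bio recovers the surrounding btrfs_dio_private.
 */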
struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock,
				   u64 *done_offset);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* Output without objectid, which is more meaningful */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	}
}

/*
 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
 *
 * ilock_flags can have the following bits set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, if it fails on the first attempt
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}

/*
 * btrfs_inode_unlock - unlock inode i_rwsem
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}
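/*
 * Usage sketch for the two helpers above (a minimal illustration, not taken
 * from a specific call site): a nowait shared-lock path could do
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *	if (ret)
 *		return ret;	(-EAGAIN, the caller retries without NOWAIT)
 *	...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 *
 * The same BTRFS_ILOCK_SHARED / BTRFS_ILOCK_MMAP bits must be passed to
 * btrfs_inode_unlock() so it releases the lock in the matching mode;
 * BTRFS_ILOCK_TRY only affects acquisition.
 */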
/*
 * Clean up all submitted ordered extents in the specified range to handle
 * errors from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: on error the caller must not call extent_clear_unlock_delalloc() with
 * both EXTENT_DO_ACCOUNTING and EXTENT_DELALLOC set, because that releases the
 * reserved metadata, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start, page_end;
	struct page *page;

	if (locked_page) {
		page_start = page_offset(locked_page);
		page_end = page_start + PAGE_SIZE - 1;
	}

	while (index <= end_index) {
		/*
		 * For the locked page, we will call end_extent_writepage() on
		 * it in run_delalloc_range() for the error handling, and that
		 * end_extent_writepage() will call
		 * btrfs_mark_ordered_io_finished() to clear the page Ordered
		 * bit and run the ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent would never
		 * finish.
		 */
		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
					       offset, bytes);
		put_page(page);
	}

	if (locked_page) {
		/* The locked page covers the full range, nothing needs to be done */
		if (bytes + offset <= page_start + PAGE_SIZE)
			return;
		/*
		 * In case this page belongs to the delalloc range being
		 * instantiated then skip it, since the first page of a range
		 * is going to be properly cleaned up by the caller of
		 * run_delalloc_range().
		 */
		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
			offset = page_offset(locked_page) + PAGE_SIZE;
		}
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}
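/*
 * Worked example for the locked_page adjustment above (assuming 4K pages):
 * with offset = 0, bytes = 64K and locked_page covering [0, 4K), the locked
 * page is skipped in the loop and the range handed to
 * btrfs_mark_ordered_io_finished() shrinks to offset = 4K, bytes = 60K, since
 * the first page is finished by the caller via end_extent_writepage().
 */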
static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work for inserting an inline extent into the btree.
 * The caller should have done a btrfs_drop_extents() so that no overlapping
 * inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_local_page(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_local(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_page(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity's
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}
/*
 * Conditionally insert an inline extent into the file. This does the checks
 * required to make sure the data is small enough to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space: an inline extent won't
	 * count as a data extent, so free the space directly here. At reserve
	 * time it's always aligned to the page size, so just free one page
	 * here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
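/*
 * Worked example for the eligibility checks in cow_file_range_inline() above
 * (assuming a 4K sector size and the default max_inline of 2048 bytes): a
 * 3000-byte file whose data compresses to 1500 bytes qualifies
 * (size <= sectorsize and data_len <= max_inline), while the same file
 * compressing only to 2500 bytes does not, and falls back to a regular
 * extent.
 */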
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

/* Top level async COW context, freed once the last chunk drops its reference. */
struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In the above case, both range A and range B will try to unlock the
	 * full page [0, 64K), so whichever range finishes later will find the
	 * page already unlocked, triggering various page lock requirement
	 * BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * be enabled if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered,
	 * but the trailing partial page will be locked until the full
	 * compression finishes, delaying the writes for the other ranges.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock all delalloc ranges
	 * first to prevent any submitted async extent from unlocking the full
	 * page. That way we can ensure, for the subpage case, that only the
	 * last async_cow unlocks the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}
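/*
 * Note on how the compression type and level are resolved (a reading of the
 * code below, with the mount option given as an assumed example): a per-inode
 * defrag_compress or prop_compress setting overrides the fs-wide default, and
 * compress_file_range() packs the level into the bits above the type when
 * calling down, e.g. with "-o compress=zstd:3" the call becomes
 * btrfs_compress_pages(BTRFS_COMPRESS_ZSTD | (3 << 4), ...).
 */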
/*
 * We create compressed extents in two phases. The first phase compresses a
 * range of pages that have already been locked (both pages and state bits are
 * locked).
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus. The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes. This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int compressed_extents = 0;
	int redirty = 0;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it. This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages,
			 BTRFS_MAX_COMPRESSED / PAGE_SIZE);

	/*
	 * We don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time. So, if the end of the
	 * file is before the start of our current requested range of bytes, we
	 * bail out to the uncompressed cleanup code that can deal with all of
	 * this.
	 *
	 * It isn't really the fastest way to fix things, but this is a very
	 * uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that isn't an
	 * inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For the subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
				 BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS. This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (inode_need_compress(inode, start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			nr_pages = 0;
			goto cont;
		}

		if (inode->defrag_compress)
			compress_type = inode->defrag_compress;
		else if (inode->prop_compress)
			compress_type = inode->prop_compress;

		/*
		 * We need to call clear_page_dirty_for_io on each page in the
		 * range. Otherwise applications with the file mmap'd can
		 * wander in and change the page contents while we are
		 * compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
			redirty = 1;
		}

		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
					   inode->vfs_inode.i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = offset_in_page(total_compressed);
			struct page *page = pages[nr_pages - 1];

			/*
			 * Zero the tail end of the last page, we might be
			 * sending it down to disk.
			 */
			if (offset)
				memzero_page(page, offset, PAGE_SIZE - offset);
			will_compress = 1;
		}
	}
cont:
	/*
	 * Check cow_file_range() for why we don't even try to create an inline
	 * extent for the subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		/* let's try to make an inline extent */
		if (ret || total_in < actual_end) {
			/*
			 * We didn't compress the entire range, try to make an
			 * uncompressed inline extent.
			 */
			ret = cow_file_range_inline(inode, actual_end,
						    0, BTRFS_COMPRESS_NONE,
						    NULL, false);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(inode, actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;
			unsigned long page_error_op;

			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * Inline extent creation worked or returned an error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);

			/*
			 * Ensure we only free the compressed pages if we have
			 * them allocated, as we can still reach here with
			 * inode_need_compress() == false.
			 */
			if (pages) {
				for (i = 0; i < nr_pages; i++) {
					WARN_ON(pages[i]->mapping);
					put_page(pages[i]);
				}
				kfree(pages);
			}
			return 0;
		}
	}
	if (will_compress) {
		/*
		 * We aren't doing an inline extent, round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things.
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * One last check to make sure the compression is really a win,
		 * compare the page count read with the blocks on disk,
		 * compression must free at least one sector size.
		 */
		total_in = round_up(total_in, fs_info->sectorsize);
		if (total_compressed + blocksize <= total_in) {
			compressed_extents++;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_chunk, start, total_in,
					 total_compressed, pages, nr_pages,
					 compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return compressed_extents;
		}
	}
	if (pages) {
		/*
		 * The compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array.
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(inode->prop_compress)) {
			inode->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far. Redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (async_chunk->locked_page &&
	    page_offset(async_chunk->locked_page) >= start &&
	    page_offset(async_chunk->locked_page) <= end) {
		__set_page_dirty_nobuffers(async_chunk->locked_page);
		/* unlocked later on in the async handlers */
	}

	if (redirty)
		extent_range_redirty_for_io(&inode->vfs_inode, start, end);
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	compressed_extents++;

	return compressed_extents;
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}
static int submit_uncompressed_range(struct btrfs_inode *inode,
				     struct async_extent *async_extent,
				     struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	unsigned long nr_written = 0;
	int page_started = 0;
	int ret;

	/*
	 * Call cow_file_range() to run the delalloc range directly, since we
	 * won't go to the NOCOW or async path again.
	 *
	 * Also we call cow_file_range() with @unlock == 0, so that we can
	 * directly submit the pages without interruption.
	 */
	ret = cow_file_range(inode, locked_page, start, end, &page_started,
			     &nr_written, 0, NULL);
	/* Inline extent inserted, page gets unlocked and everything is done */
	if (page_started) {
		ret = 0;
		goto out;
	}
	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);
			const u64 page_end = page_start + PAGE_SIZE - 1;

			btrfs_page_set_error(inode->root->fs_info, locked_page,
					     page_start, PAGE_SIZE);
			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			end_extent_writepage(locked_page, ret, page_start, page_end);
			unlock_page(locked_page);
		}
		goto out;
	}

	ret = extent_write_locked_range(&inode->vfs_inode, start, end);
	/* All pages will be unlocked, including @locked_page */
out:
	kfree(async_extent);
	return ret;
}
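/*
 * Allocate disk space for one async extent and submit it for writeback.
 *
 * If compression produced no pages, fall back to submit_uncompressed_range().
 * Otherwise reserve a disk extent, create the extent map and the ordered
 * extent, and submit the compressed pages.
 */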
static int submit_one_async_extent(struct btrfs_inode *inode,
				   struct async_chunk *async_chunk,
				   struct async_extent *async_extent,
				   u64 *alloc_hint)
{
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end, NULL);

	/* We have fallen back to an uncompressed write */
	if (!async_extent->pages)
		return submit_uncompressed_range(inode, async_extent, locked_page);

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		free_async_extent_pages(async_extent);
		/*
		 * Here we used to try again by going back to the
		 * non-compressed path for ENOSPC. But if we can't reserve
		 * space even for the compressed size, how could it work for
		 * the uncompressed size, which requires a larger allocation?
		 * So here we go directly to the error path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ret = btrfs_add_ordered_extent(inode, start,		/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (ret) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK);
	if (btrfs_submit_compressed_write(inode, start,	/* file_offset */
			    async_extent->ram_size,	/* num_bytes */
			    ins.objectid,		/* disk_bytenr */
			    ins.offset,			/* compressed_len */
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags,
			    async_chunk->blkcg_css, true)) {
		const u64 start = async_extent->start;
		const u64 end = start + async_extent->ram_size - 1;

		btrfs_writepage_endio_finish_ordered(inode, NULL, start, end, 0);

		extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
					     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
		free_async_extent_pages(async_extent);
	}
	*alloc_hint = ins.objectid + ins.offset;
	kfree(async_extent);
	return ret;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	return ret;
}
/*
 * Phase two of compressed writeback. This is the ordered portion of the code,
 * which only gets called in the order the work was queued. We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	int ret = 0;

	while (!list_empty(&async_chunk->extents)) {
		u64 extent_start;
		u64 ram_size;

		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		extent_start = async_extent->start;
		ram_size = async_extent->ram_size;

		ret = submit_one_async_extent(inode, async_chunk, async_extent,
					      &alloc_hint);
		btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
			    inode->root->root_key.objectid,
			    btrfs_ino(inode), extent_start, ram_size, ret);
	}
}

/* Find an allocation hint from the extent maps around @start. */
static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * If the block start isn't an actual block number then find
		 * the first block in this inode and use that as a hint. If
		 * that block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
/*
 * When extent_io.c finds a delayed allocation range in the file, the callbacks
 * end up in this code. The basic idea is to allocate extents on disk for the
 * range, and create ordered data structs in RAM to track those extents.
 *
 * locked_page is the page that writepage had locked already. We use it to make
 * sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it. It may be clean and already done with IO when we
 * return.
 *
 * When unlock == 1, we unlock the pages in successfully allocated regions.
 * When unlock == 0, we leave them locked for writing them out.
 *
 * However, we unlock all the pages except @locked_page in case of failure.
 *
 * In summary, the page locking state will be as follows:
 *
 * - page_started == 1 (return value)
 *     - All the pages are unlocked. IO is started.
 *     - Note that this can happen only on success.
 * - unlock == 1
 *     - All the pages except @locked_page are unlocked in any case.
 * - unlock == 0
 *     - On success, all the pages are locked for writing them out.
 *     - On failure, all the pages except @locked_page are unlocked.
 *
 * When a failure happens in the second or later iteration of the while-loop,
 * the ordered extents created in previous iterations are kept intact. So, the
 * caller must clean them up by calling btrfs_cleanup_ordered_extents(). See
 * btrfs_run_delalloc_range() for an example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock,
				   u64 *done_offset)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of the page, which means the data
	 * writeback is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done. This means
	 * we can trigger an inline extent even if we didn't want to. So here
	 * we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* let's try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() unlock it, as
			 * it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't use page_started to determine if it's an
			 * inline extent or a compressed extent.
			 */
			unlock_page(locked_page);
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fall back to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and
	 * cannot be split into smaller extents, otherwise relocation breaks
	 * and fails during the stage where it updates the bytenr of file
	 * extent items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ret = btrfs_add_ordered_extent(inode, start, ram_size, ram_size,
					       ins.objectid, cur_alloc_size, 0,
					       1 << BTRFS_ORDERED_REGULAR,
					       BTRFS_COMPRESS_NONE);
		if (ret)
			goto out_drop_extent_cache;

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			/*
			 * Only drop the cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at the out_unlock label to free the metadata of this
			 * ordered extent, as its metadata should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip the current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + ram_size - 1,
							    false);
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits.
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly set up for writepage.
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * On a btrfs_reloc_clone_csums() error, since @start was
		 * increased, extent_clear_unlock_delalloc() at the out_unlock
		 * label won't free the metadata of the current ordered extent,
		 * so we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * If done_offset is non-NULL and ret == -EAGAIN, we expect the
	 * caller to write out the successfully allocated region and retry.
	 */
	if (done_offset && ret == -EAGAIN) {
		if (orig_start < start)
			*done_offset = start - 1;
		else
			*done_offset = start;
		return ret;
	} else if (ret == -EAGAIN) {
		/* Convert to -ENOSPC since the caller cannot retry. */
		ret = -ENOSPC;
	}

	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g.
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of unlock == 0, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (!unlock && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, 0, page_ops);
	}

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
		if (start >= end)
			return ret;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     clear_bits | EXTENT_CLEAR_DATA_RESV,
				     page_ops);
	return ret;
}

/*
 * Work queue callback to start compression on a file range.
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	int compressed_extents;

	async_chunk = container_of(work, struct async_chunk, work);

	compressed_extents = compress_file_range(async_chunk);
	if (compressed_extents == 0) {
		btrfs_add_delayed_iput(async_chunk->inode);
		async_chunk->inode = NULL;
	}
}

/*
 * Work queue callback to submit previously compressed pages.
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	unsigned long nr_pages;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * ->inode could be NULL if async_cow_start has failed to compress, in
	 * which case we don't have anything to submit, yet we need to always
	 * adjust ->async_delalloc_pages as it's paired with the init happening
	 * in cow_file_range_async().
	 */
	if (async_chunk->inode)
		submit_compressed_extents(async_chunk);

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	if (async_chunk->inode)
		btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}
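/*
 * Illustration of how cow_file_range_async() below splits up the work (the
 * sizes are an example, the 512K chunk size comes from the code): a 1.2 MiB
 * delalloc range [0, 1.2M) becomes three async_chunk entries covering
 * [0, 512K), [512K, 1M) and [1M, 1.2M), each queued as separate btrfs_work,
 * so compression runs on multiple CPUs while async_cow_submit() preserves the
 * submission order.
 */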
static int cow_file_range_async(struct btrfs_inode *inode,
				struct writeback_control *wbc,
				struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 cur_end;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	bool should_compress;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	unlock_extent(&inode->io_tree, start, end, NULL);

	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
		num_chunks = 1;
		should_compress = false;
	} else {
		should_compress = true;
	}

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!ctx) {
		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
			EXTENT_DO_ACCOUNTING;
		unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
					 PAGE_END_WRITEBACK | PAGE_SET_ERROR;

		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
		return -ENOMEM;
	}

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		if (should_compress)
			cur_end = min(end, start + SZ_512K - 1);
		else
			cur_end = end;

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime.
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given. As we spread this
		 * large delalloc region across multiple async_chunk structs,
		 * only the first struct needs a pointer to locked_page.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async. We want all of them to
			 * be accounted against wbc once. Let's do it here
			 * before the paths diverge. wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy. Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, async_cow_start,
				async_cow_submit, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
				       struct page *locked_page, u64 start,
				       u64 end, int *page_started,
				       unsigned long *nr_written)
{
	u64 done_offset = end;
	int ret;
	bool locked_page_done = false;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end, page_started,
				     nr_written, 0, &done_offset);
		if (ret && ret != -EAGAIN)
			return ret;

		if (*page_started) {
			ASSERT(ret == 0);
			return 0;
		}

		if (ret == 0)
			done_offset = end;

		if (done_offset == start) {
			wait_on_bit_io(&inode->root->fs_info->flags,
				       BTRFS_FS_NEED_ZONE_FINISH,
				       TASK_UNINTERRUPTIBLE);
			continue;
		}

		if (!locked_page_done) {
			__set_page_dirty_nobuffers(locked_page);
			account_page_redirty(locked_page);
		}
		locked_page_done = true;
		extent_write_locked_range(&inode->vfs_inode, start, done_offset);

		start = done_offset + 1;
	}

	*page_started = 1;

	return 0;
}
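/*
 * Check whether any data checksums exist for the given logical range.
 *
 * Returns 1 if at least one csum item covers [bytenr, bytenr + num_bytes),
 * 0 if none exist, and a negative errno on lookup failure.
 */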
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes, bool nowait)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
				      &list, 0, nowait);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}

static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end,
			   int *page_started, unsigned long *nr_written)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we did not have enough available data space and therefore we
	 * did not reserve data space for it, since we thought we could do
	 * NOCOW for the respective file range (either there is a prealloc
	 * extent or the inode has the NOCOW bit set).
	 *
	 * However when we need to fall back to COW mode (because for example
	 * the block group for the corresponding extent was turned to RO mode
	 * by a scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fall back to COW and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a
	 * prealloc extent for them, however scrub or balance may have set the
	 * block group that contains that extent to RO mode and therefore force
	 * COW when starting writeback.
	 */
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0, NULL);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 NULL);
	}

	return cow_file_range(inode, locked_page, start, end, page_started,
			      nr_written, 1, NULL);
}
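/*
 * Typical setup for the args struct below, as run_delalloc_nocow() further
 * down fills it in (the start assignment and the call are inferred from
 * context and shown here only as an illustration):
 *
 *	struct can_nocow_file_extent_args nocow_args = { 0 };
 *
 *	nocow_args.end = end;
 *	nocow_args.writeback_path = true;
 *	...
 *	nocow_args.start = cur_offset;
 *	ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
 */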
*/ 1795 1796 /* Start file offset of the range we want to NOCOW. */ 1797 u64 start; 1798 /* End file offset (inclusive) of the range we want to NOCOW. */ 1799 u64 end; 1800 bool writeback_path; 1801 bool strict; 1802 /* 1803 * Free the path passed to can_nocow_file_extent() once it's not needed 1804 * anymore. 1805 */ 1806 bool free_path; 1807 1808 /* Output fields. Only set when can_nocow_file_extent() returns 1. */ 1809 1810 u64 disk_bytenr; 1811 u64 disk_num_bytes; 1812 u64 extent_offset; 1813 /* Number of bytes that can be written to in NOCOW mode. */ 1814 u64 num_bytes; 1815 }; 1816 1817 /* 1818 * Check if we can NOCOW the file extent that the path points to. 1819 * This function may return with the path released, so the caller should check 1820 * if path->nodes[0] is NULL or not if it needs to use the path afterwards. 1821 * 1822 * Returns: < 0 on error 1823 * 0 if we can not NOCOW 1824 * 1 if we can NOCOW 1825 */ 1826 static int can_nocow_file_extent(struct btrfs_path *path, 1827 struct btrfs_key *key, 1828 struct btrfs_inode *inode, 1829 struct can_nocow_file_extent_args *args) 1830 { 1831 const bool is_freespace_inode = btrfs_is_free_space_inode(inode); 1832 struct extent_buffer *leaf = path->nodes[0]; 1833 struct btrfs_root *root = inode->root; 1834 struct btrfs_file_extent_item *fi; 1835 u64 extent_end; 1836 u8 extent_type; 1837 int can_nocow = 0; 1838 int ret = 0; 1839 bool nowait = path->nowait; 1840 1841 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 1842 extent_type = btrfs_file_extent_type(leaf, fi); 1843 1844 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 1845 goto out; 1846 1847 /* Can't access these fields unless we know it's not an inline extent. */ 1848 args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1849 args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1850 args->extent_offset = btrfs_file_extent_offset(leaf, fi); 1851 1852 if (!(inode->flags & BTRFS_INODE_NODATACOW) && 1853 extent_type == BTRFS_FILE_EXTENT_REG) 1854 goto out; 1855 1856 /* 1857 * If the extent was created before the generation where the last snapshot 1858 * for its subvolume was created, then this implies the extent is shared, 1859 * hence we must COW. 1860 */ 1861 if (!args->strict && 1862 btrfs_file_extent_generation(leaf, fi) <= 1863 btrfs_root_last_snapshot(&root->root_item)) 1864 goto out; 1865 1866 /* An explicit hole, must COW. */ 1867 if (args->disk_bytenr == 0) 1868 goto out; 1869 1870 /* Compressed/encrypted/encoded extents must be COWed. */ 1871 if (btrfs_file_extent_compression(leaf, fi) || 1872 btrfs_file_extent_encryption(leaf, fi) || 1873 btrfs_file_extent_other_encoding(leaf, fi)) 1874 goto out; 1875 1876 extent_end = btrfs_file_extent_end(path); 1877 1878 /* 1879 * The following checks can be expensive, as they need to take other 1880 * locks and do btree or rbtree searches, so release the path to avoid 1881 * blocking other tasks for too long. 1882 */ 1883 btrfs_release_path(path); 1884 1885 ret = btrfs_cross_ref_exist(root, btrfs_ino(inode), 1886 key->offset - args->extent_offset, 1887 args->disk_bytenr, false, path); 1888 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1889 if (ret != 0) 1890 goto out; 1891 1892 if (args->free_path) { 1893 /* 1894 * We don't need the path anymore, plus through the 1895 * csum_exist_in_range() call below we will end up allocating 1896 * another path. So free the path to avoid unnecessary extra 1897 * memory usage. 
1898 */
1899 btrfs_free_path(path);
1900 path = NULL;
1901 }
1902
1903 /* If there are pending snapshots for this root, we must COW. */
1904 if (args->writeback_path && !is_freespace_inode &&
1905 atomic_read(&root->snapshot_force_cow))
1906 goto out;
1907
1908 args->disk_bytenr += args->extent_offset;
1909 args->disk_bytenr += args->start - key->offset;
1910 args->num_bytes = min(args->end + 1, extent_end) - args->start;
1911
1912 /*
1913 * Force COW if csums exist in the range. This ensures that csums for a
1914 * given extent are either valid or do not exist.
1915 */
1916 ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
1917 nowait);
1918 WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1919 if (ret != 0)
1920 goto out;
1921
1922 can_nocow = 1;
1923 out:
1924 if (args->free_path && path)
1925 btrfs_free_path(path);
1926
1927 return ret < 0 ? ret : can_nocow;
1928 }
1929
1930 /*
1931 * Run when writing back a range in NOCOW mode. This checks for snapshots
1932 * or COW copies of the extents that exist in the file, and COWs as required.
1933 *
1934 * If no COW copies or snapshots exist, we write directly to the existing
1935 * blocks on disk.
1936 */
1937 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
1938 struct page *locked_page,
1939 const u64 start, const u64 end,
1940 int *page_started,
1941 unsigned long *nr_written)
1942 {
1943 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1944 struct btrfs_root *root = inode->root;
1945 struct btrfs_path *path;
1946 u64 cow_start = (u64)-1;
1947 u64 cur_offset = start;
1948 int ret;
1949 bool check_prev = true;
1950 u64 ino = btrfs_ino(inode);
1951 struct btrfs_block_group *bg;
1952 bool nocow = false;
1953 struct can_nocow_file_extent_args nocow_args = { 0 };
1954
1955 path = btrfs_alloc_path();
1956 if (!path) {
1957 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1958 EXTENT_LOCKED | EXTENT_DELALLOC |
1959 EXTENT_DO_ACCOUNTING |
1960 EXTENT_DEFRAG, PAGE_UNLOCK |
1961 PAGE_START_WRITEBACK |
1962 PAGE_END_WRITEBACK);
1963 return -ENOMEM;
1964 }
1965
1966 nocow_args.end = end;
1967 nocow_args.writeback_path = true;
1968
1969 while (1) {
1970 struct btrfs_key found_key;
1971 struct btrfs_file_extent_item *fi;
1972 struct extent_buffer *leaf;
1973 u64 extent_end;
1974 u64 ram_bytes;
1975 u64 nocow_end;
1976 int extent_type;
1977
1978 nocow = false;
1979
1980 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1981 cur_offset, 0);
1982 if (ret < 0)
1983 goto error;
1984
1985 /*
1986 * If there is no extent for our range when doing the initial
1987 * search, then go back to the previous slot as it will be the
1988 * one containing the search offset
1989 */
1990 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1991 leaf = path->nodes[0];
1992 btrfs_item_key_to_cpu(leaf, &found_key,
1993 path->slots[0] - 1);
1994 if (found_key.objectid == ino &&
1995 found_key.type == BTRFS_EXTENT_DATA_KEY)
1996 path->slots[0]--;
1997 }
1998 check_prev = false;
1999 next_slot:
2000 /* Go to next leaf if we have exhausted the current one */
2001 leaf = path->nodes[0];
2002 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2003 ret = btrfs_next_leaf(root, path);
2004 if (ret < 0) {
2005 if (cow_start != (u64)-1)
2006 cur_offset = cow_start;
2007 goto error;
2008 }
2009 if (ret > 0)
2010 break;
2011 leaf = path->nodes[0];
2012 }
2013
2014 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2015
2016 /* Didn't find anything for our INO */
2017 if (found_key.objectid > ino)
2018 break;
2019 /*
2020 * Keep
searching until we find an EXTENT_ITEM or there are no 2021 * more extents for this inode 2022 */ 2023 if (WARN_ON_ONCE(found_key.objectid < ino) || 2024 found_key.type < BTRFS_EXTENT_DATA_KEY) { 2025 path->slots[0]++; 2026 goto next_slot; 2027 } 2028 2029 /* Found key is not EXTENT_DATA_KEY or starts after req range */ 2030 if (found_key.type > BTRFS_EXTENT_DATA_KEY || 2031 found_key.offset > end) 2032 break; 2033 2034 /* 2035 * If the found extent starts after requested offset, then 2036 * adjust extent_end to be right before this extent begins 2037 */ 2038 if (found_key.offset > cur_offset) { 2039 extent_end = found_key.offset; 2040 extent_type = 0; 2041 goto out_check; 2042 } 2043 2044 /* 2045 * Found extent which begins before our range and potentially 2046 * intersect it 2047 */ 2048 fi = btrfs_item_ptr(leaf, path->slots[0], 2049 struct btrfs_file_extent_item); 2050 extent_type = btrfs_file_extent_type(leaf, fi); 2051 /* If this is triggered then we have a memory corruption. */ 2052 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES); 2053 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) { 2054 ret = -EUCLEAN; 2055 goto error; 2056 } 2057 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 2058 extent_end = btrfs_file_extent_end(path); 2059 2060 /* 2061 * If the extent we got ends before our current offset, skip to 2062 * the next extent. 2063 */ 2064 if (extent_end <= cur_offset) { 2065 path->slots[0]++; 2066 goto next_slot; 2067 } 2068 2069 nocow_args.start = cur_offset; 2070 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args); 2071 if (ret < 0) { 2072 if (cow_start != (u64)-1) 2073 cur_offset = cow_start; 2074 goto error; 2075 } else if (ret == 0) { 2076 goto out_check; 2077 } 2078 2079 ret = 0; 2080 bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr); 2081 if (bg) 2082 nocow = true; 2083 out_check: 2084 /* 2085 * If nocow is false then record the beginning of the range 2086 * that needs to be COWed 2087 */ 2088 if (!nocow) { 2089 if (cow_start == (u64)-1) 2090 cow_start = cur_offset; 2091 cur_offset = extent_end; 2092 if (cur_offset > end) 2093 break; 2094 if (!path->nodes[0]) 2095 continue; 2096 path->slots[0]++; 2097 goto next_slot; 2098 } 2099 2100 /* 2101 * COW range from cow_start to found_key.offset - 1. 
As the key 2102 * will contain the beginning of the first extent that can be 2103 * NOCOW, following one which needs to be COW'ed 2104 */ 2105 if (cow_start != (u64)-1) { 2106 ret = fallback_to_cow(inode, locked_page, 2107 cow_start, found_key.offset - 1, 2108 page_started, nr_written); 2109 if (ret) 2110 goto error; 2111 cow_start = (u64)-1; 2112 } 2113 2114 nocow_end = cur_offset + nocow_args.num_bytes - 1; 2115 2116 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 2117 u64 orig_start = found_key.offset - nocow_args.extent_offset; 2118 struct extent_map *em; 2119 2120 em = create_io_em(inode, cur_offset, nocow_args.num_bytes, 2121 orig_start, 2122 nocow_args.disk_bytenr, /* block_start */ 2123 nocow_args.num_bytes, /* block_len */ 2124 nocow_args.disk_num_bytes, /* orig_block_len */ 2125 ram_bytes, BTRFS_COMPRESS_NONE, 2126 BTRFS_ORDERED_PREALLOC); 2127 if (IS_ERR(em)) { 2128 ret = PTR_ERR(em); 2129 goto error; 2130 } 2131 free_extent_map(em); 2132 ret = btrfs_add_ordered_extent(inode, 2133 cur_offset, nocow_args.num_bytes, 2134 nocow_args.num_bytes, 2135 nocow_args.disk_bytenr, 2136 nocow_args.num_bytes, 0, 2137 1 << BTRFS_ORDERED_PREALLOC, 2138 BTRFS_COMPRESS_NONE); 2139 if (ret) { 2140 btrfs_drop_extent_map_range(inode, cur_offset, 2141 nocow_end, false); 2142 goto error; 2143 } 2144 } else { 2145 ret = btrfs_add_ordered_extent(inode, cur_offset, 2146 nocow_args.num_bytes, 2147 nocow_args.num_bytes, 2148 nocow_args.disk_bytenr, 2149 nocow_args.num_bytes, 2150 0, 2151 1 << BTRFS_ORDERED_NOCOW, 2152 BTRFS_COMPRESS_NONE); 2153 if (ret) 2154 goto error; 2155 } 2156 2157 if (nocow) { 2158 btrfs_dec_nocow_writers(bg); 2159 nocow = false; 2160 } 2161 2162 if (btrfs_is_data_reloc_root(root)) 2163 /* 2164 * Error handled later, as we must prevent 2165 * extent_clear_unlock_delalloc() in error handler 2166 * from freeing metadata of created ordered extent. 2167 */ 2168 ret = btrfs_reloc_clone_csums(inode, cur_offset, 2169 nocow_args.num_bytes); 2170 2171 extent_clear_unlock_delalloc(inode, cur_offset, nocow_end, 2172 locked_page, EXTENT_LOCKED | 2173 EXTENT_DELALLOC | 2174 EXTENT_CLEAR_DATA_RESV, 2175 PAGE_UNLOCK | PAGE_SET_ORDERED); 2176 2177 cur_offset = extent_end; 2178 2179 /* 2180 * btrfs_reloc_clone_csums() error, now we're OK to call error 2181 * handler, as metadata for created ordered extent will only 2182 * be freed by btrfs_finish_ordered_io(). 
2183 */ 2184 if (ret) 2185 goto error; 2186 if (cur_offset > end) 2187 break; 2188 } 2189 btrfs_release_path(path); 2190 2191 if (cur_offset <= end && cow_start == (u64)-1) 2192 cow_start = cur_offset; 2193 2194 if (cow_start != (u64)-1) { 2195 cur_offset = end; 2196 ret = fallback_to_cow(inode, locked_page, cow_start, end, 2197 page_started, nr_written); 2198 if (ret) 2199 goto error; 2200 } 2201 2202 error: 2203 if (nocow) 2204 btrfs_dec_nocow_writers(bg); 2205 2206 if (ret && cur_offset < end) 2207 extent_clear_unlock_delalloc(inode, cur_offset, end, 2208 locked_page, EXTENT_LOCKED | 2209 EXTENT_DELALLOC | EXTENT_DEFRAG | 2210 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 2211 PAGE_START_WRITEBACK | 2212 PAGE_END_WRITEBACK); 2213 btrfs_free_path(path); 2214 return ret; 2215 } 2216 2217 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end) 2218 { 2219 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { 2220 if (inode->defrag_bytes && 2221 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, 2222 0, NULL)) 2223 return false; 2224 return true; 2225 } 2226 return false; 2227 } 2228 2229 /* 2230 * Function to process delayed allocation (create CoW) for ranges which are 2231 * being touched for the first time. 2232 */ 2233 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page, 2234 u64 start, u64 end, int *page_started, unsigned long *nr_written, 2235 struct writeback_control *wbc) 2236 { 2237 int ret; 2238 const bool zoned = btrfs_is_zoned(inode->root->fs_info); 2239 2240 /* 2241 * The range must cover part of the @locked_page, or the returned 2242 * @page_started can confuse the caller. 2243 */ 2244 ASSERT(!(end <= page_offset(locked_page) || 2245 start >= page_offset(locked_page) + PAGE_SIZE)); 2246 2247 if (should_nocow(inode, start, end)) { 2248 /* 2249 * Normally on a zoned device we're only doing COW writes, but 2250 * in case of relocation on a zoned filesystem we have taken 2251 * precaution, that we're only writing sequentially. It's safe 2252 * to use run_delalloc_nocow() here, like for regular 2253 * preallocated inodes. 2254 */ 2255 ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root)); 2256 ret = run_delalloc_nocow(inode, locked_page, start, end, 2257 page_started, nr_written); 2258 } else if (!btrfs_inode_can_compress(inode) || 2259 !inode_need_compress(inode, start, end)) { 2260 if (zoned) 2261 ret = run_delalloc_zoned(inode, locked_page, start, end, 2262 page_started, nr_written); 2263 else 2264 ret = cow_file_range(inode, locked_page, start, end, 2265 page_started, nr_written, 1, NULL); 2266 } else { 2267 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); 2268 ret = cow_file_range_async(inode, wbc, locked_page, start, end, 2269 page_started, nr_written); 2270 } 2271 ASSERT(ret <= 0); 2272 if (ret) 2273 btrfs_cleanup_ordered_extents(inode, locked_page, start, 2274 end - start + 1); 2275 return ret; 2276 } 2277 2278 void btrfs_split_delalloc_extent(struct btrfs_inode *inode, 2279 struct extent_state *orig, u64 split) 2280 { 2281 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2282 u64 size; 2283 2284 /* not delalloc, ignore it */ 2285 if (!(orig->state & EXTENT_DELALLOC)) 2286 return; 2287 2288 size = orig->end - orig->start + 1; 2289 if (size > fs_info->max_extent_size) { 2290 u32 num_extents; 2291 u64 new_size; 2292 2293 /* 2294 * See the explanation in btrfs_merge_delalloc_extent, the same 2295 * applies here, just in reverse. 
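 *
 * A worked example (assuming the usual 128M max_extent_size, which may
 * be smaller on some zoned setups): splitting a 128M+8K extent, which
 * is accounted as 2 outstanding extents, at offset 4K leaves a 4K
 * piece needing 1 extent and a 128M+4K piece needing 2, 3 in total,
 * so one more extent is accounted below. Splitting the same extent at
 * offset 128M instead leaves two pieces needing 1 extent each, 2 in
 * total, and nothing needs to be added.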
2296 */ 2297 new_size = orig->end - split + 1; 2298 num_extents = count_max_extents(fs_info, new_size); 2299 new_size = split - orig->start; 2300 num_extents += count_max_extents(fs_info, new_size); 2301 if (count_max_extents(fs_info, size) >= num_extents) 2302 return; 2303 } 2304 2305 spin_lock(&inode->lock); 2306 btrfs_mod_outstanding_extents(inode, 1); 2307 spin_unlock(&inode->lock); 2308 } 2309 2310 /* 2311 * Handle merged delayed allocation extents so we can keep track of new extents 2312 * that are just merged onto old extents, such as when we are doing sequential 2313 * writes, so we can properly account for the metadata space we'll need. 2314 */ 2315 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new, 2316 struct extent_state *other) 2317 { 2318 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2319 u64 new_size, old_size; 2320 u32 num_extents; 2321 2322 /* not delalloc, ignore it */ 2323 if (!(other->state & EXTENT_DELALLOC)) 2324 return; 2325 2326 if (new->start > other->start) 2327 new_size = new->end - other->start + 1; 2328 else 2329 new_size = other->end - new->start + 1; 2330 2331 /* we're not bigger than the max, unreserve the space and go */ 2332 if (new_size <= fs_info->max_extent_size) { 2333 spin_lock(&inode->lock); 2334 btrfs_mod_outstanding_extents(inode, -1); 2335 spin_unlock(&inode->lock); 2336 return; 2337 } 2338 2339 /* 2340 * We have to add up either side to figure out how many extents were 2341 * accounted for before we merged into one big extent. If the number of 2342 * extents we accounted for is <= the amount we need for the new range 2343 * then we can return, otherwise drop. Think of it like this 2344 * 2345 * [ 4k][MAX_SIZE] 2346 * 2347 * So we've grown the extent by a MAX_SIZE extent, this would mean we 2348 * need 2 outstanding extents, on one side we have 1 and the other side 2349 * we have 1 so they are == and we can return. But in this case 2350 * 2351 * [MAX_SIZE+4k][MAX_SIZE+4k] 2352 * 2353 * Each range on their own accounts for 2 extents, but merged together 2354 * they are only 3 extents worth of accounting, so we need to drop in 2355 * this case. 
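 *
 * Concretely, each [MAX_SIZE+4k] range above is accounted as 2
 * outstanding extents, 4 in total, while the merged range only needs
 * count_max_extents() == 3 of them, so one is dropped below. Since
 * merging is always pairwise, at most one accounted extent can become
 * redundant per merge, which is why a single decrement is enough.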
2356 */ 2357 old_size = other->end - other->start + 1; 2358 num_extents = count_max_extents(fs_info, old_size); 2359 old_size = new->end - new->start + 1; 2360 num_extents += count_max_extents(fs_info, old_size); 2361 if (count_max_extents(fs_info, new_size) >= num_extents) 2362 return; 2363 2364 spin_lock(&inode->lock); 2365 btrfs_mod_outstanding_extents(inode, -1); 2366 spin_unlock(&inode->lock); 2367 } 2368 2369 static void btrfs_add_delalloc_inodes(struct btrfs_root *root, 2370 struct btrfs_inode *inode) 2371 { 2372 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2373 2374 spin_lock(&root->delalloc_lock); 2375 if (list_empty(&inode->delalloc_inodes)) { 2376 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes); 2377 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags); 2378 root->nr_delalloc_inodes++; 2379 if (root->nr_delalloc_inodes == 1) { 2380 spin_lock(&fs_info->delalloc_root_lock); 2381 BUG_ON(!list_empty(&root->delalloc_root)); 2382 list_add_tail(&root->delalloc_root, 2383 &fs_info->delalloc_roots); 2384 spin_unlock(&fs_info->delalloc_root_lock); 2385 } 2386 } 2387 spin_unlock(&root->delalloc_lock); 2388 } 2389 2390 void __btrfs_del_delalloc_inode(struct btrfs_root *root, 2391 struct btrfs_inode *inode) 2392 { 2393 struct btrfs_fs_info *fs_info = root->fs_info; 2394 2395 if (!list_empty(&inode->delalloc_inodes)) { 2396 list_del_init(&inode->delalloc_inodes); 2397 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2398 &inode->runtime_flags); 2399 root->nr_delalloc_inodes--; 2400 if (!root->nr_delalloc_inodes) { 2401 ASSERT(list_empty(&root->delalloc_inodes)); 2402 spin_lock(&fs_info->delalloc_root_lock); 2403 BUG_ON(list_empty(&root->delalloc_root)); 2404 list_del_init(&root->delalloc_root); 2405 spin_unlock(&fs_info->delalloc_root_lock); 2406 } 2407 } 2408 } 2409 2410 static void btrfs_del_delalloc_inode(struct btrfs_root *root, 2411 struct btrfs_inode *inode) 2412 { 2413 spin_lock(&root->delalloc_lock); 2414 __btrfs_del_delalloc_inode(root, inode); 2415 spin_unlock(&root->delalloc_lock); 2416 } 2417 2418 /* 2419 * Properly track delayed allocation bytes in the inode and to maintain the 2420 * list of inodes that have pending delalloc work to be done. 
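 *
 * For example (assuming the default 128M max extent size), a new 1M
 * delalloc range accounts for a single outstanding extent, adds 1M to
 * both the per-inode and the global delalloc byte counters, and links
 * the inode into its root's delalloc list if it was not already there.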
2421 */ 2422 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state, 2423 u32 bits) 2424 { 2425 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2426 2427 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC)) 2428 WARN_ON(1); 2429 /* 2430 * set_bit and clear bit hooks normally require _irqsave/restore 2431 * but in this case, we are only testing for the DELALLOC 2432 * bit, which is only set or cleared with irqs on 2433 */ 2434 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2435 struct btrfs_root *root = inode->root; 2436 u64 len = state->end + 1 - state->start; 2437 u32 num_extents = count_max_extents(fs_info, len); 2438 bool do_list = !btrfs_is_free_space_inode(inode); 2439 2440 spin_lock(&inode->lock); 2441 btrfs_mod_outstanding_extents(inode, num_extents); 2442 spin_unlock(&inode->lock); 2443 2444 /* For sanity tests */ 2445 if (btrfs_is_testing(fs_info)) 2446 return; 2447 2448 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 2449 fs_info->delalloc_batch); 2450 spin_lock(&inode->lock); 2451 inode->delalloc_bytes += len; 2452 if (bits & EXTENT_DEFRAG) 2453 inode->defrag_bytes += len; 2454 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2455 &inode->runtime_flags)) 2456 btrfs_add_delalloc_inodes(root, inode); 2457 spin_unlock(&inode->lock); 2458 } 2459 2460 if (!(state->state & EXTENT_DELALLOC_NEW) && 2461 (bits & EXTENT_DELALLOC_NEW)) { 2462 spin_lock(&inode->lock); 2463 inode->new_delalloc_bytes += state->end + 1 - state->start; 2464 spin_unlock(&inode->lock); 2465 } 2466 } 2467 2468 /* 2469 * Once a range is no longer delalloc this function ensures that proper 2470 * accounting happens. 2471 */ 2472 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode, 2473 struct extent_state *state, u32 bits) 2474 { 2475 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2476 u64 len = state->end + 1 - state->start; 2477 u32 num_extents = count_max_extents(fs_info, len); 2478 2479 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) { 2480 spin_lock(&inode->lock); 2481 inode->defrag_bytes -= len; 2482 spin_unlock(&inode->lock); 2483 } 2484 2485 /* 2486 * set_bit and clear bit hooks normally require _irqsave/restore 2487 * but in this case, we are only testing for the DELALLOC 2488 * bit, which is only set or cleared with irqs on 2489 */ 2490 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2491 struct btrfs_root *root = inode->root; 2492 bool do_list = !btrfs_is_free_space_inode(inode); 2493 2494 spin_lock(&inode->lock); 2495 btrfs_mod_outstanding_extents(inode, -num_extents); 2496 spin_unlock(&inode->lock); 2497 2498 /* 2499 * We don't reserve metadata space for space cache inodes so we 2500 * don't need to call delalloc_release_metadata if there is an 2501 * error. 2502 */ 2503 if (bits & EXTENT_CLEAR_META_RESV && 2504 root != fs_info->tree_root) 2505 btrfs_delalloc_release_metadata(inode, len, false); 2506 2507 /* For sanity tests. 
*/ 2508 if (btrfs_is_testing(fs_info)) 2509 return; 2510 2511 if (!btrfs_is_data_reloc_root(root) && 2512 do_list && !(state->state & EXTENT_NORESERVE) && 2513 (bits & EXTENT_CLEAR_DATA_RESV)) 2514 btrfs_free_reserved_data_space_noquota(fs_info, len); 2515 2516 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 2517 fs_info->delalloc_batch); 2518 spin_lock(&inode->lock); 2519 inode->delalloc_bytes -= len; 2520 if (do_list && inode->delalloc_bytes == 0 && 2521 test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2522 &inode->runtime_flags)) 2523 btrfs_del_delalloc_inode(root, inode); 2524 spin_unlock(&inode->lock); 2525 } 2526 2527 if ((state->state & EXTENT_DELALLOC_NEW) && 2528 (bits & EXTENT_DELALLOC_NEW)) { 2529 spin_lock(&inode->lock); 2530 ASSERT(inode->new_delalloc_bytes >= len); 2531 inode->new_delalloc_bytes -= len; 2532 if (bits & EXTENT_ADD_INODE_BYTES) 2533 inode_add_bytes(&inode->vfs_inode, len); 2534 spin_unlock(&inode->lock); 2535 } 2536 } 2537 2538 /* 2539 * in order to insert checksums into the metadata in large chunks, 2540 * we wait until bio submission time. All the pages in the bio are 2541 * checksummed and sums are attached onto the ordered extent record. 2542 * 2543 * At IO completion time the cums attached on the ordered extent record 2544 * are inserted into the btree 2545 */ 2546 blk_status_t btrfs_submit_bio_start(struct btrfs_inode *inode, struct bio *bio) 2547 { 2548 return btrfs_csum_one_bio(inode, bio, (u64)-1, false); 2549 } 2550 2551 /* 2552 * Split an extent_map at [start, start + len] 2553 * 2554 * This function is intended to be used only for extract_ordered_extent(). 2555 */ 2556 static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len, 2557 u64 pre, u64 post) 2558 { 2559 struct extent_map_tree *em_tree = &inode->extent_tree; 2560 struct extent_map *em; 2561 struct extent_map *split_pre = NULL; 2562 struct extent_map *split_mid = NULL; 2563 struct extent_map *split_post = NULL; 2564 int ret = 0; 2565 unsigned long flags; 2566 2567 /* Sanity check */ 2568 if (pre == 0 && post == 0) 2569 return 0; 2570 2571 split_pre = alloc_extent_map(); 2572 if (pre) 2573 split_mid = alloc_extent_map(); 2574 if (post) 2575 split_post = alloc_extent_map(); 2576 if (!split_pre || (pre && !split_mid) || (post && !split_post)) { 2577 ret = -ENOMEM; 2578 goto out; 2579 } 2580 2581 ASSERT(pre + post < len); 2582 2583 lock_extent(&inode->io_tree, start, start + len - 1, NULL); 2584 write_lock(&em_tree->lock); 2585 em = lookup_extent_mapping(em_tree, start, len); 2586 if (!em) { 2587 ret = -EIO; 2588 goto out_unlock; 2589 } 2590 2591 ASSERT(em->len == len); 2592 ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)); 2593 ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE); 2594 ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags)); 2595 ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags)); 2596 ASSERT(!list_empty(&em->list)); 2597 2598 flags = em->flags; 2599 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 2600 2601 /* First, replace the em with a new extent_map starting from * em->start */ 2602 split_pre->start = em->start; 2603 split_pre->len = (pre ? 
pre : em->len - post); 2604 split_pre->orig_start = split_pre->start; 2605 split_pre->block_start = em->block_start; 2606 split_pre->block_len = split_pre->len; 2607 split_pre->orig_block_len = split_pre->block_len; 2608 split_pre->ram_bytes = split_pre->len; 2609 split_pre->flags = flags; 2610 split_pre->compress_type = em->compress_type; 2611 split_pre->generation = em->generation; 2612 2613 replace_extent_mapping(em_tree, em, split_pre, 1); 2614 2615 /* 2616 * Now we only have an extent_map at: 2617 * [em->start, em->start + pre] if pre != 0 2618 * [em->start, em->start + em->len - post] if pre == 0 2619 */ 2620 2621 if (pre) { 2622 /* Insert the middle extent_map */ 2623 split_mid->start = em->start + pre; 2624 split_mid->len = em->len - pre - post; 2625 split_mid->orig_start = split_mid->start; 2626 split_mid->block_start = em->block_start + pre; 2627 split_mid->block_len = split_mid->len; 2628 split_mid->orig_block_len = split_mid->block_len; 2629 split_mid->ram_bytes = split_mid->len; 2630 split_mid->flags = flags; 2631 split_mid->compress_type = em->compress_type; 2632 split_mid->generation = em->generation; 2633 add_extent_mapping(em_tree, split_mid, 1); 2634 } 2635 2636 if (post) { 2637 split_post->start = em->start + em->len - post; 2638 split_post->len = post; 2639 split_post->orig_start = split_post->start; 2640 split_post->block_start = em->block_start + em->len - post; 2641 split_post->block_len = split_post->len; 2642 split_post->orig_block_len = split_post->block_len; 2643 split_post->ram_bytes = split_post->len; 2644 split_post->flags = flags; 2645 split_post->compress_type = em->compress_type; 2646 split_post->generation = em->generation; 2647 add_extent_mapping(em_tree, split_post, 1); 2648 } 2649 2650 /* Once for us */ 2651 free_extent_map(em); 2652 /* Once for the tree */ 2653 free_extent_map(em); 2654 2655 out_unlock: 2656 write_unlock(&em_tree->lock); 2657 unlock_extent(&inode->io_tree, start, start + len - 1, NULL); 2658 out: 2659 free_extent_map(split_pre); 2660 free_extent_map(split_mid); 2661 free_extent_map(split_post); 2662 2663 return ret; 2664 } 2665 2666 static blk_status_t extract_ordered_extent(struct btrfs_inode *inode, 2667 struct bio *bio, loff_t file_offset) 2668 { 2669 struct btrfs_ordered_extent *ordered; 2670 u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT; 2671 u64 file_len; 2672 u64 len = bio->bi_iter.bi_size; 2673 u64 end = start + len; 2674 u64 ordered_end; 2675 u64 pre, post; 2676 int ret = 0; 2677 2678 ordered = btrfs_lookup_ordered_extent(inode, file_offset); 2679 if (WARN_ON_ONCE(!ordered)) 2680 return BLK_STS_IOERR; 2681 2682 /* No need to split */ 2683 if (ordered->disk_num_bytes == len) 2684 goto out; 2685 2686 /* We cannot split once end_bio'd ordered extent */ 2687 if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) { 2688 ret = -EINVAL; 2689 goto out; 2690 } 2691 2692 /* We cannot split a compressed ordered extent */ 2693 if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) { 2694 ret = -EINVAL; 2695 goto out; 2696 } 2697 2698 ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes; 2699 /* bio must be in one ordered extent */ 2700 if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) { 2701 ret = -EINVAL; 2702 goto out; 2703 } 2704 2705 /* Checksum list should be empty */ 2706 if (WARN_ON_ONCE(!list_empty(&ordered->list))) { 2707 ret = -EINVAL; 2708 goto out; 2709 } 2710 2711 file_len = ordered->num_bytes; 2712 pre = start - ordered->disk_bytenr; 2713 post = ordered_end - end; 
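	/*
	 * For example, if the ordered extent covers disk bytes [0, 1M) and
	 * this zone append bio covers [256K, 512K), then pre = 256K and
	 * post = 512K: the split below produces [0, 256K), [256K, 512K) and
	 * [512K, 1M), with the middle piece matching this bio exactly.
	 */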
2714 2715 ret = btrfs_split_ordered_extent(ordered, pre, post); 2716 if (ret) 2717 goto out; 2718 ret = split_zoned_em(inode, file_offset, file_len, pre, post); 2719 2720 out: 2721 btrfs_put_ordered_extent(ordered); 2722 2723 return errno_to_blk_status(ret); 2724 } 2725 2726 void btrfs_submit_data_write_bio(struct btrfs_inode *inode, struct bio *bio, int mirror_num) 2727 { 2728 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2729 blk_status_t ret; 2730 2731 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 2732 ret = extract_ordered_extent(inode, bio, 2733 page_offset(bio_first_bvec_all(bio)->bv_page)); 2734 if (ret) { 2735 btrfs_bio_end_io(btrfs_bio(bio), ret); 2736 return; 2737 } 2738 } 2739 2740 /* 2741 * If we need to checksum, and the I/O is not issued by fsync and 2742 * friends, that is ->sync_writers != 0, defer the submission to a 2743 * workqueue to parallelize it. 2744 * 2745 * Csum items for reloc roots have already been cloned at this point, 2746 * so they are handled as part of the no-checksum case. 2747 */ 2748 if (!(inode->flags & BTRFS_INODE_NODATASUM) && 2749 !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) && 2750 !btrfs_is_data_reloc_root(inode->root)) { 2751 if (!atomic_read(&inode->sync_writers) && 2752 btrfs_wq_submit_bio(inode, bio, mirror_num, 0, WQ_SUBMIT_DATA)) 2753 return; 2754 2755 ret = btrfs_csum_one_bio(inode, bio, (u64)-1, false); 2756 if (ret) { 2757 btrfs_bio_end_io(btrfs_bio(bio), ret); 2758 return; 2759 } 2760 } 2761 btrfs_submit_bio(fs_info, bio, mirror_num); 2762 } 2763 2764 void btrfs_submit_data_read_bio(struct btrfs_inode *inode, struct bio *bio, 2765 int mirror_num, enum btrfs_compression_type compress_type) 2766 { 2767 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2768 blk_status_t ret; 2769 2770 if (compress_type != BTRFS_COMPRESS_NONE) { 2771 /* 2772 * btrfs_submit_compressed_read will handle completing the bio 2773 * if there were any errors, so just return here. 2774 */ 2775 btrfs_submit_compressed_read(&inode->vfs_inode, bio, mirror_num); 2776 return; 2777 } 2778 2779 /* Save the original iter for read repair */ 2780 btrfs_bio(bio)->iter = bio->bi_iter; 2781 2782 /* 2783 * Lookup bio sums does extra checks around whether we need to csum or 2784 * not, which is why we ignore skip_sum here. 2785 */ 2786 ret = btrfs_lookup_bio_sums(&inode->vfs_inode, bio, NULL); 2787 if (ret) { 2788 btrfs_bio_end_io(btrfs_bio(bio), ret); 2789 return; 2790 } 2791 2792 btrfs_submit_bio(fs_info, bio, mirror_num); 2793 } 2794 2795 /* 2796 * given a list of ordered sums record them in the inode. This happens 2797 * at IO completion time based on sums calculated at bio submission time. 
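 *
 * All sums of one ordered extent normally land in the same csum root
 * (an ordered extent maps to one contiguous disk range), which is why
 * the root is looked up once and then reused for the rest of the list.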
2798 */ 2799 static int add_pending_csums(struct btrfs_trans_handle *trans, 2800 struct list_head *list) 2801 { 2802 struct btrfs_ordered_sum *sum; 2803 struct btrfs_root *csum_root = NULL; 2804 int ret; 2805 2806 list_for_each_entry(sum, list, list) { 2807 trans->adding_csums = true; 2808 if (!csum_root) 2809 csum_root = btrfs_csum_root(trans->fs_info, 2810 sum->bytenr); 2811 ret = btrfs_csum_file_blocks(trans, csum_root, sum); 2812 trans->adding_csums = false; 2813 if (ret) 2814 return ret; 2815 } 2816 return 0; 2817 } 2818 2819 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, 2820 const u64 start, 2821 const u64 len, 2822 struct extent_state **cached_state) 2823 { 2824 u64 search_start = start; 2825 const u64 end = start + len - 1; 2826 2827 while (search_start < end) { 2828 const u64 search_len = end - search_start + 1; 2829 struct extent_map *em; 2830 u64 em_len; 2831 int ret = 0; 2832 2833 em = btrfs_get_extent(inode, NULL, 0, search_start, search_len); 2834 if (IS_ERR(em)) 2835 return PTR_ERR(em); 2836 2837 if (em->block_start != EXTENT_MAP_HOLE) 2838 goto next; 2839 2840 em_len = em->len; 2841 if (em->start < search_start) 2842 em_len -= search_start - em->start; 2843 if (em_len > search_len) 2844 em_len = search_len; 2845 2846 ret = set_extent_bit(&inode->io_tree, search_start, 2847 search_start + em_len - 1, 2848 EXTENT_DELALLOC_NEW, cached_state, 2849 GFP_NOFS); 2850 next: 2851 search_start = extent_map_end(em); 2852 free_extent_map(em); 2853 if (ret) 2854 return ret; 2855 } 2856 return 0; 2857 } 2858 2859 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 2860 unsigned int extra_bits, 2861 struct extent_state **cached_state) 2862 { 2863 WARN_ON(PAGE_ALIGNED(end)); 2864 2865 if (start >= i_size_read(&inode->vfs_inode) && 2866 !(inode->flags & BTRFS_INODE_PREALLOC)) { 2867 /* 2868 * There can't be any extents following eof in this case so just 2869 * set the delalloc new bit for the range directly. 2870 */ 2871 extra_bits |= EXTENT_DELALLOC_NEW; 2872 } else { 2873 int ret; 2874 2875 ret = btrfs_find_new_delalloc_bytes(inode, start, 2876 end + 1 - start, 2877 cached_state); 2878 if (ret) 2879 return ret; 2880 } 2881 2882 return set_extent_delalloc(&inode->io_tree, start, end, extra_bits, 2883 cached_state); 2884 } 2885 2886 /* see btrfs_writepage_start_hook for details on why this is required */ 2887 struct btrfs_writepage_fixup { 2888 struct page *page; 2889 struct btrfs_inode *inode; 2890 struct btrfs_work work; 2891 }; 2892 2893 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2894 { 2895 struct btrfs_writepage_fixup *fixup; 2896 struct btrfs_ordered_extent *ordered; 2897 struct extent_state *cached_state = NULL; 2898 struct extent_changeset *data_reserved = NULL; 2899 struct page *page; 2900 struct btrfs_inode *inode; 2901 u64 page_start; 2902 u64 page_end; 2903 int ret = 0; 2904 bool free_delalloc_space = true; 2905 2906 fixup = container_of(work, struct btrfs_writepage_fixup, work); 2907 page = fixup->page; 2908 inode = fixup->inode; 2909 page_start = page_offset(page); 2910 page_end = page_offset(page) + PAGE_SIZE - 1; 2911 2912 /* 2913 * This is similar to page_mkwrite, we need to reserve the space before 2914 * we take the page lock. 2915 */ 2916 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2917 PAGE_SIZE); 2918 again: 2919 lock_page(page); 2920 2921 /* 2922 * Before we queued this fixup, we took a reference on the page. 
2923 * page->mapping may go NULL, but it shouldn't be moved to a different 2924 * address space. 2925 */ 2926 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 2927 /* 2928 * Unfortunately this is a little tricky, either 2929 * 2930 * 1) We got here and our page had already been dealt with and 2931 * we reserved our space, thus ret == 0, so we need to just 2932 * drop our space reservation and bail. This can happen the 2933 * first time we come into the fixup worker, or could happen 2934 * while waiting for the ordered extent. 2935 * 2) Our page was already dealt with, but we happened to get an 2936 * ENOSPC above from the btrfs_delalloc_reserve_space. In 2937 * this case we obviously don't have anything to release, but 2938 * because the page was already dealt with we don't want to 2939 * mark the page with an error, so make sure we're resetting 2940 * ret to 0. This is why we have this check _before_ the ret 2941 * check, because we do not want to have a surprise ENOSPC 2942 * when the page was already properly dealt with. 2943 */ 2944 if (!ret) { 2945 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2946 btrfs_delalloc_release_space(inode, data_reserved, 2947 page_start, PAGE_SIZE, 2948 true); 2949 } 2950 ret = 0; 2951 goto out_page; 2952 } 2953 2954 /* 2955 * We can't mess with the page state unless it is locked, so now that 2956 * it is locked bail if we failed to make our space reservation. 2957 */ 2958 if (ret) 2959 goto out_page; 2960 2961 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2962 2963 /* already ordered? We're done */ 2964 if (PageOrdered(page)) 2965 goto out_reserved; 2966 2967 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); 2968 if (ordered) { 2969 unlock_extent(&inode->io_tree, page_start, page_end, 2970 &cached_state); 2971 unlock_page(page); 2972 btrfs_start_ordered_extent(ordered, 1); 2973 btrfs_put_ordered_extent(ordered); 2974 goto again; 2975 } 2976 2977 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 2978 &cached_state); 2979 if (ret) 2980 goto out_reserved; 2981 2982 /* 2983 * Everything went as planned, we're now the owner of a dirty page with 2984 * delayed allocation bits set and space reserved for our COW 2985 * destination. 2986 * 2987 * The page was dirty when we started, nothing should have cleaned it. 2988 */ 2989 BUG_ON(!PageDirty(page)); 2990 free_delalloc_space = false; 2991 out_reserved: 2992 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2993 if (free_delalloc_space) 2994 btrfs_delalloc_release_space(inode, data_reserved, page_start, 2995 PAGE_SIZE, true); 2996 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2997 out_page: 2998 if (ret) { 2999 /* 3000 * We hit ENOSPC or other errors. Update the mapping and page 3001 * to reflect the errors and clean the page. 3002 */ 3003 mapping_set_error(page->mapping, ret); 3004 end_extent_writepage(page, ret, page_start, page_end); 3005 clear_page_dirty_for_io(page); 3006 SetPageError(page); 3007 } 3008 btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE); 3009 unlock_page(page); 3010 put_page(page); 3011 kfree(fixup); 3012 extent_changeset_free(data_reserved); 3013 /* 3014 * As a precaution, do a delayed iput in case it would be the last iput 3015 * that could need flushing space. Recursing back to fixup worker would 3016 * deadlock. 
3017 */ 3018 btrfs_add_delayed_iput(inode); 3019 } 3020 3021 /* 3022 * There are a few paths in the higher layers of the kernel that directly 3023 * set the page dirty bit without asking the filesystem if it is a 3024 * good idea. This causes problems because we want to make sure COW 3025 * properly happens and the data=ordered rules are followed. 3026 * 3027 * In our case any range that doesn't have the ORDERED bit set 3028 * hasn't been properly setup for IO. We kick off an async process 3029 * to fix it up. The async helper will wait for ordered extents, set 3030 * the delalloc bit and make it safe to write the page. 3031 */ 3032 int btrfs_writepage_cow_fixup(struct page *page) 3033 { 3034 struct inode *inode = page->mapping->host; 3035 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3036 struct btrfs_writepage_fixup *fixup; 3037 3038 /* This page has ordered extent covering it already */ 3039 if (PageOrdered(page)) 3040 return 0; 3041 3042 /* 3043 * PageChecked is set below when we create a fixup worker for this page, 3044 * don't try to create another one if we're already PageChecked() 3045 * 3046 * The extent_io writepage code will redirty the page if we send back 3047 * EAGAIN. 3048 */ 3049 if (PageChecked(page)) 3050 return -EAGAIN; 3051 3052 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 3053 if (!fixup) 3054 return -EAGAIN; 3055 3056 /* 3057 * We are already holding a reference to this inode from 3058 * write_cache_pages. We need to hold it because the space reservation 3059 * takes place outside of the page lock, and we can't trust 3060 * page->mapping outside of the page lock. 3061 */ 3062 ihold(inode); 3063 btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE); 3064 get_page(page); 3065 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); 3066 fixup->page = page; 3067 fixup->inode = BTRFS_I(inode); 3068 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); 3069 3070 return -EAGAIN; 3071 } 3072 3073 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 3074 struct btrfs_inode *inode, u64 file_pos, 3075 struct btrfs_file_extent_item *stack_fi, 3076 const bool update_inode_bytes, 3077 u64 qgroup_reserved) 3078 { 3079 struct btrfs_root *root = inode->root; 3080 const u64 sectorsize = root->fs_info->sectorsize; 3081 struct btrfs_path *path; 3082 struct extent_buffer *leaf; 3083 struct btrfs_key ins; 3084 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); 3085 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi); 3086 u64 offset = btrfs_stack_file_extent_offset(stack_fi); 3087 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi); 3088 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi); 3089 struct btrfs_drop_extents_args drop_args = { 0 }; 3090 int ret; 3091 3092 path = btrfs_alloc_path(); 3093 if (!path) 3094 return -ENOMEM; 3095 3096 /* 3097 * we may be replacing one extent in the tree with another. 3098 * The new extent is pinned in the extent map, and we don't want 3099 * to drop it from the cache until it is completely in the btree. 3100 * 3101 * So, tell btrfs_drop_extents to leave this extent in the cache. 3102 * the caller is expected to unpin it and allow it to be merged 3103 * with the others. 
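 *
 * (For the ordered extent completion path, the unpin happens via
 * unpin_extent_cache() in btrfs_finish_ordered_io(), once the file
 * extent item is in place.)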
3104 */
3105 drop_args.path = path;
3106 drop_args.start = file_pos;
3107 drop_args.end = file_pos + num_bytes;
3108 drop_args.replace_extent = true;
3109 drop_args.extent_item_size = sizeof(*stack_fi);
3110 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
3111 if (ret)
3112 goto out;
3113
3114 if (!drop_args.extent_inserted) {
3115 ins.objectid = btrfs_ino(inode);
3116 ins.offset = file_pos;
3117 ins.type = BTRFS_EXTENT_DATA_KEY;
3118
3119 ret = btrfs_insert_empty_item(trans, root, path, &ins,
3120 sizeof(*stack_fi));
3121 if (ret)
3122 goto out;
3123 }
3124 leaf = path->nodes[0];
3125 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
3126 write_extent_buffer(leaf, stack_fi,
3127 btrfs_item_ptr_offset(leaf, path->slots[0]),
3128 sizeof(struct btrfs_file_extent_item));
3129
3130 btrfs_mark_buffer_dirty(leaf);
3131 btrfs_release_path(path);
3132
3133 /*
3134 * If we dropped an inline extent here, we know the range it covered was
3135 * not marked with the EXTENT_DELALLOC_NEW bit, so we update the number of
3136 * bytes only for the range containing the inline extent.
3137 * The remainder of the range will be processed when clearing the
3138 * EXTENT_DELALLOC_NEW bit through the ordered extent completion.
3139 */
3140 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
3141 u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
3142
3143 inline_size = drop_args.bytes_found - inline_size;
3144 btrfs_update_inode_bytes(inode, sectorsize, inline_size);
3145 drop_args.bytes_found -= inline_size;
3146 num_bytes -= sectorsize;
3147 }
3148
3149 if (update_inode_bytes)
3150 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
3151
3152 ins.objectid = disk_bytenr;
3153 ins.offset = disk_num_bytes;
3154 ins.type = BTRFS_EXTENT_ITEM_KEY;
3155
3156 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
3157 if (ret)
3158 goto out;
3159
3160 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
3161 file_pos - offset,
3162 qgroup_reserved, &ins);
3163 out:
3164 btrfs_free_path(path);
3165
3166 return ret;
3167 }
3168
3169 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
3170 u64 start, u64 len)
3171 {
3172 struct btrfs_block_group *cache;
3173
3174 cache = btrfs_lookup_block_group(fs_info, start);
3175 ASSERT(cache);
3176
3177 spin_lock(&cache->lock);
3178 cache->delalloc_bytes -= len;
3179 spin_unlock(&cache->lock);
3180
3181 btrfs_put_block_group(cache);
3182 }
3183
3184 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
3185 struct btrfs_ordered_extent *oe)
3186 {
3187 struct btrfs_file_extent_item stack_fi;
3188 bool update_inode_bytes;
3189 u64 num_bytes = oe->num_bytes;
3190 u64 ram_bytes = oe->ram_bytes;
3191
3192 memset(&stack_fi, 0, sizeof(stack_fi));
3193 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3194 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3195 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3196 oe->disk_num_bytes);
3197 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3198 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
3199 num_bytes = oe->truncated_len;
3200 ram_bytes = num_bytes;
3201 }
3202 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3203 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3204 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3205 /* Encryption and other encoding are reserved and all 0 */
3206
3207 /*
3208 * For delalloc, when completing an ordered extent we update the inode's 3209 * bytes when clearing the range in the inode's io tree, so pass false 3210 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(), 3211 * except if the ordered extent was truncated. 3212 */ 3213 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) || 3214 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) || 3215 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags); 3216 3217 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), 3218 oe->file_offset, &stack_fi, 3219 update_inode_bytes, oe->qgroup_rsv); 3220 } 3221 3222 /* 3223 * As ordered data IO finishes, this gets called so we can finish 3224 * an ordered extent if the range of bytes in the file it covers are 3225 * fully written. 3226 */ 3227 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) 3228 { 3229 struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode); 3230 struct btrfs_root *root = inode->root; 3231 struct btrfs_fs_info *fs_info = root->fs_info; 3232 struct btrfs_trans_handle *trans = NULL; 3233 struct extent_io_tree *io_tree = &inode->io_tree; 3234 struct extent_state *cached_state = NULL; 3235 u64 start, end; 3236 int compress_type = 0; 3237 int ret = 0; 3238 u64 logical_len = ordered_extent->num_bytes; 3239 bool freespace_inode; 3240 bool truncated = false; 3241 bool clear_reserved_extent = true; 3242 unsigned int clear_bits = EXTENT_DEFRAG; 3243 3244 start = ordered_extent->file_offset; 3245 end = start + ordered_extent->num_bytes - 1; 3246 3247 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3248 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 3249 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && 3250 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) 3251 clear_bits |= EXTENT_DELALLOC_NEW; 3252 3253 freespace_inode = btrfs_is_free_space_inode(inode); 3254 if (!freespace_inode) 3255 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent); 3256 3257 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 3258 ret = -EIO; 3259 goto out; 3260 } 3261 3262 /* A valid bdev implies a write on a sequential zone */ 3263 if (ordered_extent->bdev) { 3264 btrfs_rewrite_logical_zoned(ordered_extent); 3265 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, 3266 ordered_extent->disk_num_bytes); 3267 } 3268 3269 btrfs_free_io_failure_record(inode, start, end); 3270 3271 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 3272 truncated = true; 3273 logical_len = ordered_extent->truncated_len; 3274 /* Truncated the entire extent, don't bother adding */ 3275 if (!logical_len) 3276 goto out; 3277 } 3278 3279 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3280 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 3281 3282 btrfs_inode_safe_disk_i_size_write(inode, 0); 3283 if (freespace_inode) 3284 trans = btrfs_join_transaction_spacecache(root); 3285 else 3286 trans = btrfs_join_transaction(root); 3287 if (IS_ERR(trans)) { 3288 ret = PTR_ERR(trans); 3289 trans = NULL; 3290 goto out; 3291 } 3292 trans->block_rsv = &inode->block_rsv; 3293 ret = btrfs_update_inode_fallback(trans, root, inode); 3294 if (ret) /* -ENOMEM or corruption */ 3295 btrfs_abort_transaction(trans, ret); 3296 goto out; 3297 } 3298 3299 clear_bits |= EXTENT_LOCKED; 3300 lock_extent(io_tree, start, end, &cached_state); 3301 3302 if (freespace_inode) 3303 trans = btrfs_join_transaction_spacecache(root); 3304 else 3305 trans = btrfs_join_transaction(root); 
3306 if (IS_ERR(trans)) { 3307 ret = PTR_ERR(trans); 3308 trans = NULL; 3309 goto out; 3310 } 3311 3312 trans->block_rsv = &inode->block_rsv; 3313 3314 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 3315 compress_type = ordered_extent->compress_type; 3316 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3317 BUG_ON(compress_type); 3318 ret = btrfs_mark_extent_written(trans, inode, 3319 ordered_extent->file_offset, 3320 ordered_extent->file_offset + 3321 logical_len); 3322 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, 3323 ordered_extent->disk_num_bytes); 3324 } else { 3325 BUG_ON(root == fs_info->tree_root); 3326 ret = insert_ordered_extent_file_extent(trans, ordered_extent); 3327 if (!ret) { 3328 clear_reserved_extent = false; 3329 btrfs_release_delalloc_bytes(fs_info, 3330 ordered_extent->disk_bytenr, 3331 ordered_extent->disk_num_bytes); 3332 } 3333 } 3334 unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset, 3335 ordered_extent->num_bytes, trans->transid); 3336 if (ret < 0) { 3337 btrfs_abort_transaction(trans, ret); 3338 goto out; 3339 } 3340 3341 ret = add_pending_csums(trans, &ordered_extent->list); 3342 if (ret) { 3343 btrfs_abort_transaction(trans, ret); 3344 goto out; 3345 } 3346 3347 /* 3348 * If this is a new delalloc range, clear its new delalloc flag to 3349 * update the inode's number of bytes. This needs to be done first 3350 * before updating the inode item. 3351 */ 3352 if ((clear_bits & EXTENT_DELALLOC_NEW) && 3353 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) 3354 clear_extent_bit(&inode->io_tree, start, end, 3355 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, 3356 &cached_state); 3357 3358 btrfs_inode_safe_disk_i_size_write(inode, 0); 3359 ret = btrfs_update_inode_fallback(trans, root, inode); 3360 if (ret) { /* -ENOMEM or corruption */ 3361 btrfs_abort_transaction(trans, ret); 3362 goto out; 3363 } 3364 ret = 0; 3365 out: 3366 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 3367 &cached_state); 3368 3369 if (trans) 3370 btrfs_end_transaction(trans); 3371 3372 if (ret || truncated) { 3373 u64 unwritten_start = start; 3374 3375 /* 3376 * If we failed to finish this ordered extent for any reason we 3377 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered 3378 * extent, and mark the inode with the error if it wasn't 3379 * already set. Any error during writeback would have already 3380 * set the mapping error, so we need to set it if we're the ones 3381 * marking this ordered extent as failed. 3382 */ 3383 if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR, 3384 &ordered_extent->flags)) 3385 mapping_set_error(ordered_extent->inode->i_mapping, -EIO); 3386 3387 if (truncated) 3388 unwritten_start += logical_len; 3389 clear_extent_uptodate(io_tree, unwritten_start, end, NULL); 3390 3391 /* Drop extent maps for the part of the extent we didn't write. */ 3392 btrfs_drop_extent_map_range(inode, unwritten_start, end, false); 3393 3394 /* 3395 * If the ordered extent had an IOERR or something else went 3396 * wrong we need to return the space for this ordered extent 3397 * back to the allocator. We only free the extent in the 3398 * truncated case if we didn't write out the extent at all. 3399 * 3400 * If we made it past insert_reserved_file_extent before we 3401 * errored out then we don't need to do this as the accounting 3402 * has already been done. 
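 *
 * NOCOW and prealloc ordered extents are excluded below since they
 * did not allocate a new data extent to begin with, so there is
 * nothing to return to the allocator for them.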
3403 */
3404 if ((ret || !logical_len) &&
3405 clear_reserved_extent &&
3406 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3407 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3408 /*
3409 * Discard the range before returning it back to the
3410 * free space pool
3411 */
3412 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3413 btrfs_discard_extent(fs_info,
3414 ordered_extent->disk_bytenr,
3415 ordered_extent->disk_num_bytes,
3416 NULL);
3417 btrfs_free_reserved_extent(fs_info,
3418 ordered_extent->disk_bytenr,
3419 ordered_extent->disk_num_bytes, 1);
3420 }
3421 }
3422
3423 /*
3424 * This needs to be done to make sure anybody waiting knows we are done
3425 * updating everything for this ordered extent.
3426 */
3427 btrfs_remove_ordered_extent(inode, ordered_extent);
3428
3429 /* once for us */
3430 btrfs_put_ordered_extent(ordered_extent);
3431 /* once for the tree */
3432 btrfs_put_ordered_extent(ordered_extent);
3433
3434 return ret;
3435 }
3436
3437 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
3438 struct page *page, u64 start,
3439 u64 end, bool uptodate)
3440 {
3441 trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);
3442
3443 btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start, uptodate);
3444 }
3445
3446 /*
3447 * Verify the checksum for a single sector without any extra actions that
3448 * depend on the type of I/O.
3449 */
3450 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
3451 u32 pgoff, u8 *csum, const u8 * const csum_expected)
3452 {
3453 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3454 char *kaddr;
3455
3456 ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);
3457
3458 shash->tfm = fs_info->csum_shash;
3459
3460 kaddr = kmap_local_page(page) + pgoff;
3461 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
3462 kunmap_local(kaddr);
3463
3464 if (memcmp(csum, csum_expected, fs_info->csum_size))
3465 return -EIO;
3466 return 0;
3467 }
3468
3469 static u8 *btrfs_csum_ptr(const struct btrfs_fs_info *fs_info, u8 *csums, u64 offset)
3470 {
3471 u64 offset_in_sectors = offset >> fs_info->sectorsize_bits;
3472
3473 return csums + offset_in_sectors * fs_info->csum_size;
3474 }
3475
3476 /*
3477 * btrfs_check_data_csum - verify checksum of one sector of uncompressed data
3478 * @inode: inode
3479 * @bbio: btrfs_bio which contains the csum
3480 * @bio_offset: offset to the beginning of the bio (in bytes)
3481 * @page: page where the data to be verified is
3482 * @pgoff: offset inside the page
3483 *
3484 * The length of such a check is always one sector size.
3485 *
3486 * When a csum mismatch is detected, we will also report the error and fill the
3487 * corrupted range with zero.
(Thus it needs the extra parameters) 3488 */ 3489 int btrfs_check_data_csum(struct btrfs_inode *inode, struct btrfs_bio *bbio, 3490 u32 bio_offset, struct page *page, u32 pgoff) 3491 { 3492 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3493 u32 len = fs_info->sectorsize; 3494 u8 *csum_expected; 3495 u8 csum[BTRFS_CSUM_SIZE]; 3496 3497 ASSERT(pgoff + len <= PAGE_SIZE); 3498 3499 csum_expected = btrfs_csum_ptr(fs_info, bbio->csum, bio_offset); 3500 3501 if (btrfs_check_sector_csum(fs_info, page, pgoff, csum, csum_expected)) 3502 goto zeroit; 3503 return 0; 3504 3505 zeroit: 3506 btrfs_print_data_csum_error(inode, bbio->file_offset + bio_offset, 3507 csum, csum_expected, bbio->mirror_num); 3508 if (bbio->device) 3509 btrfs_dev_stat_inc_and_print(bbio->device, 3510 BTRFS_DEV_STAT_CORRUPTION_ERRS); 3511 memzero_page(page, pgoff, len); 3512 return -EIO; 3513 } 3514 3515 /* 3516 * When reads are done, we need to check csums to verify the data is correct. 3517 * if there's a match, we allow the bio to finish. If not, the code in 3518 * extent_io.c will try to find good copies for us. 3519 * 3520 * @bio_offset: offset to the beginning of the bio (in bytes) 3521 * @start: file offset of the range start 3522 * @end: file offset of the range end (inclusive) 3523 * 3524 * Return a bitmap where bit set means a csum mismatch, and bit not set means 3525 * csum match. 3526 */ 3527 unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio, 3528 u32 bio_offset, struct page *page, 3529 u64 start, u64 end) 3530 { 3531 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); 3532 struct btrfs_root *root = inode->root; 3533 struct btrfs_fs_info *fs_info = root->fs_info; 3534 struct extent_io_tree *io_tree = &inode->io_tree; 3535 const u32 sectorsize = root->fs_info->sectorsize; 3536 u32 pg_off; 3537 unsigned int result = 0; 3538 3539 /* 3540 * This only happens for NODATASUM or compressed read. 3541 * Normally this should be covered by above check for compressed read 3542 * or the next check for NODATASUM. Just do a quicker exit here. 3543 */ 3544 if (bbio->csum == NULL) 3545 return 0; 3546 3547 if (inode->flags & BTRFS_INODE_NODATASUM) 3548 return 0; 3549 3550 if (unlikely(test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))) 3551 return 0; 3552 3553 ASSERT(page_offset(page) <= start && 3554 end <= page_offset(page) + PAGE_SIZE - 1); 3555 for (pg_off = offset_in_page(start); 3556 pg_off < offset_in_page(end); 3557 pg_off += sectorsize, bio_offset += sectorsize) { 3558 u64 file_offset = pg_off + page_offset(page); 3559 int ret; 3560 3561 if (btrfs_is_data_reloc_root(root) && 3562 test_range_bit(io_tree, file_offset, 3563 file_offset + sectorsize - 1, 3564 EXTENT_NODATASUM, 1, NULL)) { 3565 /* Skip the range without csum for data reloc inode */ 3566 clear_extent_bits(io_tree, file_offset, 3567 file_offset + sectorsize - 1, 3568 EXTENT_NODATASUM); 3569 continue; 3570 } 3571 ret = btrfs_check_data_csum(inode, bbio, bio_offset, page, pg_off); 3572 if (ret < 0) { 3573 const int nr_bit = (pg_off - offset_in_page(start)) >> 3574 root->fs_info->sectorsize_bits; 3575 3576 result |= (1U << nr_bit); 3577 } 3578 } 3579 return result; 3580 } 3581 3582 /* 3583 * btrfs_add_delayed_iput - perform a delayed iput on @inode 3584 * 3585 * @inode: The inode we want to perform iput on 3586 * 3587 * This function uses the generic vfs_inode::i_count to track whether we should 3588 * just decrement it (in case it's > 1) or if this is the last iput then link 3589 * the inode to the delayed iput machinery. 
Delayed iputs are processed at 3590 * transaction commit time/superblock commit/cleaner kthread. 3591 */ 3592 void btrfs_add_delayed_iput(struct btrfs_inode *inode) 3593 { 3594 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3595 3596 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1)) 3597 return; 3598 3599 atomic_inc(&fs_info->nr_delayed_iputs); 3600 spin_lock(&fs_info->delayed_iput_lock); 3601 ASSERT(list_empty(&inode->delayed_iput)); 3602 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); 3603 spin_unlock(&fs_info->delayed_iput_lock); 3604 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3605 wake_up_process(fs_info->cleaner_kthread); 3606 } 3607 3608 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3609 struct btrfs_inode *inode) 3610 { 3611 list_del_init(&inode->delayed_iput); 3612 spin_unlock(&fs_info->delayed_iput_lock); 3613 iput(&inode->vfs_inode); 3614 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3615 wake_up(&fs_info->delayed_iputs_wait); 3616 spin_lock(&fs_info->delayed_iput_lock); 3617 } 3618 3619 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3620 struct btrfs_inode *inode) 3621 { 3622 if (!list_empty(&inode->delayed_iput)) { 3623 spin_lock(&fs_info->delayed_iput_lock); 3624 if (!list_empty(&inode->delayed_iput)) 3625 run_delayed_iput_locked(fs_info, inode); 3626 spin_unlock(&fs_info->delayed_iput_lock); 3627 } 3628 } 3629 3630 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3631 { 3632 3633 spin_lock(&fs_info->delayed_iput_lock); 3634 while (!list_empty(&fs_info->delayed_iputs)) { 3635 struct btrfs_inode *inode; 3636 3637 inode = list_first_entry(&fs_info->delayed_iputs, 3638 struct btrfs_inode, delayed_iput); 3639 run_delayed_iput_locked(fs_info, inode); 3640 cond_resched_lock(&fs_info->delayed_iput_lock); 3641 } 3642 spin_unlock(&fs_info->delayed_iput_lock); 3643 } 3644 3645 /* 3646 * Wait for flushing all delayed iputs 3647 * 3648 * @fs_info: the filesystem 3649 * 3650 * This will wait on any delayed iputs that are currently running with KILLABLE 3651 * set. Once they are all done running we will return, unless we are killed in 3652 * which case we return EINTR. This helps in user operations like fallocate etc 3653 * that might get blocked on the iputs. 3654 * 3655 * Return EINTR if we were killed, 0 if nothing's pending 3656 */ 3657 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3658 { 3659 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3660 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3661 if (ret) 3662 return -EINTR; 3663 return 0; 3664 } 3665 3666 /* 3667 * This creates an orphan entry for the given inode in case something goes wrong 3668 * in the middle of an unlink. 3669 */ 3670 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3671 struct btrfs_inode *inode) 3672 { 3673 int ret; 3674 3675 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3676 if (ret && ret != -EEXIST) { 3677 btrfs_abort_transaction(trans, ret); 3678 return ret; 3679 } 3680 3681 return 0; 3682 } 3683 3684 /* 3685 * We have done the delete so we can go ahead and remove the orphan item for 3686 * this particular inode. 3687 */ 3688 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3689 struct btrfs_inode *inode) 3690 { 3691 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3692 } 3693 3694 /* 3695 * this cleans up any orphans that may be left on the list from the last use 3696 * of this root. 
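 *
 * Orphan items all live in this root's tree with the key layout
 * (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, <inode number>), which
 * is why the search below starts at offset (u64)-1 and walks backwards.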
3697 */ 3698 int btrfs_orphan_cleanup(struct btrfs_root *root) 3699 { 3700 struct btrfs_fs_info *fs_info = root->fs_info; 3701 struct btrfs_path *path; 3702 struct extent_buffer *leaf; 3703 struct btrfs_key key, found_key; 3704 struct btrfs_trans_handle *trans; 3705 struct inode *inode; 3706 u64 last_objectid = 0; 3707 int ret = 0, nr_unlink = 0; 3708 3709 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) 3710 return 0; 3711 3712 path = btrfs_alloc_path(); 3713 if (!path) { 3714 ret = -ENOMEM; 3715 goto out; 3716 } 3717 path->reada = READA_BACK; 3718 3719 key.objectid = BTRFS_ORPHAN_OBJECTID; 3720 key.type = BTRFS_ORPHAN_ITEM_KEY; 3721 key.offset = (u64)-1; 3722 3723 while (1) { 3724 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3725 if (ret < 0) 3726 goto out; 3727 3728 /* 3729 * if ret == 0 means we found what we were searching for, which 3730 * is weird, but possible, so only screw with path if we didn't 3731 * find the key and see if we have stuff that matches 3732 */ 3733 if (ret > 0) { 3734 ret = 0; 3735 if (path->slots[0] == 0) 3736 break; 3737 path->slots[0]--; 3738 } 3739 3740 /* pull out the item */ 3741 leaf = path->nodes[0]; 3742 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3743 3744 /* make sure the item matches what we want */ 3745 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3746 break; 3747 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3748 break; 3749 3750 /* release the path since we're done with it */ 3751 btrfs_release_path(path); 3752 3753 /* 3754 * this is where we are basically btrfs_lookup, without the 3755 * crossing root thing. we store the inode number in the 3756 * offset of the orphan item. 3757 */ 3758 3759 if (found_key.offset == last_objectid) { 3760 btrfs_err(fs_info, 3761 "Error removing orphan entry, stopping orphan cleanup"); 3762 ret = -EINVAL; 3763 goto out; 3764 } 3765 3766 last_objectid = found_key.offset; 3767 3768 found_key.objectid = found_key.offset; 3769 found_key.type = BTRFS_INODE_ITEM_KEY; 3770 found_key.offset = 0; 3771 inode = btrfs_iget(fs_info->sb, last_objectid, root); 3772 ret = PTR_ERR_OR_ZERO(inode); 3773 if (ret && ret != -ENOENT) 3774 goto out; 3775 3776 if (ret == -ENOENT && root == fs_info->tree_root) { 3777 struct btrfs_root *dead_root; 3778 int is_dead_root = 0; 3779 3780 /* 3781 * This is an orphan in the tree root. Currently these 3782 * could come from 2 sources: 3783 * a) a root (snapshot/subvolume) deletion in progress 3784 * b) a free space cache inode 3785 * We need to distinguish those two, as the orphan item 3786 * for a root must not get deleted before the deletion 3787 * of the snapshot/subvolume's tree completes. 3788 * 3789 * btrfs_find_orphan_roots() ran before us, which has 3790 * found all deleted roots and loaded them into 3791 * fs_info->fs_roots_radix. So here we can find if an 3792 * orphan item corresponds to a deleted root by looking 3793 * up the root from that radix tree. 3794 */ 3795 3796 spin_lock(&fs_info->fs_roots_radix_lock); 3797 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, 3798 (unsigned long)found_key.objectid); 3799 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3800 is_dead_root = 1; 3801 spin_unlock(&fs_info->fs_roots_radix_lock); 3802 3803 if (is_dead_root) { 3804 /* prevent this orphan from being found again */ 3805 key.offset = found_key.objectid - 1; 3806 continue; 3807 } 3808 3809 } 3810 3811 /* 3812 * If we have an inode with links, there are a couple of 3813 * possibilities: 3814 * 3815 * 1. 
We were halfway through creating fsverity metadata for the 3816 * file. In that case, the orphan item represents incomplete 3817 * fsverity metadata which must be cleaned up with 3818 * btrfs_drop_verity_items and deleting the orphan item. 3819 * 3820 * 2. Old kernels (before v3.12) used to create an 3821 * orphan item for truncate indicating that there were possibly 3822 * extent items past i_size that needed to be deleted. In v3.12, 3823 * truncate was changed to update i_size in sync with the extent 3824 * items, but the (useless) orphan item was still created. Since 3825 * v4.18, we don't create the orphan item for truncate at all. 3826 * 3827 * So, this item could mean that we need to do a truncate, but 3828 * only if this filesystem was last used on a pre-v3.12 kernel 3829 * and was not cleanly unmounted. The odds of that are quite 3830 * slim, and it's a pain to do the truncate now, so just delete 3831 * the orphan item. 3832 * 3833 * It's also possible that this orphan item was supposed to be 3834 * deleted but wasn't. The inode number may have been reused, 3835 * but either way, we can delete the orphan item. 3836 */ 3837 if (ret == -ENOENT || inode->i_nlink) { 3838 if (!ret) { 3839 ret = btrfs_drop_verity_items(BTRFS_I(inode)); 3840 iput(inode); 3841 if (ret) 3842 goto out; 3843 } 3844 trans = btrfs_start_transaction(root, 1); 3845 if (IS_ERR(trans)) { 3846 ret = PTR_ERR(trans); 3847 goto out; 3848 } 3849 btrfs_debug(fs_info, "auto deleting %Lu", 3850 found_key.objectid); 3851 ret = btrfs_del_orphan_item(trans, root, 3852 found_key.objectid); 3853 btrfs_end_transaction(trans); 3854 if (ret) 3855 goto out; 3856 continue; 3857 } 3858 3859 nr_unlink++; 3860 3861 /* this will do delete_inode and everything for us */ 3862 iput(inode); 3863 } 3864 /* release the path since we're done with it */ 3865 btrfs_release_path(path); 3866 3867 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3868 trans = btrfs_join_transaction(root); 3869 if (!IS_ERR(trans)) 3870 btrfs_end_transaction(trans); 3871 } 3872 3873 if (nr_unlink) 3874 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3875 3876 out: 3877 if (ret) 3878 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3879 btrfs_free_path(path); 3880 return ret; 3881 } 3882 3883 /* 3884 * very simple check to peek ahead in the leaf looking for xattrs. If we 3885 * don't find any xattrs, we know there can't be any acls.
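 *
 * Leaf items are key ordered, so for an inode item at key
 * (objectid, BTRFS_INODE_ITEM_KEY, 0) any ACL xattr sorts a few slots
 * after it as (objectid, BTRFS_XATTR_ITEM_KEY, name_hash); that ordering
 * is what makes this forward peek cheap.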
3886 * 3887 * slot is the slot the inode is in, objectid is the objectid of the inode 3888 */ 3889 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3890 int slot, u64 objectid, 3891 int *first_xattr_slot) 3892 { 3893 u32 nritems = btrfs_header_nritems(leaf); 3894 struct btrfs_key found_key; 3895 static u64 xattr_access = 0; 3896 static u64 xattr_default = 0; 3897 int scanned = 0; 3898 3899 if (!xattr_access) { 3900 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3901 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3902 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3903 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3904 } 3905 3906 slot++; 3907 *first_xattr_slot = -1; 3908 while (slot < nritems) { 3909 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3910 3911 /* we found a different objectid, there must not be acls */ 3912 if (found_key.objectid != objectid) 3913 return 0; 3914 3915 /* we found an xattr, assume we've got an acl */ 3916 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3917 if (*first_xattr_slot == -1) 3918 *first_xattr_slot = slot; 3919 if (found_key.offset == xattr_access || 3920 found_key.offset == xattr_default) 3921 return 1; 3922 } 3923 3924 /* 3925 * we found a key greater than an xattr key, there can't 3926 * be any acls later on 3927 */ 3928 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3929 return 0; 3930 3931 slot++; 3932 scanned++; 3933 3934 /* 3935 * it goes inode, inode backrefs, xattrs, extents, 3936 * so if there are a ton of hard links to an inode there can 3937 * be a lot of backrefs. Don't waste time searching too hard, 3938 * this is just an optimization 3939 */ 3940 if (scanned >= 8) 3941 break; 3942 } 3943 /* we hit the end of the leaf before we found an xattr or 3944 * something larger than an xattr. 
We have to assume the inode 3945 * has acls 3946 */ 3947 if (*first_xattr_slot == -1) 3948 *first_xattr_slot = slot; 3949 return 1; 3950 } 3951 3952 /* 3953 * read an inode from the btree into the in-memory inode 3954 */ 3955 static int btrfs_read_locked_inode(struct inode *inode, 3956 struct btrfs_path *in_path) 3957 { 3958 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3959 struct btrfs_path *path = in_path; 3960 struct extent_buffer *leaf; 3961 struct btrfs_inode_item *inode_item; 3962 struct btrfs_root *root = BTRFS_I(inode)->root; 3963 struct btrfs_key location; 3964 unsigned long ptr; 3965 int maybe_acls; 3966 u32 rdev; 3967 int ret; 3968 bool filled = false; 3969 int first_xattr_slot; 3970 3971 ret = btrfs_fill_inode(inode, &rdev); 3972 if (!ret) 3973 filled = true; 3974 3975 if (!path) { 3976 path = btrfs_alloc_path(); 3977 if (!path) 3978 return -ENOMEM; 3979 } 3980 3981 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3982 3983 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3984 if (ret) { 3985 if (path != in_path) 3986 btrfs_free_path(path); 3987 return ret; 3988 } 3989 3990 leaf = path->nodes[0]; 3991 3992 if (filled) 3993 goto cache_index; 3994 3995 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3996 struct btrfs_inode_item); 3997 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3998 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3999 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 4000 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 4001 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 4002 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0, 4003 round_up(i_size_read(inode), fs_info->sectorsize)); 4004 4005 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 4006 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 4007 4008 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 4009 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 4010 4011 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); 4012 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); 4013 4014 BTRFS_I(inode)->i_otime.tv_sec = 4015 btrfs_timespec_sec(leaf, &inode_item->otime); 4016 BTRFS_I(inode)->i_otime.tv_nsec = 4017 btrfs_timespec_nsec(leaf, &inode_item->otime); 4018 4019 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 4020 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 4021 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 4022 4023 inode_set_iversion_queried(inode, 4024 btrfs_inode_sequence(leaf, inode_item)); 4025 inode->i_generation = BTRFS_I(inode)->generation; 4026 inode->i_rdev = 0; 4027 rdev = btrfs_inode_rdev(leaf, inode_item); 4028 4029 BTRFS_I(inode)->index_cnt = (u64)-1; 4030 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), 4031 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); 4032 4033 cache_index: 4034 /* 4035 * If we were modified in the current generation and evicted from memory 4036 * and then re-read we need to do a full sync since we don't have any 4037 * idea about which extents were modified before we were evicted from 4038 * cache. 4039 * 4040 * This is required for both inode re-read from disk and delayed inode 4041 * in delayed_nodes_tree. 
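 *
 * Example: write to a file in transaction N, evict the inode, then
 * re-read it and fsync it while still in transaction N. Without the
 * full sync flag the log would have no way to tell which extents
 * predate the eviction.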
4042 */ 4043 if (BTRFS_I(inode)->last_trans == fs_info->generation) 4044 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4045 &BTRFS_I(inode)->runtime_flags); 4046 4047 /* 4048 * We don't persist the id of the transaction where an unlink operation 4049 * against the inode was last made. So here we assume the inode might 4050 * have been evicted, and therefore the exact value of last_unlink_trans 4051 * lost, and set it to last_trans to avoid metadata inconsistencies 4052 * between the inode and its parent if the inode is fsync'ed and the log 4053 * replayed. For example, in the scenario: 4054 * 4055 * touch mydir/foo 4056 * ln mydir/foo mydir/bar 4057 * sync 4058 * unlink mydir/bar 4059 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 4060 * xfs_io -c fsync mydir/foo 4061 * <power failure> 4062 * mount fs, triggers fsync log replay 4063 * 4064 * We must make sure that when we fsync our inode foo we also log its 4065 * parent inode, otherwise after log replay the parent still has the 4066 * dentry with the "bar" name but our inode foo has a link count of 1 4067 * and doesn't have an inode ref with the name "bar" anymore. 4068 * 4069 * Setting last_unlink_trans to last_trans is a pessimistic approach, 4070 * but it guarantees correctness at the expense of occasional full 4071 * transaction commits on fsync if our inode is a directory, or if our 4072 * inode is not a directory, logging its parent unnecessarily. 4073 */ 4074 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 4075 4076 /* 4077 * Same logic as for last_unlink_trans. We don't persist the generation 4078 * of the last transaction where this inode was used for a reflink 4079 * operation, so after eviction and reloading the inode we must be 4080 * pessimistic and assume the last transaction that modified the inode. 
4081 */ 4082 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; 4083 4084 path->slots[0]++; 4085 if (inode->i_nlink != 1 || 4086 path->slots[0] >= btrfs_header_nritems(leaf)) 4087 goto cache_acl; 4088 4089 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 4090 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 4091 goto cache_acl; 4092 4093 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4094 if (location.type == BTRFS_INODE_REF_KEY) { 4095 struct btrfs_inode_ref *ref; 4096 4097 ref = (struct btrfs_inode_ref *)ptr; 4098 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 4099 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 4100 struct btrfs_inode_extref *extref; 4101 4102 extref = (struct btrfs_inode_extref *)ptr; 4103 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 4104 extref); 4105 } 4106 cache_acl: 4107 /* 4108 * try to precache a NULL acl entry for files that don't have 4109 * any xattrs or acls 4110 */ 4111 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 4112 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 4113 if (first_xattr_slot != -1) { 4114 path->slots[0] = first_xattr_slot; 4115 ret = btrfs_load_inode_props(inode, path); 4116 if (ret) 4117 btrfs_err(fs_info, 4118 "error loading props for ino %llu (root %llu): %d", 4119 btrfs_ino(BTRFS_I(inode)), 4120 root->root_key.objectid, ret); 4121 } 4122 if (path != in_path) 4123 btrfs_free_path(path); 4124 4125 if (!maybe_acls) 4126 cache_no_acl(inode); 4127 4128 switch (inode->i_mode & S_IFMT) { 4129 case S_IFREG: 4130 inode->i_mapping->a_ops = &btrfs_aops; 4131 inode->i_fop = &btrfs_file_operations; 4132 inode->i_op = &btrfs_file_inode_operations; 4133 break; 4134 case S_IFDIR: 4135 inode->i_fop = &btrfs_dir_file_operations; 4136 inode->i_op = &btrfs_dir_inode_operations; 4137 break; 4138 case S_IFLNK: 4139 inode->i_op = &btrfs_symlink_inode_operations; 4140 inode_nohighmem(inode); 4141 inode->i_mapping->a_ops = &btrfs_aops; 4142 break; 4143 default: 4144 inode->i_op = &btrfs_special_inode_operations; 4145 init_special_inode(inode, inode->i_mode, rdev); 4146 break; 4147 } 4148 4149 btrfs_sync_inode_flags_to_i_flags(inode); 4150 return 0; 4151 } 4152 4153 /* 4154 * given a leaf and an inode, copy the inode fields into the leaf 4155 */ 4156 static void fill_inode_item(struct btrfs_trans_handle *trans, 4157 struct extent_buffer *leaf, 4158 struct btrfs_inode_item *item, 4159 struct inode *inode) 4160 { 4161 struct btrfs_map_token token; 4162 u64 flags; 4163 4164 btrfs_init_map_token(&token, leaf); 4165 4166 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 4167 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 4168 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 4169 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 4170 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 4171 4172 btrfs_set_token_timespec_sec(&token, &item->atime, 4173 inode->i_atime.tv_sec); 4174 btrfs_set_token_timespec_nsec(&token, &item->atime, 4175 inode->i_atime.tv_nsec); 4176 4177 btrfs_set_token_timespec_sec(&token, &item->mtime, 4178 inode->i_mtime.tv_sec); 4179 btrfs_set_token_timespec_nsec(&token, &item->mtime, 4180 inode->i_mtime.tv_nsec); 4181 4182 btrfs_set_token_timespec_sec(&token, &item->ctime, 4183 inode->i_ctime.tv_sec); 4184 btrfs_set_token_timespec_nsec(&token, &item->ctime, 4185 inode->i_ctime.tv_nsec); 4186 4187 btrfs_set_token_timespec_sec(&token, &item->otime, 4188 BTRFS_I(inode)->i_otime.tv_sec); 4189 
btrfs_set_token_timespec_nsec(&token, &item->otime, 4190 BTRFS_I(inode)->i_otime.tv_nsec); 4191 4192 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 4193 btrfs_set_token_inode_generation(&token, item, 4194 BTRFS_I(inode)->generation); 4195 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 4196 btrfs_set_token_inode_transid(&token, item, trans->transid); 4197 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 4198 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 4199 BTRFS_I(inode)->ro_flags); 4200 btrfs_set_token_inode_flags(&token, item, flags); 4201 btrfs_set_token_inode_block_group(&token, item, 0); 4202 } 4203 4204 /* 4205 * copy everything in the in-memory inode into the btree. 4206 */ 4207 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 4208 struct btrfs_root *root, 4209 struct btrfs_inode *inode) 4210 { 4211 struct btrfs_inode_item *inode_item; 4212 struct btrfs_path *path; 4213 struct extent_buffer *leaf; 4214 int ret; 4215 4216 path = btrfs_alloc_path(); 4217 if (!path) 4218 return -ENOMEM; 4219 4220 ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1); 4221 if (ret) { 4222 if (ret > 0) 4223 ret = -ENOENT; 4224 goto failed; 4225 } 4226 4227 leaf = path->nodes[0]; 4228 inode_item = btrfs_item_ptr(leaf, path->slots[0], 4229 struct btrfs_inode_item); 4230 4231 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); 4232 btrfs_mark_buffer_dirty(leaf); 4233 btrfs_set_inode_last_trans(trans, inode); 4234 ret = 0; 4235 failed: 4236 btrfs_free_path(path); 4237 return ret; 4238 } 4239 4240 /* 4241 * copy everything in the in-memory inode into the btree. 4242 */ 4243 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 4244 struct btrfs_root *root, 4245 struct btrfs_inode *inode) 4246 { 4247 struct btrfs_fs_info *fs_info = root->fs_info; 4248 int ret; 4249 4250 /* 4251 * If the inode is a free space inode, we can deadlock during commit 4252 * if we put it into the delayed code. 4253 * 4254 * The data relocation inode should also be directly updated 4255 * without delay 4256 */ 4257 if (!btrfs_is_free_space_inode(inode) 4258 && !btrfs_is_data_reloc_root(root) 4259 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 4260 btrfs_update_root_times(trans, root); 4261 4262 ret = btrfs_delayed_update_inode(trans, root, inode); 4263 if (!ret) 4264 btrfs_set_inode_last_trans(trans, inode); 4265 return ret; 4266 } 4267 4268 return btrfs_update_inode_item(trans, root, inode); 4269 } 4270 4271 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4272 struct btrfs_root *root, struct btrfs_inode *inode) 4273 { 4274 int ret; 4275 4276 ret = btrfs_update_inode(trans, root, inode); 4277 if (ret == -ENOSPC) 4278 return btrfs_update_inode_item(trans, root, inode); 4279 return ret; 4280 } 4281 4282 /* 4283 * unlink helper that gets used here in inode.c and in the tree logging 4284 * recovery code. 
It removes a link in a directory with a given name, and 4285 * also drops the back refs in the inode to the directory 4286 */ 4287 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4288 struct btrfs_inode *dir, 4289 struct btrfs_inode *inode, 4290 const struct fscrypt_str *name, 4291 struct btrfs_rename_ctx *rename_ctx) 4292 { 4293 struct btrfs_root *root = dir->root; 4294 struct btrfs_fs_info *fs_info = root->fs_info; 4295 struct btrfs_path *path; 4296 int ret = 0; 4297 struct btrfs_dir_item *di; 4298 u64 index; 4299 u64 ino = btrfs_ino(inode); 4300 u64 dir_ino = btrfs_ino(dir); 4301 4302 path = btrfs_alloc_path(); 4303 if (!path) { 4304 ret = -ENOMEM; 4305 goto out; 4306 } 4307 4308 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); 4309 if (IS_ERR_OR_NULL(di)) { 4310 ret = di ? PTR_ERR(di) : -ENOENT; 4311 goto err; 4312 } 4313 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4314 if (ret) 4315 goto err; 4316 btrfs_release_path(path); 4317 4318 /* 4319 * If we don't have the dir index cached, we have to get it by looking 4320 * up the inode ref; since that lookup hands us the inode ref anyway, 4321 * we remove the ref directly, with no need for a delayed deletion. 4322 * 4323 * But if we do have the dir index, there is no need to search for the 4324 * inode ref at all. Since the inode ref is close to the inode item, it 4325 * is better to delay its deletion and just do it when we update the 4326 * inode item. 4327 */ 4328 if (inode->dir_index) { 4329 ret = btrfs_delayed_delete_inode_ref(inode); 4330 if (!ret) { 4331 index = inode->dir_index; 4332 goto skip_backref; 4333 } 4334 } 4335 4336 ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index); 4337 if (ret) { 4338 btrfs_info(fs_info, 4339 "failed to delete reference to %.*s, inode %llu parent %llu", 4340 name->len, name->name, ino, dir_ino); 4341 btrfs_abort_transaction(trans, ret); 4342 goto err; 4343 } 4344 skip_backref: 4345 if (rename_ctx) 4346 rename_ctx->index = index; 4347 4348 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4349 if (ret) { 4350 btrfs_abort_transaction(trans, ret); 4351 goto err; 4352 } 4353 4354 /* 4355 * If we are in a rename context, we don't need to update anything in the 4356 * log. That will be done later during the rename by btrfs_log_new_name(). 4357 * Besides that, doing it here would only cause extra unnecessary btree 4358 * operations on the log tree, increasing latency for applications. 4359 */ 4360 if (!rename_ctx) { 4361 btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino); 4362 btrfs_del_dir_entries_in_log(trans, root, name, dir, index); 4363 } 4364 4365 /* 4366 * If we have a pending delayed iput we could end up with the final iput 4367 * being run in btrfs-cleaner context. If we have enough of these built 4368 * up we can end up burning a lot of time in btrfs-cleaner without any 4369 * way to throttle the unlinks. Since we're currently holding a ref on 4370 * the inode we can run the delayed iput here without any issues as the 4371 * final iput won't be done until after we drop the ref we're currently 4372 * holding.
4373 */ 4374 btrfs_run_delayed_iput(fs_info, inode); 4375 err: 4376 btrfs_free_path(path); 4377 if (ret) 4378 goto out; 4379 4380 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); 4381 inode_inc_iversion(&inode->vfs_inode); 4382 inode_inc_iversion(&dir->vfs_inode); 4383 inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode); 4384 dir->vfs_inode.i_mtime = inode->vfs_inode.i_ctime; 4385 dir->vfs_inode.i_ctime = inode->vfs_inode.i_ctime; 4386 ret = btrfs_update_inode(trans, root, dir); 4387 out: 4388 return ret; 4389 } 4390 4391 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4392 struct btrfs_inode *dir, struct btrfs_inode *inode, 4393 const struct fscrypt_str *name) 4394 { 4395 int ret; 4396 4397 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL); 4398 if (!ret) { 4399 drop_nlink(&inode->vfs_inode); 4400 ret = btrfs_update_inode(trans, inode->root, inode); 4401 } 4402 return ret; 4403 } 4404 4405 /* 4406 * helper to start transaction for unlink and rmdir. 4407 * 4408 * unlink and rmdir are special in btrfs, they do not always free space, so 4409 * if we cannot make our reservations the normal way try and see if there is 4410 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4411 * allow the unlink to occur. 4412 */ 4413 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) 4414 { 4415 struct btrfs_root *root = dir->root; 4416 4417 /* 4418 * 1 for the possible orphan item 4419 * 1 for the dir item 4420 * 1 for the dir index 4421 * 1 for the inode ref 4422 * 1 for the inode 4423 * 1 for the parent inode 4424 */ 4425 return btrfs_start_transaction_fallback_global_rsv(root, 6); 4426 } 4427 4428 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4429 { 4430 struct btrfs_trans_handle *trans; 4431 struct inode *inode = d_inode(dentry); 4432 int ret; 4433 struct fscrypt_name fname; 4434 4435 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4436 if (ret) 4437 return ret; 4438 4439 /* This needs to handle no-key deletions later on */ 4440 4441 trans = __unlink_start_trans(BTRFS_I(dir)); 4442 if (IS_ERR(trans)) { 4443 ret = PTR_ERR(trans); 4444 goto fscrypt_free; 4445 } 4446 4447 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4448 0); 4449 4450 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4451 &fname.disk_name); 4452 if (ret) 4453 goto end_trans; 4454 4455 if (inode->i_nlink == 0) { 4456 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4457 if (ret) 4458 goto end_trans; 4459 } 4460 4461 end_trans: 4462 btrfs_end_transaction(trans); 4463 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); 4464 fscrypt_free: 4465 fscrypt_free_filename(&fname); 4466 return ret; 4467 } 4468 4469 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4470 struct btrfs_inode *dir, struct dentry *dentry) 4471 { 4472 struct btrfs_root *root = dir->root; 4473 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4474 struct btrfs_path *path; 4475 struct extent_buffer *leaf; 4476 struct btrfs_dir_item *di; 4477 struct btrfs_key key; 4478 u64 index; 4479 int ret; 4480 u64 objectid; 4481 u64 dir_ino = btrfs_ino(dir); 4482 struct fscrypt_name fname; 4483 4484 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 4485 if (ret) 4486 return ret; 4487 4488 /* This needs to handle no-key deletions later on */ 4489 4490 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4491 objectid = inode->root->root_key.objectid; 4492 } else if 
(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4493 objectid = inode->location.objectid; 4494 } else { 4495 WARN_ON(1); 4496 fscrypt_free_filename(&fname); 4497 return -EINVAL; 4498 } 4499 4500 path = btrfs_alloc_path(); 4501 if (!path) { 4502 ret = -ENOMEM; 4503 goto out; 4504 } 4505 4506 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4507 &fname.disk_name, -1); 4508 if (IS_ERR_OR_NULL(di)) { 4509 ret = di ? PTR_ERR(di) : -ENOENT; 4510 goto out; 4511 } 4512 4513 leaf = path->nodes[0]; 4514 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4515 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4516 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4517 if (ret) { 4518 btrfs_abort_transaction(trans, ret); 4519 goto out; 4520 } 4521 btrfs_release_path(path); 4522 4523 /* 4524 * This is a placeholder inode for a subvolume we didn't have a 4525 * reference to at the time of the snapshot creation. In the meantime 4526 * we could have renamed the real subvol link into our snapshot, so 4527 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. 4528 * Instead simply lookup the dir_index_item for this entry so we can 4529 * remove it. Otherwise we know we have a ref to the root and we can 4530 * call btrfs_del_root_ref, and it _shouldn't_ fail. 4531 */ 4532 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4533 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name); 4534 if (IS_ERR_OR_NULL(di)) { 4535 if (!di) 4536 ret = -ENOENT; 4537 else 4538 ret = PTR_ERR(di); 4539 btrfs_abort_transaction(trans, ret); 4540 goto out; 4541 } 4542 4543 leaf = path->nodes[0]; 4544 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4545 index = key.offset; 4546 btrfs_release_path(path); 4547 } else { 4548 ret = btrfs_del_root_ref(trans, objectid, 4549 root->root_key.objectid, dir_ino, 4550 &index, &fname.disk_name); 4551 if (ret) { 4552 btrfs_abort_transaction(trans, ret); 4553 goto out; 4554 } 4555 } 4556 4557 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4558 if (ret) { 4559 btrfs_abort_transaction(trans, ret); 4560 goto out; 4561 } 4562 4563 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); 4564 inode_inc_iversion(&dir->vfs_inode); 4565 dir->vfs_inode.i_mtime = current_time(&dir->vfs_inode); 4566 dir->vfs_inode.i_ctime = dir->vfs_inode.i_mtime; 4567 ret = btrfs_update_inode_fallback(trans, root, dir); 4568 if (ret) 4569 btrfs_abort_transaction(trans, ret); 4570 out: 4571 btrfs_free_path(path); 4572 fscrypt_free_filename(&fname); 4573 return ret; 4574 } 4575 4576 /* 4577 * Helper to check if the subvolume references other subvolumes or if it's 4578 * default. 
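 *
 * Concretely, mirroring the code below: deleting the default subvolume
 * (as recorded in the super block's root dir item) fails with -EPERM,
 * and a root that still owns BTRFS_ROOT_REF_KEY items (i.e. has child
 * subvolumes) fails with -ENOTEMPTY.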
4579 */ 4580 static noinline int may_destroy_subvol(struct btrfs_root *root) 4581 { 4582 struct btrfs_fs_info *fs_info = root->fs_info; 4583 struct btrfs_path *path; 4584 struct btrfs_dir_item *di; 4585 struct btrfs_key key; 4586 struct fscrypt_str name = FSTR_INIT("default", 7); 4587 u64 dir_id; 4588 int ret; 4589 4590 path = btrfs_alloc_path(); 4591 if (!path) 4592 return -ENOMEM; 4593 4594 /* Make sure this root isn't set as the default subvol */ 4595 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4596 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4597 dir_id, &name, 0); 4598 if (di && !IS_ERR(di)) { 4599 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4600 if (key.objectid == root->root_key.objectid) { 4601 ret = -EPERM; 4602 btrfs_err(fs_info, 4603 "deleting default subvolume %llu is not allowed", 4604 key.objectid); 4605 goto out; 4606 } 4607 btrfs_release_path(path); 4608 } 4609 4610 key.objectid = root->root_key.objectid; 4611 key.type = BTRFS_ROOT_REF_KEY; 4612 key.offset = (u64)-1; 4613 4614 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4615 if (ret < 0) 4616 goto out; 4617 BUG_ON(ret == 0); 4618 4619 ret = 0; 4620 if (path->slots[0] > 0) { 4621 path->slots[0]--; 4622 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4623 if (key.objectid == root->root_key.objectid && 4624 key.type == BTRFS_ROOT_REF_KEY) 4625 ret = -ENOTEMPTY; 4626 } 4627 out: 4628 btrfs_free_path(path); 4629 return ret; 4630 } 4631 4632 /* Delete all dentries for inodes belonging to the root */ 4633 static void btrfs_prune_dentries(struct btrfs_root *root) 4634 { 4635 struct btrfs_fs_info *fs_info = root->fs_info; 4636 struct rb_node *node; 4637 struct rb_node *prev; 4638 struct btrfs_inode *entry; 4639 struct inode *inode; 4640 u64 objectid = 0; 4641 4642 if (!BTRFS_FS_ERROR(fs_info)) 4643 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4644 4645 spin_lock(&root->inode_lock); 4646 again: 4647 node = root->inode_tree.rb_node; 4648 prev = NULL; 4649 while (node) { 4650 prev = node; 4651 entry = rb_entry(node, struct btrfs_inode, rb_node); 4652 4653 if (objectid < btrfs_ino(entry)) 4654 node = node->rb_left; 4655 else if (objectid > btrfs_ino(entry)) 4656 node = node->rb_right; 4657 else 4658 break; 4659 } 4660 if (!node) { 4661 while (prev) { 4662 entry = rb_entry(prev, struct btrfs_inode, rb_node); 4663 if (objectid <= btrfs_ino(entry)) { 4664 node = prev; 4665 break; 4666 } 4667 prev = rb_next(prev); 4668 } 4669 } 4670 while (node) { 4671 entry = rb_entry(node, struct btrfs_inode, rb_node); 4672 objectid = btrfs_ino(entry) + 1; 4673 inode = igrab(&entry->vfs_inode); 4674 if (inode) { 4675 spin_unlock(&root->inode_lock); 4676 if (atomic_read(&inode->i_count) > 1) 4677 d_prune_aliases(inode); 4678 /* 4679 * btrfs_drop_inode will have it removed from the inode 4680 * cache when its usage count hits zero. 
4681 */ 4682 iput(inode); 4683 cond_resched(); 4684 spin_lock(&root->inode_lock); 4685 goto again; 4686 } 4687 4688 if (cond_resched_lock(&root->inode_lock)) 4689 goto again; 4690 4691 node = rb_next(node); 4692 } 4693 spin_unlock(&root->inode_lock); 4694 } 4695 4696 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry) 4697 { 4698 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); 4699 struct btrfs_root *root = dir->root; 4700 struct inode *inode = d_inode(dentry); 4701 struct btrfs_root *dest = BTRFS_I(inode)->root; 4702 struct btrfs_trans_handle *trans; 4703 struct btrfs_block_rsv block_rsv; 4704 u64 root_flags; 4705 int ret; 4706 4707 /* 4708 * Don't allow deleting a subvolume with send in progress. This is 4709 * inside the inode lock so the error handling that has to drop the bit 4710 * again is not run concurrently. 4711 */ 4712 spin_lock(&dest->root_item_lock); 4713 if (dest->send_in_progress) { 4714 spin_unlock(&dest->root_item_lock); 4715 btrfs_warn(fs_info, 4716 "attempt to delete subvolume %llu during send", 4717 dest->root_key.objectid); 4718 return -EPERM; 4719 } 4720 if (atomic_read(&dest->nr_swapfiles)) { 4721 spin_unlock(&dest->root_item_lock); 4722 btrfs_warn(fs_info, 4723 "attempt to delete subvolume %llu with active swapfile", 4724 root->root_key.objectid); 4725 return -EPERM; 4726 } 4727 root_flags = btrfs_root_flags(&dest->root_item); 4728 btrfs_set_root_flags(&dest->root_item, 4729 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4730 spin_unlock(&dest->root_item_lock); 4731 4732 down_write(&fs_info->subvol_sem); 4733 4734 ret = may_destroy_subvol(dest); 4735 if (ret) 4736 goto out_up_write; 4737 4738 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4739 /* 4740 * One for dir inode, 4741 * two for dir entries, 4742 * two for root ref/backref.
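 *
 * That is 5 metadata units in total, which is the count passed to
 * btrfs_subvolume_reserve_metadata() just below.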
4743 */ 4744 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4745 if (ret) 4746 goto out_up_write; 4747 4748 trans = btrfs_start_transaction(root, 0); 4749 if (IS_ERR(trans)) { 4750 ret = PTR_ERR(trans); 4751 goto out_release; 4752 } 4753 trans->block_rsv = &block_rsv; 4754 trans->bytes_reserved = block_rsv.size; 4755 4756 btrfs_record_snapshot_destroy(trans, dir); 4757 4758 ret = btrfs_unlink_subvol(trans, dir, dentry); 4759 if (ret) { 4760 btrfs_abort_transaction(trans, ret); 4761 goto out_end_trans; 4762 } 4763 4764 ret = btrfs_record_root_in_trans(trans, dest); 4765 if (ret) { 4766 btrfs_abort_transaction(trans, ret); 4767 goto out_end_trans; 4768 } 4769 4770 memset(&dest->root_item.drop_progress, 0, 4771 sizeof(dest->root_item.drop_progress)); 4772 btrfs_set_root_drop_level(&dest->root_item, 0); 4773 btrfs_set_root_refs(&dest->root_item, 0); 4774 4775 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4776 ret = btrfs_insert_orphan_item(trans, 4777 fs_info->tree_root, 4778 dest->root_key.objectid); 4779 if (ret) { 4780 btrfs_abort_transaction(trans, ret); 4781 goto out_end_trans; 4782 } 4783 } 4784 4785 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4786 BTRFS_UUID_KEY_SUBVOL, 4787 dest->root_key.objectid); 4788 if (ret && ret != -ENOENT) { 4789 btrfs_abort_transaction(trans, ret); 4790 goto out_end_trans; 4791 } 4792 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4793 ret = btrfs_uuid_tree_remove(trans, 4794 dest->root_item.received_uuid, 4795 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4796 dest->root_key.objectid); 4797 if (ret && ret != -ENOENT) { 4798 btrfs_abort_transaction(trans, ret); 4799 goto out_end_trans; 4800 } 4801 } 4802 4803 free_anon_bdev(dest->anon_dev); 4804 dest->anon_dev = 0; 4805 out_end_trans: 4806 trans->block_rsv = NULL; 4807 trans->bytes_reserved = 0; 4808 ret = btrfs_end_transaction(trans); 4809 inode->i_flags |= S_DEAD; 4810 out_release: 4811 btrfs_subvolume_release_metadata(root, &block_rsv); 4812 out_up_write: 4813 up_write(&fs_info->subvol_sem); 4814 if (ret) { 4815 spin_lock(&dest->root_item_lock); 4816 root_flags = btrfs_root_flags(&dest->root_item); 4817 btrfs_set_root_flags(&dest->root_item, 4818 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4819 spin_unlock(&dest->root_item_lock); 4820 } else { 4821 d_invalidate(dentry); 4822 btrfs_prune_dentries(dest); 4823 ASSERT(dest->send_in_progress == 0); 4824 } 4825 4826 return ret; 4827 } 4828 4829 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4830 { 4831 struct inode *inode = d_inode(dentry); 4832 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 4833 int err = 0; 4834 struct btrfs_trans_handle *trans; 4835 u64 last_unlink_trans; 4836 struct fscrypt_name fname; 4837 4838 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4839 return -ENOTEMPTY; 4840 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) { 4841 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { 4842 btrfs_err(fs_info, 4843 "extent tree v2 doesn't support snapshot deletion yet"); 4844 return -EOPNOTSUPP; 4845 } 4846 return btrfs_delete_subvolume(BTRFS_I(dir), dentry); 4847 } 4848 4849 err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4850 if (err) 4851 return err; 4852 4853 /* This needs to handle no-key deletions later on */ 4854 4855 trans = __unlink_start_trans(BTRFS_I(dir)); 4856 if (IS_ERR(trans)) { 4857 err = PTR_ERR(trans); 4858 goto out_notrans; 4859 } 4860 4861 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 
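		/*
		 * This is an empty-dir placeholder for a subvolume we never
		 * held a reference to (see the comment in
		 * btrfs_unlink_subvol() above), so it must go through the
		 * subvolume unlink path rather than the regular unlink below.
		 */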
4862 err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); 4863 goto out; 4864 } 4865 4866 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 4867 if (err) 4868 goto out; 4869 4870 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4871 4872 /* now the directory is empty */ 4873 err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4874 &fname.disk_name); 4875 if (!err) { 4876 btrfs_i_size_write(BTRFS_I(inode), 0); 4877 /* 4878 * Propagate the last_unlink_trans value of the deleted dir to 4879 * its parent directory. This is to prevent an unrecoverable 4880 * log tree in the case we do something like this: 4881 * 1) create dir foo 4882 * 2) create snapshot under dir foo 4883 * 3) delete the snapshot 4884 * 4) rmdir foo 4885 * 5) mkdir foo 4886 * 6) fsync foo or some file inside foo 4887 */ 4888 if (last_unlink_trans >= trans->transid) 4889 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4890 } 4891 out: 4892 btrfs_end_transaction(trans); 4893 out_notrans: 4894 btrfs_btree_balance_dirty(fs_info); 4895 fscrypt_free_filename(&fname); 4896 4897 return err; 4898 } 4899 4900 /* 4901 * btrfs_truncate_block - read a block, zero part of it and write it back 4902 * @inode - inode that we're zeroing 4903 * @from - the offset to start zeroing 4904 * @len - the length to zero, 0 to zero the entire range relative to the 4905 * offset 4906 * @front - zero up to the offset instead of from the offset on 4907 * 4908 * This will find the block for the "from" offset, COW the block and zero the 4909 * part we want zeroed. This is used with truncate and hole punching. 4910 */ 4911 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, 4912 int front) 4913 { 4914 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4915 struct address_space *mapping = inode->vfs_inode.i_mapping; 4916 struct extent_io_tree *io_tree = &inode->io_tree; 4917 struct btrfs_ordered_extent *ordered; 4918 struct extent_state *cached_state = NULL; 4919 struct extent_changeset *data_reserved = NULL; 4920 bool only_release_metadata = false; 4921 u32 blocksize = fs_info->sectorsize; 4922 pgoff_t index = from >> PAGE_SHIFT; 4923 unsigned offset = from & (blocksize - 1); 4924 struct page *page; 4925 gfp_t mask = btrfs_alloc_write_mask(mapping); 4926 size_t write_bytes = blocksize; 4927 int ret = 0; 4928 u64 block_start; 4929 u64 block_end; 4930 4931 if (IS_ALIGNED(offset, blocksize) && 4932 (!len || IS_ALIGNED(len, blocksize))) 4933 goto out; 4934 4935 block_start = round_down(from, blocksize); 4936 block_end = block_start + blocksize - 1; 4937 4938 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, 4939 blocksize, false); 4940 if (ret < 0) { 4941 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) { 4942 /* For nocow case, no need to reserve data space */ 4943 only_release_metadata = true; 4944 } else { 4945 goto out; 4946 } 4947 } 4948 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false); 4949 if (ret < 0) { 4950 if (!only_release_metadata) 4951 btrfs_free_reserved_data_space(inode, data_reserved, 4952 block_start, blocksize); 4953 goto out; 4954 } 4955 again: 4956 page = find_or_create_page(mapping, index, mask); 4957 if (!page) { 4958 btrfs_delalloc_release_space(inode, data_reserved, block_start, 4959 blocksize, true); 4960 btrfs_delalloc_release_extents(inode, blocksize); 4961 ret = -ENOMEM; 4962 goto out; 4963 } 4964 ret = set_page_extent_mapped(page); 4965 if (ret < 0) 4966 goto out_unlock; 4967 4968 if (!PageUptodate(page)) { 4969
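		/*
		 * Partial-block zeroing is a read-modify-write: bring the
		 * page up to date first, then re-check that it was not
		 * truncated away from the mapping while it was unlocked
		 * during the read.
		 */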
ret = btrfs_read_folio(NULL, page_folio(page)); 4970 lock_page(page); 4971 if (page->mapping != mapping) { 4972 unlock_page(page); 4973 put_page(page); 4974 goto again; 4975 } 4976 if (!PageUptodate(page)) { 4977 ret = -EIO; 4978 goto out_unlock; 4979 } 4980 } 4981 wait_on_page_writeback(page); 4982 4983 lock_extent(io_tree, block_start, block_end, &cached_state); 4984 4985 ordered = btrfs_lookup_ordered_extent(inode, block_start); 4986 if (ordered) { 4987 unlock_extent(io_tree, block_start, block_end, &cached_state); 4988 unlock_page(page); 4989 put_page(page); 4990 btrfs_start_ordered_extent(ordered, 1); 4991 btrfs_put_ordered_extent(ordered); 4992 goto again; 4993 } 4994 4995 clear_extent_bit(&inode->io_tree, block_start, block_end, 4996 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4997 &cached_state); 4998 4999 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 5000 &cached_state); 5001 if (ret) { 5002 unlock_extent(io_tree, block_start, block_end, &cached_state); 5003 goto out_unlock; 5004 } 5005 5006 if (offset != blocksize) { 5007 if (!len) 5008 len = blocksize - offset; 5009 if (front) 5010 memzero_page(page, (block_start - page_offset(page)), 5011 offset); 5012 else 5013 memzero_page(page, (block_start - page_offset(page)) + offset, 5014 len); 5015 } 5016 btrfs_page_clear_checked(fs_info, page, block_start, 5017 block_end + 1 - block_start); 5018 btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start); 5019 unlock_extent(io_tree, block_start, block_end, &cached_state); 5020 5021 if (only_release_metadata) 5022 set_extent_bit(&inode->io_tree, block_start, block_end, 5023 EXTENT_NORESERVE, NULL, GFP_NOFS); 5024 5025 out_unlock: 5026 if (ret) { 5027 if (only_release_metadata) 5028 btrfs_delalloc_release_metadata(inode, blocksize, true); 5029 else 5030 btrfs_delalloc_release_space(inode, data_reserved, 5031 block_start, blocksize, true); 5032 } 5033 btrfs_delalloc_release_extents(inode, blocksize); 5034 unlock_page(page); 5035 put_page(page); 5036 out: 5037 if (only_release_metadata) 5038 btrfs_check_nocow_unlock(inode); 5039 extent_changeset_free(data_reserved); 5040 return ret; 5041 } 5042 5043 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode, 5044 u64 offset, u64 len) 5045 { 5046 struct btrfs_fs_info *fs_info = root->fs_info; 5047 struct btrfs_trans_handle *trans; 5048 struct btrfs_drop_extents_args drop_args = { 0 }; 5049 int ret; 5050 5051 /* 5052 * If NO_HOLES is enabled, we don't need to do anything. 5053 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans() 5054 * or btrfs_update_inode() will be called, which guarantees that the next 5055 * fsync will know this inode was changed and needs to be logged. 5056 */ 5057 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 5058 return 0; 5059 5060 /* 5061 * 1 - for the one we're dropping 5062 * 1 - for the one we're adding 5063 * 1 - for updating the inode.
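 *
 * Three units in total, matching the btrfs_start_transaction(root, 3)
 * call right below.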
5064 */ 5065 trans = btrfs_start_transaction(root, 3); 5066 if (IS_ERR(trans)) 5067 return PTR_ERR(trans); 5068 5069 drop_args.start = offset; 5070 drop_args.end = offset + len; 5071 drop_args.drop_cache = true; 5072 5073 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 5074 if (ret) { 5075 btrfs_abort_transaction(trans, ret); 5076 btrfs_end_transaction(trans); 5077 return ret; 5078 } 5079 5080 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len); 5081 if (ret) { 5082 btrfs_abort_transaction(trans, ret); 5083 } else { 5084 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found); 5085 btrfs_update_inode(trans, root, inode); 5086 } 5087 btrfs_end_transaction(trans); 5088 return ret; 5089 } 5090 5091 /* 5092 * This function puts in dummy file extents for the area we're creating a hole 5093 * for. So if we are truncating this file to a larger size we need to insert 5094 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for 5095 * the range between oldsize and size. 5096 */ 5097 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) 5098 { 5099 struct btrfs_root *root = inode->root; 5100 struct btrfs_fs_info *fs_info = root->fs_info; 5101 struct extent_io_tree *io_tree = &inode->io_tree; 5102 struct extent_map *em = NULL; 5103 struct extent_state *cached_state = NULL; 5104 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 5105 u64 block_end = ALIGN(size, fs_info->sectorsize); 5106 u64 last_byte; 5107 u64 cur_offset; 5108 u64 hole_size; 5109 int err = 0; 5110 5111 /* 5112 * If our size started in the middle of a block we need to zero out the 5113 * rest of the block before we expand the i_size, otherwise we could 5114 * expose stale data. 5115 */ 5116 err = btrfs_truncate_block(inode, oldsize, 0, 0); 5117 if (err) 5118 return err; 5119 5120 if (size <= hole_start) 5121 return 0; 5122 5123 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, 5124 &cached_state); 5125 cur_offset = hole_start; 5126 while (1) { 5127 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 5128 block_end - cur_offset); 5129 if (IS_ERR(em)) { 5130 err = PTR_ERR(em); 5131 em = NULL; 5132 break; 5133 } 5134 last_byte = min(extent_map_end(em), block_end); 5135 last_byte = ALIGN(last_byte, fs_info->sectorsize); 5136 hole_size = last_byte - cur_offset; 5137 5138 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 5139 struct extent_map *hole_em; 5140 5141 err = maybe_insert_hole(root, inode, cur_offset, 5142 hole_size); 5143 if (err) 5144 break; 5145 5146 err = btrfs_inode_set_file_extent_range(inode, 5147 cur_offset, hole_size); 5148 if (err) 5149 break; 5150 5151 hole_em = alloc_extent_map(); 5152 if (!hole_em) { 5153 btrfs_drop_extent_map_range(inode, cur_offset, 5154 cur_offset + hole_size - 1, 5155 false); 5156 btrfs_set_inode_full_sync(inode); 5157 goto next; 5158 } 5159 hole_em->start = cur_offset; 5160 hole_em->len = hole_size; 5161 hole_em->orig_start = cur_offset; 5162 5163 hole_em->block_start = EXTENT_MAP_HOLE; 5164 hole_em->block_len = 0; 5165 hole_em->orig_block_len = 0; 5166 hole_em->ram_bytes = hole_size; 5167 hole_em->compress_type = BTRFS_COMPRESS_NONE; 5168 hole_em->generation = fs_info->generation; 5169 5170 err = btrfs_replace_extent_map_range(inode, hole_em, true); 5171 free_extent_map(hole_em); 5172 } else { 5173 err = btrfs_inode_set_file_extent_range(inode, 5174 cur_offset, hole_size); 5175 if (err) 5176 break; 5177 } 5178 next: 5179 free_extent_map(em); 5180 em = NULL; 5181 cur_offset = last_byte; 5182 if
(cur_offset >= block_end) 5183 break; 5184 } 5185 free_extent_map(em); 5186 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); 5187 return err; 5188 } 5189 5190 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 5191 { 5192 struct btrfs_root *root = BTRFS_I(inode)->root; 5193 struct btrfs_trans_handle *trans; 5194 loff_t oldsize = i_size_read(inode); 5195 loff_t newsize = attr->ia_size; 5196 int mask = attr->ia_valid; 5197 int ret; 5198 5199 /* 5200 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 5201 * special case where we need to update the times despite not having 5202 * these flags set. For all other operations the VFS sets these flags 5203 * explicitly if it wants a timestamp update. 5204 */ 5205 if (newsize != oldsize) { 5206 inode_inc_iversion(inode); 5207 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) { 5208 inode->i_mtime = current_time(inode); 5209 inode->i_ctime = inode->i_mtime; 5210 } 5211 } 5212 5213 if (newsize > oldsize) { 5214 /* 5215 * Don't do an expanding truncate while snapshotting is ongoing. 5216 * This is to ensure the snapshot captures a fully consistent 5217 * state of this file - if the snapshot captures this expanding 5218 * truncation, it must capture all writes that happened before 5219 * this truncation. 5220 */ 5221 btrfs_drew_write_lock(&root->snapshot_lock); 5222 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize); 5223 if (ret) { 5224 btrfs_drew_write_unlock(&root->snapshot_lock); 5225 return ret; 5226 } 5227 5228 trans = btrfs_start_transaction(root, 1); 5229 if (IS_ERR(trans)) { 5230 btrfs_drew_write_unlock(&root->snapshot_lock); 5231 return PTR_ERR(trans); 5232 } 5233 5234 i_size_write(inode, newsize); 5235 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 5236 pagecache_isize_extended(inode, oldsize, newsize); 5237 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 5238 btrfs_drew_write_unlock(&root->snapshot_lock); 5239 btrfs_end_transaction(trans); 5240 } else { 5241 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5242 5243 if (btrfs_is_zoned(fs_info)) { 5244 ret = btrfs_wait_ordered_range(inode, 5245 ALIGN(newsize, fs_info->sectorsize), 5246 (u64)-1); 5247 if (ret) 5248 return ret; 5249 } 5250 5251 /* 5252 * We're truncating a file that used to have good data down to 5253 * zero. Make sure any new writes to the file get on disk 5254 * on close. 5255 */ 5256 if (newsize == 0) 5257 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 5258 &BTRFS_I(inode)->runtime_flags); 5259 5260 truncate_setsize(inode, newsize); 5261 5262 inode_dio_wait(inode); 5263 5264 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize); 5265 if (ret && inode->i_nlink) { 5266 int err; 5267 5268 /* 5269 * Truncate failed, so fix up the in-memory size. We 5270 * adjusted disk_i_size down as we removed extents, so 5271 * wait for disk_i_size to be stable and then update the 5272 * in-memory size to match.
5273 */ 5274 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); 5275 if (err) 5276 return err; 5277 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 5278 } 5279 } 5280 5281 return ret; 5282 } 5283 5284 static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, 5285 struct iattr *attr) 5286 { 5287 struct inode *inode = d_inode(dentry); 5288 struct btrfs_root *root = BTRFS_I(inode)->root; 5289 int err; 5290 5291 if (btrfs_root_readonly(root)) 5292 return -EROFS; 5293 5294 err = setattr_prepare(mnt_userns, dentry, attr); 5295 if (err) 5296 return err; 5297 5298 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 5299 err = btrfs_setsize(inode, attr); 5300 if (err) 5301 return err; 5302 } 5303 5304 if (attr->ia_valid) { 5305 setattr_copy(mnt_userns, inode, attr); 5306 inode_inc_iversion(inode); 5307 err = btrfs_dirty_inode(BTRFS_I(inode)); 5308 5309 if (!err && attr->ia_valid & ATTR_MODE) 5310 err = posix_acl_chmod(mnt_userns, dentry, inode->i_mode); 5311 } 5312 5313 return err; 5314 } 5315 5316 /* 5317 * While truncating the inode pages during eviction, we get the VFS 5318 * calling btrfs_invalidate_folio() against each folio of the inode. This 5319 * is slow because the calls to btrfs_invalidate_folio() result in a 5320 * huge number of calls to lock_extent() and clear_extent_bit(), 5321 * which keep merging and splitting extent_state structures over and over, 5322 * wasting lots of time. 5323 * 5324 * Therefore if the inode is being evicted, let btrfs_invalidate_folio() 5325 * skip all those expensive operations on a per folio basis and do only 5326 * the ordered io finishing, while we release here the extent_map and 5327 * extent_state structures, without the excessive merging and splitting. 5328 */ 5329 static void evict_inode_truncate_pages(struct inode *inode) 5330 { 5331 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5332 struct rb_node *node; 5333 5334 ASSERT(inode->i_state & I_FREEING); 5335 truncate_inode_pages_final(&inode->i_data); 5336 5337 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 5338 5339 /* 5340 * Keep looping until we have no more ranges in the io tree. 5341 * We can have ongoing bios started by readahead that have 5342 * their endio callback (extent_io.c:end_bio_extent_readpage) 5343 * still in progress (they unlocked the pages in the bio but did not 5344 * yet unlock the ranges in the io tree). This means some ranges can 5345 * still be locked while eviction has started, because those bios are 5346 * executed by a separate task (a work queue kthread) and no inode 5347 * references (inode->i_count) were taken before submitting them 5348 * (references that would be dropped in the end io callback of each bio). 5349 * Therefore here we effectively end up waiting for those bios and for 5350 * anyone else holding locked ranges without having bumped the inode's 5351 * reference count - if we don't do it, when they access the inode's 5352 * io_tree to unlock a range it may be too late, leading to a 5353 * use-after-free issue.
5354 */ 5355 spin_lock(&io_tree->lock); 5356 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5357 struct extent_state *state; 5358 struct extent_state *cached_state = NULL; 5359 u64 start; 5360 u64 end; 5361 unsigned state_flags; 5362 5363 node = rb_first(&io_tree->state); 5364 state = rb_entry(node, struct extent_state, rb_node); 5365 start = state->start; 5366 end = state->end; 5367 state_flags = state->state; 5368 spin_unlock(&io_tree->lock); 5369 5370 lock_extent(io_tree, start, end, &cached_state); 5371 5372 /* 5373 * If still has DELALLOC flag, the extent didn't reach disk, 5374 * and its reserved space won't be freed by delayed_ref. 5375 * So we need to free its reserved space here. 5376 * (Refer to comment in btrfs_invalidate_folio, case 2) 5377 * 5378 * Note, end is the bytenr of last byte, so we need + 1 here. 5379 */ 5380 if (state_flags & EXTENT_DELALLOC) 5381 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5382 end - start + 1); 5383 5384 clear_extent_bit(io_tree, start, end, 5385 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, 5386 &cached_state); 5387 5388 cond_resched(); 5389 spin_lock(&io_tree->lock); 5390 } 5391 spin_unlock(&io_tree->lock); 5392 } 5393 5394 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5395 struct btrfs_block_rsv *rsv) 5396 { 5397 struct btrfs_fs_info *fs_info = root->fs_info; 5398 struct btrfs_trans_handle *trans; 5399 u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1); 5400 int ret; 5401 5402 /* 5403 * Eviction should be taking place at some place safe because of our 5404 * delayed iputs. However the normal flushing code will run delayed 5405 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5406 * 5407 * We reserve the delayed_refs_extra here again because we can't use 5408 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5409 * above. We reserve our extra bit here because we generate a ton of 5410 * delayed refs activity by truncating. 5411 * 5412 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can, 5413 * if we fail to make this reservation we can re-try without the 5414 * delayed_refs_extra so we can make some forward progress. 
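 *
 * Hence the two-step refill below: try rsv->size + delayed_refs_extra
 * first, fall back to plain rsv->size, and only if both fail give up
 * with -ENOSPC (the warning notes the inode will be truncated on the
 * next mount instead).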
5415 */ 5416 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, 5417 BTRFS_RESERVE_FLUSH_EVICT); 5418 if (ret) { 5419 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, 5420 BTRFS_RESERVE_FLUSH_EVICT); 5421 if (ret) { 5422 btrfs_warn(fs_info, 5423 "could not allocate space for delete; will truncate on mount"); 5424 return ERR_PTR(-ENOSPC); 5425 } 5426 delayed_refs_extra = 0; 5427 } 5428 5429 trans = btrfs_join_transaction(root); 5430 if (IS_ERR(trans)) 5431 return trans; 5432 5433 if (delayed_refs_extra) { 5434 trans->block_rsv = &fs_info->trans_block_rsv; 5435 trans->bytes_reserved = delayed_refs_extra; 5436 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5437 delayed_refs_extra, 1); 5438 } 5439 return trans; 5440 } 5441 5442 void btrfs_evict_inode(struct inode *inode) 5443 { 5444 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5445 struct btrfs_trans_handle *trans; 5446 struct btrfs_root *root = BTRFS_I(inode)->root; 5447 struct btrfs_block_rsv *rsv; 5448 int ret; 5449 5450 trace_btrfs_inode_evict(inode); 5451 5452 if (!root) { 5453 fsverity_cleanup_inode(inode); 5454 clear_inode(inode); 5455 return; 5456 } 5457 5458 evict_inode_truncate_pages(inode); 5459 5460 if (inode->i_nlink && 5461 ((btrfs_root_refs(&root->root_item) != 0 && 5462 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || 5463 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5464 goto no_delete; 5465 5466 if (is_bad_inode(inode)) 5467 goto no_delete; 5468 5469 btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1); 5470 5471 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5472 goto no_delete; 5473 5474 if (inode->i_nlink > 0) { 5475 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5476 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); 5477 goto no_delete; 5478 } 5479 5480 /* 5481 * This makes sure the inode item in tree is uptodate and the space for 5482 * the inode update is released. 5483 */ 5484 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5485 if (ret) 5486 goto no_delete; 5487 5488 /* 5489 * This drops any pending insert or delete operations we have for this 5490 * inode. We could have a delayed dir index deletion queued up, but 5491 * we're removing the inode completely so that'll be taken care of in 5492 * the truncate. 5493 */ 5494 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 5495 5496 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5497 if (!rsv) 5498 goto no_delete; 5499 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 5500 rsv->failfast = true; 5501 5502 btrfs_i_size_write(BTRFS_I(inode), 0); 5503 5504 while (1) { 5505 struct btrfs_truncate_control control = { 5506 .inode = BTRFS_I(inode), 5507 .ino = btrfs_ino(BTRFS_I(inode)), 5508 .new_size = 0, 5509 .min_type = 0, 5510 }; 5511 5512 trans = evict_refill_and_join(root, rsv); 5513 if (IS_ERR(trans)) 5514 goto free_rsv; 5515 5516 trans->block_rsv = rsv; 5517 5518 ret = btrfs_truncate_inode_items(trans, root, &control); 5519 trans->block_rsv = &fs_info->trans_block_rsv; 5520 btrfs_end_transaction(trans); 5521 btrfs_btree_balance_dirty(fs_info); 5522 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5523 goto free_rsv; 5524 else if (!ret) 5525 break; 5526 } 5527 5528 /* 5529 * Errors here aren't a big deal, it just means we leave orphan items in 5530 * the tree. They will be cleaned up on the next mount. If the inode 5531 * number gets reused, cleanup deletes the orphan item without doing 5532 * anything, and unlink reuses the existing orphan item. 
5533 * 5534 * If it turns out that we are dropping too many of these, we might want 5535 * to add a mechanism for retrying these after a commit. 5536 */ 5537 trans = evict_refill_and_join(root, rsv); 5538 if (!IS_ERR(trans)) { 5539 trans->block_rsv = rsv; 5540 btrfs_orphan_del(trans, BTRFS_I(inode)); 5541 trans->block_rsv = &fs_info->trans_block_rsv; 5542 btrfs_end_transaction(trans); 5543 } 5544 5545 free_rsv: 5546 btrfs_free_block_rsv(fs_info, rsv); 5547 no_delete: 5548 /* 5549 * If we didn't successfully delete, the orphan item will still be in 5550 * the tree and we'll retry on the next mount. Again, we might also want 5551 * to retry these periodically in the future. 5552 */ 5553 btrfs_remove_delayed_node(BTRFS_I(inode)); 5554 fsverity_cleanup_inode(inode); 5555 clear_inode(inode); 5556 } 5557 5558 /* 5559 * Return the key found in the dir entry in the location pointer, fill @type 5560 * with BTRFS_FT_*, and return 0. 5561 * 5562 * If no dir entry was found, returns -ENOENT. 5563 * If a corrupted location is found in the dir entry, returns -EUCLEAN. 5564 */ 5565 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, 5566 struct btrfs_key *location, u8 *type) 5567 { 5568 struct btrfs_dir_item *di; 5569 struct btrfs_path *path; 5570 struct btrfs_root *root = dir->root; 5571 int ret = 0; 5572 struct fscrypt_name fname; 5573 5574 path = btrfs_alloc_path(); 5575 if (!path) 5576 return -ENOMEM; 5577 5578 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 5579 if (ret) 5580 goto out; 5581 5582 /* This needs to handle no-key deletions later on */ 5583 5584 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), 5585 &fname.disk_name, 0); 5586 if (IS_ERR_OR_NULL(di)) { 5587 ret = di ? PTR_ERR(di) : -ENOENT; 5588 goto out; 5589 } 5590 5591 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5592 if (location->type != BTRFS_INODE_ITEM_KEY && 5593 location->type != BTRFS_ROOT_ITEM_KEY) { 5594 ret = -EUCLEAN; 5595 btrfs_warn(root->fs_info, 5596 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5597 __func__, fname.disk_name.name, btrfs_ino(dir), 5598 location->objectid, location->type, location->offset); 5599 } 5600 if (!ret) 5601 *type = btrfs_dir_ftype(path->nodes[0], di); 5602 out: 5603 fscrypt_free_filename(&fname); 5604 btrfs_free_path(path); 5605 return ret; 5606 } 5607 5608 /* 5609 * When we hit a tree root in a directory, the btrfs part of the inode 5610 * needs to be changed to reflect the root directory of the tree root. This 5611 * is kind of like crossing a mount point.
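* For example (illustrative values): a directory entry for a subvolume with tree id 257 carries a location key of type BTRFS_ROOT_ITEM_KEY and objectid 257; after this fixup the lookup continues in that subvolume's tree with the key (256 INODE_ITEM 0), since the root directory of a subvolume is normally BTRFS_FIRST_FREE_OBJECTID (256).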
5612 */ 5613 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5614 struct btrfs_inode *dir, 5615 struct dentry *dentry, 5616 struct btrfs_key *location, 5617 struct btrfs_root **sub_root) 5618 { 5619 struct btrfs_path *path; 5620 struct btrfs_root *new_root; 5621 struct btrfs_root_ref *ref; 5622 struct extent_buffer *leaf; 5623 struct btrfs_key key; 5624 int ret; 5625 int err = 0; 5626 struct fscrypt_name fname; 5627 5628 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); 5629 if (ret) 5630 return ret; 5631 5632 path = btrfs_alloc_path(); 5633 if (!path) { 5634 err = -ENOMEM; 5635 goto out; 5636 } 5637 5638 err = -ENOENT; 5639 key.objectid = dir->root->root_key.objectid; 5640 key.type = BTRFS_ROOT_REF_KEY; 5641 key.offset = location->objectid; 5642 5643 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5644 if (ret) { 5645 if (ret < 0) 5646 err = ret; 5647 goto out; 5648 } 5649 5650 leaf = path->nodes[0]; 5651 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5652 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5653 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len) 5654 goto out; 5655 5656 ret = memcmp_extent_buffer(leaf, fname.disk_name.name, 5657 (unsigned long)(ref + 1), fname.disk_name.len); 5658 if (ret) 5659 goto out; 5660 5661 btrfs_release_path(path); 5662 5663 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5664 if (IS_ERR(new_root)) { 5665 err = PTR_ERR(new_root); 5666 goto out; 5667 } 5668 5669 *sub_root = new_root; 5670 location->objectid = btrfs_root_dirid(&new_root->root_item); 5671 location->type = BTRFS_INODE_ITEM_KEY; 5672 location->offset = 0; 5673 err = 0; 5674 out: 5675 btrfs_free_path(path); 5676 fscrypt_free_filename(&fname); 5677 return err; 5678 } 5679 5680 static void inode_tree_add(struct btrfs_inode *inode) 5681 { 5682 struct btrfs_root *root = inode->root; 5683 struct btrfs_inode *entry; 5684 struct rb_node **p; 5685 struct rb_node *parent; 5686 struct rb_node *new = &inode->rb_node; 5687 u64 ino = btrfs_ino(inode); 5688 5689 if (inode_unhashed(&inode->vfs_inode)) 5690 return; 5691 parent = NULL; 5692 spin_lock(&root->inode_lock); 5693 p = &root->inode_tree.rb_node; 5694 while (*p) { 5695 parent = *p; 5696 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5697 5698 if (ino < btrfs_ino(entry)) 5699 p = &parent->rb_left; 5700 else if (ino > btrfs_ino(entry)) 5701 p = &parent->rb_right; 5702 else { 5703 WARN_ON(!(entry->vfs_inode.i_state & 5704 (I_WILL_FREE | I_FREEING))); 5705 rb_replace_node(parent, new, &root->inode_tree); 5706 RB_CLEAR_NODE(parent); 5707 spin_unlock(&root->inode_lock); 5708 return; 5709 } 5710 } 5711 rb_link_node(new, parent, p); 5712 rb_insert_color(new, &root->inode_tree); 5713 spin_unlock(&root->inode_lock); 5714 } 5715 5716 static void inode_tree_del(struct btrfs_inode *inode) 5717 { 5718 struct btrfs_root *root = inode->root; 5719 int empty = 0; 5720 5721 spin_lock(&root->inode_lock); 5722 if (!RB_EMPTY_NODE(&inode->rb_node)) { 5723 rb_erase(&inode->rb_node, &root->inode_tree); 5724 RB_CLEAR_NODE(&inode->rb_node); 5725 empty = RB_EMPTY_ROOT(&root->inode_tree); 5726 } 5727 spin_unlock(&root->inode_lock); 5728 5729 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5730 spin_lock(&root->inode_lock); 5731 empty = RB_EMPTY_ROOT(&root->inode_tree); 5732 spin_unlock(&root->inode_lock); 5733 if (empty) 5734 btrfs_add_dead_root(root); 5735 } 5736 } 5737 5738 5739 static int btrfs_init_locked_inode(struct inode *inode, void *p) 
5740 { 5741 struct btrfs_iget_args *args = p; 5742 5743 inode->i_ino = args->ino; 5744 BTRFS_I(inode)->location.objectid = args->ino; 5745 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 5746 BTRFS_I(inode)->location.offset = 0; 5747 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5748 BUG_ON(args->root && !BTRFS_I(inode)->root); 5749 5750 if (args->root && args->root == args->root->fs_info->tree_root && 5751 args->ino != BTRFS_BTREE_INODE_OBJECTID) 5752 set_bit(BTRFS_INODE_FREE_SPACE_INODE, 5753 &BTRFS_I(inode)->runtime_flags); 5754 return 0; 5755 } 5756 5757 static int btrfs_find_actor(struct inode *inode, void *opaque) 5758 { 5759 struct btrfs_iget_args *args = opaque; 5760 5761 return args->ino == BTRFS_I(inode)->location.objectid && 5762 args->root == BTRFS_I(inode)->root; 5763 } 5764 5765 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino, 5766 struct btrfs_root *root) 5767 { 5768 struct inode *inode; 5769 struct btrfs_iget_args args; 5770 unsigned long hashval = btrfs_inode_hash(ino, root); 5771 5772 args.ino = ino; 5773 args.root = root; 5774 5775 inode = iget5_locked(s, hashval, btrfs_find_actor, 5776 btrfs_init_locked_inode, 5777 (void *)&args); 5778 return inode; 5779 } 5780 5781 /* 5782 * Get an inode object given its inode number and corresponding root. 5783 * Path can be preallocated to prevent recursing back to iget through 5784 * allocator. NULL is also valid but may require an additional allocation 5785 * later. 5786 */ 5787 struct inode *btrfs_iget_path(struct super_block *s, u64 ino, 5788 struct btrfs_root *root, struct btrfs_path *path) 5789 { 5790 struct inode *inode; 5791 5792 inode = btrfs_iget_locked(s, ino, root); 5793 if (!inode) 5794 return ERR_PTR(-ENOMEM); 5795 5796 if (inode->i_state & I_NEW) { 5797 int ret; 5798 5799 ret = btrfs_read_locked_inode(inode, path); 5800 if (!ret) { 5801 inode_tree_add(BTRFS_I(inode)); 5802 unlock_new_inode(inode); 5803 } else { 5804 iget_failed(inode); 5805 /* 5806 * ret > 0 can come from btrfs_search_slot called by 5807 * btrfs_read_locked_inode, this means the inode item 5808 * was not found. 
5809 */ 5810 if (ret > 0) 5811 ret = -ENOENT; 5812 inode = ERR_PTR(ret); 5813 } 5814 } 5815 5816 return inode; 5817 } 5818 5819 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root) 5820 { 5821 return btrfs_iget_path(s, ino, root, NULL); 5822 } 5823 5824 static struct inode *new_simple_dir(struct super_block *s, 5825 struct btrfs_key *key, 5826 struct btrfs_root *root) 5827 { 5828 struct inode *inode = new_inode(s); 5829 5830 if (!inode) 5831 return ERR_PTR(-ENOMEM); 5832 5833 BTRFS_I(inode)->root = btrfs_grab_root(root); 5834 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5835 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5836 5837 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5838 /* 5839 * We only need lookup, the rest is read-only and there's no inode 5840 * associated with the dentry 5841 */ 5842 inode->i_op = &simple_dir_inode_operations; 5843 inode->i_opflags &= ~IOP_XATTR; 5844 inode->i_fop = &simple_dir_operations; 5845 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5846 inode->i_mtime = current_time(inode); 5847 inode->i_atime = inode->i_mtime; 5848 inode->i_ctime = inode->i_mtime; 5849 BTRFS_I(inode)->i_otime = inode->i_mtime; 5850 5851 return inode; 5852 } 5853 5854 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN); 5855 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE); 5856 static_assert(BTRFS_FT_DIR == FT_DIR); 5857 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV); 5858 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV); 5859 static_assert(BTRFS_FT_FIFO == FT_FIFO); 5860 static_assert(BTRFS_FT_SOCK == FT_SOCK); 5861 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK); 5862 5863 static inline u8 btrfs_inode_type(struct inode *inode) 5864 { 5865 return fs_umode_to_ftype(inode->i_mode); 5866 } 5867 5868 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5869 { 5870 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 5871 struct inode *inode; 5872 struct btrfs_root *root = BTRFS_I(dir)->root; 5873 struct btrfs_root *sub_root = root; 5874 struct btrfs_key location; 5875 u8 di_type = 0; 5876 int ret = 0; 5877 5878 if (dentry->d_name.len > BTRFS_NAME_LEN) 5879 return ERR_PTR(-ENAMETOOLONG); 5880 5881 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type); 5882 if (ret < 0) 5883 return ERR_PTR(ret); 5884 5885 if (location.type == BTRFS_INODE_ITEM_KEY) { 5886 inode = btrfs_iget(dir->i_sb, location.objectid, root); 5887 if (IS_ERR(inode)) 5888 return inode; 5889 5890 /* Do extra check against inode mode with di_type */ 5891 if (btrfs_inode_type(inode) != di_type) { 5892 btrfs_crit(fs_info, 5893 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5894 inode->i_mode, btrfs_inode_type(inode), 5895 di_type); 5896 iput(inode); 5897 return ERR_PTR(-EUCLEAN); 5898 } 5899 return inode; 5900 } 5901 5902 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry, 5903 &location, &sub_root); 5904 if (ret < 0) { 5905 if (ret != -ENOENT) 5906 inode = ERR_PTR(ret); 5907 else 5908 inode = new_simple_dir(dir->i_sb, &location, root); 5909 } else { 5910 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); 5911 btrfs_put_root(sub_root); 5912 5913 if (IS_ERR(inode)) 5914 return inode; 5915 5916 down_read(&fs_info->cleanup_work_sem); 5917 if (!sb_rdonly(inode->i_sb)) 5918 ret = btrfs_orphan_cleanup(sub_root); 5919 up_read(&fs_info->cleanup_work_sem); 5920 if (ret) { 5921 iput(inode); 5922 inode = ERR_PTR(ret); 5923 } 5924 } 5925 5926 return inode; 5927 } 5928 5929 static int btrfs_dentry_delete(const 
struct dentry *dentry) 5930 { 5931 struct btrfs_root *root; 5932 struct inode *inode = d_inode(dentry); 5933 5934 if (!inode && !IS_ROOT(dentry)) 5935 inode = d_inode(dentry->d_parent); 5936 5937 if (inode) { 5938 root = BTRFS_I(inode)->root; 5939 if (btrfs_root_refs(&root->root_item) == 0) 5940 return 1; 5941 5942 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5943 return 1; 5944 } 5945 return 0; 5946 } 5947 5948 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5949 unsigned int flags) 5950 { 5951 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5952 5953 if (inode == ERR_PTR(-ENOENT)) 5954 inode = NULL; 5955 return d_splice_alias(inode, dentry); 5956 } 5957 5958 /* 5959 * All this infrastructure exists because dir_emit can fault, and we are holding 5960 * the tree lock when doing readdir. For now just allocate a buffer and copy 5961 * our information into that, and then dir_emit from the buffer. This is 5962 * similar to what NFS does, only we don't keep the buffer around in pagecache 5963 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5964 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5965 * tree lock. 5966 */ 5967 static int btrfs_opendir(struct inode *inode, struct file *file) 5968 { 5969 struct btrfs_file_private *private; 5970 5971 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5972 if (!private) 5973 return -ENOMEM; 5974 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5975 if (!private->filldir_buf) { 5976 kfree(private); 5977 return -ENOMEM; 5978 } 5979 file->private_data = private; 5980 return 0; 5981 } 5982 5983 struct dir_entry { 5984 u64 ino; 5985 u64 offset; 5986 unsigned type; 5987 int name_len; 5988 }; 5989 5990 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5991 { 5992 while (entries--) { 5993 struct dir_entry *entry = addr; 5994 char *name = (char *)(entry + 1); 5995 5996 ctx->pos = get_unaligned(&entry->offset); 5997 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5998 get_unaligned(&entry->ino), 5999 get_unaligned(&entry->type))) 6000 return 1; 6001 addr += sizeof(struct dir_entry) + 6002 get_unaligned(&entry->name_len); 6003 ctx->pos++; 6004 } 6005 return 0; 6006 } 6007 6008 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 6009 { 6010 struct inode *inode = file_inode(file); 6011 struct btrfs_root *root = BTRFS_I(inode)->root; 6012 struct btrfs_file_private *private = file->private_data; 6013 struct btrfs_dir_item *di; 6014 struct btrfs_key key; 6015 struct btrfs_key found_key; 6016 struct btrfs_path *path; 6017 void *addr; 6018 struct list_head ins_list; 6019 struct list_head del_list; 6020 int ret; 6021 char *name_ptr; 6022 int name_len; 6023 int entries = 0; 6024 int total_len = 0; 6025 bool put = false; 6026 struct btrfs_key location; 6027 6028 if (!dir_emit_dots(file, ctx)) 6029 return 0; 6030 6031 path = btrfs_alloc_path(); 6032 if (!path) 6033 return -ENOMEM; 6034 6035 addr = private->filldir_buf; 6036 path->reada = READA_FORWARD; 6037 6038 INIT_LIST_HEAD(&ins_list); 6039 INIT_LIST_HEAD(&del_list); 6040 put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list); 6041 6042 again: 6043 key.type = BTRFS_DIR_INDEX_KEY; 6044 key.offset = ctx->pos; 6045 key.objectid = btrfs_ino(BTRFS_I(inode)); 6046 6047 btrfs_for_each_slot(root, &key, &found_key, path, ret) { 6048 struct dir_entry *entry; 6049 struct extent_buffer *leaf = path->nodes[0]; 6050 u8 ftype; 6051 
6052 if (found_key.objectid != key.objectid) 6053 break; 6054 if (found_key.type != BTRFS_DIR_INDEX_KEY) 6055 break; 6056 if (found_key.offset < ctx->pos) 6057 continue; 6058 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 6059 continue; 6060 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 6061 name_len = btrfs_dir_name_len(leaf, di); 6062 if ((total_len + sizeof(struct dir_entry) + name_len) >= 6063 PAGE_SIZE) { 6064 btrfs_release_path(path); 6065 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6066 if (ret) 6067 goto nopos; 6068 addr = private->filldir_buf; 6069 entries = 0; 6070 total_len = 0; 6071 goto again; 6072 } 6073 6074 ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di)); 6075 entry = addr; 6076 name_ptr = (char *)(entry + 1); 6077 read_extent_buffer(leaf, name_ptr, 6078 (unsigned long)(di + 1), name_len); 6079 put_unaligned(name_len, &entry->name_len); 6080 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type); 6081 btrfs_dir_item_key_to_cpu(leaf, di, &location); 6082 put_unaligned(location.objectid, &entry->ino); 6083 put_unaligned(found_key.offset, &entry->offset); 6084 entries++; 6085 addr += sizeof(struct dir_entry) + name_len; 6086 total_len += sizeof(struct dir_entry) + name_len; 6087 } 6088 /* Catch error encountered during iteration */ 6089 if (ret < 0) 6090 goto err; 6091 6092 btrfs_release_path(path); 6093 6094 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6095 if (ret) 6096 goto nopos; 6097 6098 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 6099 if (ret) 6100 goto nopos; 6101 6102 /* 6103 * Stop new entries from being returned after we return the last 6104 * entry. 6105 * 6106 * New directory entries are assigned a strictly increasing 6107 * offset. This means that new entries created during readdir 6108 * are *guaranteed* to be seen in the future by that readdir. 6109 * This has broken buggy programs which operate on names as 6110 * they're returned by readdir. Until we re-use freed offsets 6111 * we have this hack to stop new entries from being returned 6112 * under the assumption that they'll never reach this huge 6113 * offset. 6114 * 6115 * This is being careful not to overflow 32bit loff_t unless the 6116 * last entry requires it because doing so has broken 32bit apps 6117 * in the past. 6118 */ 6119 if (ctx->pos >= INT_MAX) 6120 ctx->pos = LLONG_MAX; 6121 else 6122 ctx->pos = INT_MAX; 6123 nopos: 6124 ret = 0; 6125 err: 6126 if (put) 6127 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); 6128 btrfs_free_path(path); 6129 return ret; 6130 } 6131 6132 /* 6133 * This is somewhat expensive, updating the tree every time the 6134 * inode changes. But, it is most likely to find the inode in cache. 6135 * FIXME, needs more benchmarking...there are no reasons other than performance 6136 * to keep or drop this code. 
6137 */ 6138 static int btrfs_dirty_inode(struct btrfs_inode *inode) 6139 { 6140 struct btrfs_root *root = inode->root; 6141 struct btrfs_fs_info *fs_info = root->fs_info; 6142 struct btrfs_trans_handle *trans; 6143 int ret; 6144 6145 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) 6146 return 0; 6147 6148 trans = btrfs_join_transaction(root); 6149 if (IS_ERR(trans)) 6150 return PTR_ERR(trans); 6151 6152 ret = btrfs_update_inode(trans, root, inode); 6153 if (ret == -ENOSPC || ret == -EDQUOT) { 6154 /* whoops, let's try again with the full transaction */ 6155 btrfs_end_transaction(trans); 6156 trans = btrfs_start_transaction(root, 1); 6157 if (IS_ERR(trans)) 6158 return PTR_ERR(trans); 6159 6160 ret = btrfs_update_inode(trans, root, inode); 6161 } 6162 btrfs_end_transaction(trans); 6163 if (inode->delayed_node) 6164 btrfs_balance_delayed_items(fs_info); 6165 6166 return ret; 6167 } 6168 6169 /* 6170 * This is a copy of file_update_time. We need this so we can return an error 6171 * on ENOSPC when updating the inode for file writes and mmap writes. 6172 */ 6173 static int btrfs_update_time(struct inode *inode, struct timespec64 *now, 6174 int flags) 6175 { 6176 struct btrfs_root *root = BTRFS_I(inode)->root; 6177 bool dirty = flags & ~S_VERSION; 6178 6179 if (btrfs_root_readonly(root)) 6180 return -EROFS; 6181 6182 if (flags & S_VERSION) 6183 dirty |= inode_maybe_inc_iversion(inode, dirty); 6184 if (flags & S_CTIME) 6185 inode->i_ctime = *now; 6186 if (flags & S_MTIME) 6187 inode->i_mtime = *now; 6188 if (flags & S_ATIME) 6189 inode->i_atime = *now; 6190 return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0; 6191 } 6192 6193 /* 6194 * Find the highest existing sequence number in a directory 6195 * and then set the in-memory index_cnt variable to the 6196 * first free sequence number. 6197 */ 6198 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 6199 { 6200 struct btrfs_root *root = inode->root; 6201 struct btrfs_key key, found_key; 6202 struct btrfs_path *path; 6203 struct extent_buffer *leaf; 6204 int ret; 6205 6206 key.objectid = btrfs_ino(inode); 6207 key.type = BTRFS_DIR_INDEX_KEY; 6208 key.offset = (u64)-1; 6209 6210 path = btrfs_alloc_path(); 6211 if (!path) 6212 return -ENOMEM; 6213 6214 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6215 if (ret < 0) 6216 goto out; 6217 /* FIXME: we should be able to handle this */ 6218 if (ret == 0) 6219 goto out; 6220 ret = 0; 6221 6222 if (path->slots[0] == 0) { 6223 inode->index_cnt = BTRFS_DIR_START_INDEX; 6224 goto out; 6225 } 6226 6227 path->slots[0]--; 6228 6229 leaf = path->nodes[0]; 6230 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6231 6232 if (found_key.objectid != btrfs_ino(inode) || 6233 found_key.type != BTRFS_DIR_INDEX_KEY) { 6234 inode->index_cnt = BTRFS_DIR_START_INDEX; 6235 goto out; 6236 } 6237 6238 inode->index_cnt = found_key.offset + 1; 6239 out: 6240 btrfs_free_path(path); 6241 return ret; 6242 } 6243 6244 /* 6245 * helper to find a free sequence number in a given directory.
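The index is handed out sequentially: the first entry added to a directory gets BTRFS_DIR_START_INDEX and every later entry gets the next value of dir->index_cnt, which btrfs_set_inode_index_count() above rebuilds from the tree when it is unknown.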
This current 6246 * code is very simple, later versions will do smarter things in the btree 6247 */ 6248 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6249 { 6250 int ret = 0; 6251 6252 if (dir->index_cnt == (u64)-1) { 6253 ret = btrfs_inode_delayed_dir_index_count(dir); 6254 if (ret) { 6255 ret = btrfs_set_inode_index_count(dir); 6256 if (ret) 6257 return ret; 6258 } 6259 } 6260 6261 *index = dir->index_cnt; 6262 dir->index_cnt++; 6263 6264 return ret; 6265 } 6266 6267 static int btrfs_insert_inode_locked(struct inode *inode) 6268 { 6269 struct btrfs_iget_args args; 6270 6271 args.ino = BTRFS_I(inode)->location.objectid; 6272 args.root = BTRFS_I(inode)->root; 6273 6274 return insert_inode_locked4(inode, 6275 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6276 btrfs_find_actor, &args); 6277 } 6278 6279 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, 6280 unsigned int *trans_num_items) 6281 { 6282 struct inode *dir = args->dir; 6283 struct inode *inode = args->inode; 6284 int ret; 6285 6286 if (!args->orphan) { 6287 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, 6288 &args->fname); 6289 if (ret) 6290 return ret; 6291 } 6292 6293 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); 6294 if (ret) { 6295 fscrypt_free_filename(&args->fname); 6296 return ret; 6297 } 6298 6299 /* 1 to add inode item */ 6300 *trans_num_items = 1; 6301 /* 1 to add compression property */ 6302 if (BTRFS_I(dir)->prop_compress) 6303 (*trans_num_items)++; 6304 /* 1 to add default ACL xattr */ 6305 if (args->default_acl) 6306 (*trans_num_items)++; 6307 /* 1 to add access ACL xattr */ 6308 if (args->acl) 6309 (*trans_num_items)++; 6310 #ifdef CONFIG_SECURITY 6311 /* 1 to add LSM xattr */ 6312 if (dir->i_security) 6313 (*trans_num_items)++; 6314 #endif 6315 if (args->orphan) { 6316 /* 1 to add orphan item */ 6317 (*trans_num_items)++; 6318 } else { 6319 /* 6320 * 1 to add dir item 6321 * 1 to add dir index 6322 * 1 to update parent inode item 6323 * 6324 * No need for 1 unit for the inode ref item because it is 6325 * inserted in a batch together with the inode item at 6326 * btrfs_create_new_inode(). 6327 */ 6328 *trans_num_items += 3; 6329 } 6330 return 0; 6331 } 6332 6333 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) 6334 { 6335 posix_acl_release(args->acl); 6336 posix_acl_release(args->default_acl); 6337 fscrypt_free_filename(&args->fname); 6338 } 6339 6340 /* 6341 * Inherit flags from the parent inode. 6342 * 6343 * Currently only the compression flags and the cow flags are inherited. 6344 */ 6345 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir) 6346 { 6347 unsigned int flags; 6348 6349 flags = dir->flags; 6350 6351 if (flags & BTRFS_INODE_NOCOMPRESS) { 6352 inode->flags &= ~BTRFS_INODE_COMPRESS; 6353 inode->flags |= BTRFS_INODE_NOCOMPRESS; 6354 } else if (flags & BTRFS_INODE_COMPRESS) { 6355 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; 6356 inode->flags |= BTRFS_INODE_COMPRESS; 6357 } 6358 6359 if (flags & BTRFS_INODE_NODATACOW) { 6360 inode->flags |= BTRFS_INODE_NODATACOW; 6361 if (S_ISREG(inode->vfs_inode.i_mode)) 6362 inode->flags |= BTRFS_INODE_NODATASUM; 6363 } 6364 6365 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode); 6366 } 6367 6368 int btrfs_create_new_inode(struct btrfs_trans_handle *trans, 6369 struct btrfs_new_inode_args *args) 6370 { 6371 struct inode *dir = args->dir; 6372 struct inode *inode = args->inode; 6373 const struct fscrypt_str *name = args->orphan ? 
NULL : &args->fname.disk_name; 6374 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6375 struct btrfs_root *root; 6376 struct btrfs_inode_item *inode_item; 6377 struct btrfs_key *location; 6378 struct btrfs_path *path; 6379 u64 objectid; 6380 struct btrfs_inode_ref *ref; 6381 struct btrfs_key key[2]; 6382 u32 sizes[2]; 6383 struct btrfs_item_batch batch; 6384 unsigned long ptr; 6385 int ret; 6386 6387 path = btrfs_alloc_path(); 6388 if (!path) 6389 return -ENOMEM; 6390 6391 if (!args->subvol) 6392 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); 6393 root = BTRFS_I(inode)->root; 6394 6395 ret = btrfs_get_free_objectid(root, &objectid); 6396 if (ret) 6397 goto out; 6398 inode->i_ino = objectid; 6399 6400 if (args->orphan) { 6401 /* 6402 * O_TMPFILE, set link count to 0, so that after this point, we 6403 * fill in an inode item with the correct link count. 6404 */ 6405 set_nlink(inode, 0); 6406 } else { 6407 trace_btrfs_inode_request(dir); 6408 6409 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); 6410 if (ret) 6411 goto out; 6412 } 6413 /* index_cnt is ignored for everything but a dir. */ 6414 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; 6415 BTRFS_I(inode)->generation = trans->transid; 6416 inode->i_generation = BTRFS_I(inode)->generation; 6417 6418 /* 6419 * Subvolumes don't inherit flags from their parent directory. 6420 * Originally this was probably by accident, but we probably can't 6421 * change it now without compatibility issues. 6422 */ 6423 if (!args->subvol) 6424 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir)); 6425 6426 if (S_ISREG(inode->i_mode)) { 6427 if (btrfs_test_opt(fs_info, NODATASUM)) 6428 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6429 if (btrfs_test_opt(fs_info, NODATACOW)) 6430 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6431 BTRFS_INODE_NODATASUM; 6432 } 6433 6434 location = &BTRFS_I(inode)->location; 6435 location->objectid = objectid; 6436 location->offset = 0; 6437 location->type = BTRFS_INODE_ITEM_KEY; 6438 6439 ret = btrfs_insert_inode_locked(inode); 6440 if (ret < 0) { 6441 if (!args->orphan) 6442 BTRFS_I(dir)->index_cnt--; 6443 goto out; 6444 } 6445 6446 /* 6447 * We could have gotten an inode number from somebody who was fsynced 6448 * and then removed in this same transaction, so let's just set full 6449 * sync since it will be a full sync anyway and this will blow away the 6450 * old info in the log. 6451 */ 6452 btrfs_set_inode_full_sync(BTRFS_I(inode)); 6453 6454 key[0].objectid = objectid; 6455 key[0].type = BTRFS_INODE_ITEM_KEY; 6456 key[0].offset = 0; 6457 6458 sizes[0] = sizeof(struct btrfs_inode_item); 6459 6460 if (!args->orphan) { 6461 /* 6462 * Start new inodes with an inode_ref. This is slightly more 6463 * efficient for small numbers of hard links since they will 6464 * be packed into one item. Extended refs will kick in if we 6465 * add more hard links than can fit in the ref item. 6466 */ 6467 key[1].objectid = objectid; 6468 key[1].type = BTRFS_INODE_REF_KEY; 6469 if (args->subvol) { 6470 key[1].offset = objectid; 6471 sizes[1] = 2 + sizeof(*ref); 6472 } else { 6473 key[1].offset = btrfs_ino(BTRFS_I(dir)); 6474 sizes[1] = name->len + sizeof(*ref); 6475 } 6476 } 6477 6478 batch.keys = &key[0]; 6479 batch.data_sizes = &sizes[0]; 6480 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); 6481 batch.nr = args->orphan ? 
1 : 2; 6482 ret = btrfs_insert_empty_items(trans, root, path, &batch); 6483 if (ret != 0) { 6484 btrfs_abort_transaction(trans, ret); 6485 goto discard; 6486 } 6487 6488 inode->i_mtime = current_time(inode); 6489 inode->i_atime = inode->i_mtime; 6490 inode->i_ctime = inode->i_mtime; 6491 BTRFS_I(inode)->i_otime = inode->i_mtime; 6492 6493 /* 6494 * We're going to fill the inode item now, so at this point the inode 6495 * must be fully initialized. 6496 */ 6497 6498 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6499 struct btrfs_inode_item); 6500 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6501 sizeof(*inode_item)); 6502 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6503 6504 if (!args->orphan) { 6505 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6506 struct btrfs_inode_ref); 6507 ptr = (unsigned long)(ref + 1); 6508 if (args->subvol) { 6509 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); 6510 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); 6511 write_extent_buffer(path->nodes[0], "..", ptr, 2); 6512 } else { 6513 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 6514 name->len); 6515 btrfs_set_inode_ref_index(path->nodes[0], ref, 6516 BTRFS_I(inode)->dir_index); 6517 write_extent_buffer(path->nodes[0], name->name, ptr, 6518 name->len); 6519 } 6520 } 6521 6522 btrfs_mark_buffer_dirty(path->nodes[0]); 6523 /* 6524 * We don't need the path anymore, and inheriting properties, adding 6525 * ACLs, security xattrs, the orphan item or the link will all result 6526 * in allocating yet another path. So just free our path. 6527 */ 6528 btrfs_free_path(path); 6529 path = NULL; 6530 6531 if (args->subvol) { 6532 struct inode *parent; 6533 6534 /* 6535 * Subvolumes inherit properties from their parent subvolume, 6536 * not the directory they were created in. 6537 */ 6538 parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, 6539 BTRFS_I(dir)->root); 6540 if (IS_ERR(parent)) { 6541 ret = PTR_ERR(parent); 6542 } else { 6543 ret = btrfs_inode_inherit_props(trans, inode, parent); 6544 iput(parent); 6545 } 6546 } else { 6547 ret = btrfs_inode_inherit_props(trans, inode, dir); 6548 } 6549 if (ret) { 6550 btrfs_err(fs_info, 6551 "error inheriting props for ino %llu (root %llu): %d", 6552 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, 6553 ret); 6554 } 6555 6556 /* 6557 * Subvolumes don't inherit ACLs or get passed to the LSM. This is 6558 * probably a bug. 6559 */ 6560 if (!args->subvol) { 6561 ret = btrfs_init_inode_security(trans, args); 6562 if (ret) { 6563 btrfs_abort_transaction(trans, ret); 6564 goto discard; 6565 } 6566 } 6567 6568 inode_tree_add(BTRFS_I(inode)); 6569 6570 trace_btrfs_inode_new(inode); 6571 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6572 6573 btrfs_update_root_times(trans, root); 6574 6575 if (args->orphan) { 6576 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 6577 } else { 6578 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 6579 0, BTRFS_I(inode)->dir_index); 6580 } 6581 if (ret) { 6582 btrfs_abort_transaction(trans, ret); 6583 goto discard; 6584 } 6585 6586 return 0; 6587 6588 discard: 6589 /* 6590 * discard_new_inode() calls iput(), but the caller owns the reference 6591 * to the inode. 6592 */ 6593 ihold(inode); 6594 discard_new_inode(inode); 6595 out: 6596 btrfs_free_path(path); 6597 return ret; 6598 } 6599 6600 /* 6601 * utility function to add 'inode' into 'parent_inode' with 6602 * a given name and a given sequence number.
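A typical caller (e.g. btrfs_link() below) first reserves the sequence number with btrfs_set_inode_index(dir, &index) and then passes that index in here.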
6603 * if 'add_backref' is true, also insert a backref from the 6604 * inode to the parent directory. 6605 */ 6606 int btrfs_add_link(struct btrfs_trans_handle *trans, 6607 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6608 const struct fscrypt_str *name, int add_backref, u64 index) 6609 { 6610 int ret = 0; 6611 struct btrfs_key key; 6612 struct btrfs_root *root = parent_inode->root; 6613 u64 ino = btrfs_ino(inode); 6614 u64 parent_ino = btrfs_ino(parent_inode); 6615 6616 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6617 memcpy(&key, &inode->root->root_key, sizeof(key)); 6618 } else { 6619 key.objectid = ino; 6620 key.type = BTRFS_INODE_ITEM_KEY; 6621 key.offset = 0; 6622 } 6623 6624 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6625 ret = btrfs_add_root_ref(trans, key.objectid, 6626 root->root_key.objectid, parent_ino, 6627 index, name); 6628 } else if (add_backref) { 6629 ret = btrfs_insert_inode_ref(trans, root, name, 6630 ino, parent_ino, index); 6631 } 6632 6633 /* Nothing to clean up yet */ 6634 if (ret) 6635 return ret; 6636 6637 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key, 6638 btrfs_inode_type(&inode->vfs_inode), index); 6639 if (ret == -EEXIST || ret == -EOVERFLOW) 6640 goto fail_dir_item; 6641 else if (ret) { 6642 btrfs_abort_transaction(trans, ret); 6643 return ret; 6644 } 6645 6646 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6647 name->len * 2); 6648 inode_inc_iversion(&parent_inode->vfs_inode); 6649 /* 6650 * If we are replaying a log tree, we do not want to update the mtime 6651 * and ctime of the parent directory with the current time, since the 6652 * log replay procedure is responsible for setting them to their correct 6653 * values (the ones it had when the fsync was done). 
6654 */ 6655 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) { 6656 struct timespec64 now = current_time(&parent_inode->vfs_inode); 6657 6658 parent_inode->vfs_inode.i_mtime = now; 6659 parent_inode->vfs_inode.i_ctime = now; 6660 } 6661 ret = btrfs_update_inode(trans, root, parent_inode); 6662 if (ret) 6663 btrfs_abort_transaction(trans, ret); 6664 return ret; 6665 6666 fail_dir_item: 6667 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6668 u64 local_index; 6669 int err; 6670 err = btrfs_del_root_ref(trans, key.objectid, 6671 root->root_key.objectid, parent_ino, 6672 &local_index, name); 6673 if (err) 6674 btrfs_abort_transaction(trans, err); 6675 } else if (add_backref) { 6676 u64 local_index; 6677 int err; 6678 6679 err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, 6680 &local_index); 6681 if (err) 6682 btrfs_abort_transaction(trans, err); 6683 } 6684 6685 /* Return the original error code */ 6686 return ret; 6687 } 6688 6689 static int btrfs_create_common(struct inode *dir, struct dentry *dentry, 6690 struct inode *inode) 6691 { 6692 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6693 struct btrfs_root *root = BTRFS_I(dir)->root; 6694 struct btrfs_new_inode_args new_inode_args = { 6695 .dir = dir, 6696 .dentry = dentry, 6697 .inode = inode, 6698 }; 6699 unsigned int trans_num_items; 6700 struct btrfs_trans_handle *trans; 6701 int err; 6702 6703 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 6704 if (err) 6705 goto out_inode; 6706 6707 trans = btrfs_start_transaction(root, trans_num_items); 6708 if (IS_ERR(trans)) { 6709 err = PTR_ERR(trans); 6710 goto out_new_inode_args; 6711 } 6712 6713 err = btrfs_create_new_inode(trans, &new_inode_args); 6714 if (!err) 6715 d_instantiate_new(dentry, inode); 6716 6717 btrfs_end_transaction(trans); 6718 btrfs_btree_balance_dirty(fs_info); 6719 out_new_inode_args: 6720 btrfs_new_inode_args_destroy(&new_inode_args); 6721 out_inode: 6722 if (err) 6723 iput(inode); 6724 return err; 6725 } 6726 6727 static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir, 6728 struct dentry *dentry, umode_t mode, dev_t rdev) 6729 { 6730 struct inode *inode; 6731 6732 inode = new_inode(dir->i_sb); 6733 if (!inode) 6734 return -ENOMEM; 6735 inode_init_owner(mnt_userns, inode, dir, mode); 6736 inode->i_op = &btrfs_special_inode_operations; 6737 init_special_inode(inode, inode->i_mode, rdev); 6738 return btrfs_create_common(dir, dentry, inode); 6739 } 6740 6741 static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir, 6742 struct dentry *dentry, umode_t mode, bool excl) 6743 { 6744 struct inode *inode; 6745 6746 inode = new_inode(dir->i_sb); 6747 if (!inode) 6748 return -ENOMEM; 6749 inode_init_owner(mnt_userns, inode, dir, mode); 6750 inode->i_fop = &btrfs_file_operations; 6751 inode->i_op = &btrfs_file_inode_operations; 6752 inode->i_mapping->a_ops = &btrfs_aops; 6753 return btrfs_create_common(dir, dentry, inode); 6754 } 6755 6756 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6757 struct dentry *dentry) 6758 { 6759 struct btrfs_trans_handle *trans = NULL; 6760 struct btrfs_root *root = BTRFS_I(dir)->root; 6761 struct inode *inode = d_inode(old_dentry); 6762 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 6763 struct fscrypt_name fname; 6764 u64 index; 6765 int err; 6766 int drop_inode = 0; 6767 6768 /* do not allow sys_link's with other subvols of the same device */ 6769 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) 6770 return 
-EXDEV; 6771 6772 if (inode->i_nlink >= BTRFS_LINK_MAX) 6773 return -EMLINK; 6774 6775 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); 6776 if (err) 6777 goto fail; 6778 6779 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6780 if (err) 6781 goto fail; 6782 6783 /* 6784 * 2 items for inode and inode ref 6785 * 2 items for dir items 6786 * 1 item for parent inode 6787 * 1 item for orphan item deletion if O_TMPFILE 6788 */ 6789 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6790 if (IS_ERR(trans)) { 6791 err = PTR_ERR(trans); 6792 trans = NULL; 6793 goto fail; 6794 } 6795 6796 /* There are several dir indexes for this inode, clear the cache. */ 6797 BTRFS_I(inode)->dir_index = 0ULL; 6798 inc_nlink(inode); 6799 inode_inc_iversion(inode); 6800 inode->i_ctime = current_time(inode); 6801 ihold(inode); 6802 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6803 6804 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6805 &fname.disk_name, 1, index); 6806 6807 if (err) { 6808 drop_inode = 1; 6809 } else { 6810 struct dentry *parent = dentry->d_parent; 6811 6812 err = btrfs_update_inode(trans, root, BTRFS_I(inode)); 6813 if (err) 6814 goto fail; 6815 if (inode->i_nlink == 1) { 6816 /* 6817 * If new hard link count is 1, it's a file created 6818 * with open(2) O_TMPFILE flag. 6819 */ 6820 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6821 if (err) 6822 goto fail; 6823 } 6824 d_instantiate(dentry, inode); 6825 btrfs_log_new_name(trans, old_dentry, NULL, 0, parent); 6826 } 6827 6828 fail: 6829 fscrypt_free_filename(&fname); 6830 if (trans) 6831 btrfs_end_transaction(trans); 6832 if (drop_inode) { 6833 inode_dec_link_count(inode); 6834 iput(inode); 6835 } 6836 btrfs_btree_balance_dirty(fs_info); 6837 return err; 6838 } 6839 6840 static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir, 6841 struct dentry *dentry, umode_t mode) 6842 { 6843 struct inode *inode; 6844 6845 inode = new_inode(dir->i_sb); 6846 if (!inode) 6847 return -ENOMEM; 6848 inode_init_owner(mnt_userns, inode, dir, S_IFDIR | mode); 6849 inode->i_op = &btrfs_dir_inode_operations; 6850 inode->i_fop = &btrfs_dir_file_operations; 6851 return btrfs_create_common(dir, dentry, inode); 6852 } 6853 6854 static noinline int uncompress_inline(struct btrfs_path *path, 6855 struct page *page, 6856 struct btrfs_file_extent_item *item) 6857 { 6858 int ret; 6859 struct extent_buffer *leaf = path->nodes[0]; 6860 char *tmp; 6861 size_t max_size; 6862 unsigned long inline_size; 6863 unsigned long ptr; 6864 int compress_type; 6865 6866 compress_type = btrfs_file_extent_compression(leaf, item); 6867 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6868 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 6869 tmp = kmalloc(inline_size, GFP_NOFS); 6870 if (!tmp) 6871 return -ENOMEM; 6872 ptr = btrfs_file_extent_inline_start(item); 6873 6874 read_extent_buffer(leaf, tmp, ptr, inline_size); 6875 6876 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6877 ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size); 6878 6879 /* 6880 * decompression code contains a memset to fill in any space between the end 6881 * of the uncompressed data and the end of max_size in case the decompressed 6882 * data ends up shorter than ram_bytes. That doesn't cover the hole between 6883 * the end of an inline extent and the beginning of the next block, so we 6884 * cover that region here. 
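* For example, on a 4K page with a 1000 byte inline extent, max_size ends up as 1000 and the memzero_page() call below zeroes the range [1000, 4096).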
6885 */ 6886 6887 if (max_size < PAGE_SIZE) 6888 memzero_page(page, max_size, PAGE_SIZE - max_size); 6889 kfree(tmp); 6890 return ret; 6891 } 6892 6893 static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path, 6894 struct page *page) 6895 { 6896 struct btrfs_file_extent_item *fi; 6897 void *kaddr; 6898 size_t copy_size; 6899 6900 if (!page || PageUptodate(page)) 6901 return 0; 6902 6903 ASSERT(page_offset(page) == 0); 6904 6905 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 6906 struct btrfs_file_extent_item); 6907 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) 6908 return uncompress_inline(path, page, fi); 6909 6910 copy_size = min_t(u64, PAGE_SIZE, 6911 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); 6912 kaddr = kmap_local_page(page); 6913 read_extent_buffer(path->nodes[0], kaddr, 6914 btrfs_file_extent_inline_start(fi), copy_size); 6915 kunmap_local(kaddr); 6916 if (copy_size < PAGE_SIZE) 6917 memzero_page(page, copy_size, PAGE_SIZE - copy_size); 6918 return 0; 6919 } 6920 6921 /* 6922 * Lookup the first extent overlapping a range in a file. 6923 * 6924 * @inode: file to search in 6925 * @page: page to read extent data into if the extent is inline 6926 * @pg_offset: offset into @page to copy to 6927 * @start: file offset 6928 * @len: length of range starting at @start 6929 * 6930 * Return the first &struct extent_map which overlaps the given range, reading 6931 * it from the B-tree and caching it if necessary. Note that there may be more 6932 * extents which overlap the given range after the returned extent_map. 6933 * 6934 * If @page is not NULL and the extent is inline, this also reads the extent 6935 * data directly into the page and marks the extent up to date in the io_tree. 6936 * 6937 * Return: ERR_PTR on error, non-NULL extent_map on success. 6938 */ 6939 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6940 struct page *page, size_t pg_offset, 6941 u64 start, u64 len) 6942 { 6943 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6944 int ret = 0; 6945 u64 extent_start = 0; 6946 u64 extent_end = 0; 6947 u64 objectid = btrfs_ino(inode); 6948 int extent_type = -1; 6949 struct btrfs_path *path = NULL; 6950 struct btrfs_root *root = inode->root; 6951 struct btrfs_file_extent_item *item; 6952 struct extent_buffer *leaf; 6953 struct btrfs_key found_key; 6954 struct extent_map *em = NULL; 6955 struct extent_map_tree *em_tree = &inode->extent_tree; 6956 6957 read_lock(&em_tree->lock); 6958 em = lookup_extent_mapping(em_tree, start, len); 6959 read_unlock(&em_tree->lock); 6960 6961 if (em) { 6962 if (em->start > start || em->start + em->len <= start) 6963 free_extent_map(em); 6964 else if (em->block_start == EXTENT_MAP_INLINE && page) 6965 free_extent_map(em); 6966 else 6967 goto out; 6968 } 6969 em = alloc_extent_map(); 6970 if (!em) { 6971 ret = -ENOMEM; 6972 goto out; 6973 } 6974 em->start = EXTENT_MAP_HOLE; 6975 em->orig_start = EXTENT_MAP_HOLE; 6976 em->len = (u64)-1; 6977 em->block_len = (u64)-1; 6978 6979 path = btrfs_alloc_path(); 6980 if (!path) { 6981 ret = -ENOMEM; 6982 goto out; 6983 } 6984 6985 /* Chances are we'll be called again, so go ahead and do readahead */ 6986 path->reada = READA_FORWARD; 6987 6988 /* 6989 * The same explanation in load_free_space_cache applies here as well, 6990 * we only read when we're loading the free space cache, and at that 6991 * point the commit_root has everything we need. 
6992 */ 6993 if (btrfs_is_free_space_inode(inode)) { 6994 path->search_commit_root = 1; 6995 path->skip_locking = 1; 6996 } 6997 6998 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6999 if (ret < 0) { 7000 goto out; 7001 } else if (ret > 0) { 7002 if (path->slots[0] == 0) 7003 goto not_found; 7004 path->slots[0]--; 7005 ret = 0; 7006 } 7007 7008 leaf = path->nodes[0]; 7009 item = btrfs_item_ptr(leaf, path->slots[0], 7010 struct btrfs_file_extent_item); 7011 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 7012 if (found_key.objectid != objectid || 7013 found_key.type != BTRFS_EXTENT_DATA_KEY) { 7014 /* 7015 * If we back up past the first extent we want to move forward 7016 * and see if there is an extent in front of us, otherwise we'll 7017 * say there is a hole for our whole search range, which can 7018 * cause problems. 7019 */ 7020 extent_end = start; 7021 goto next; 7022 } 7023 7024 extent_type = btrfs_file_extent_type(leaf, item); 7025 extent_start = found_key.offset; 7026 extent_end = btrfs_file_extent_end(path); 7027 if (extent_type == BTRFS_FILE_EXTENT_REG || 7028 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 7029 /* Only a regular file can have a regular/prealloc extent */ 7030 if (!S_ISREG(inode->vfs_inode.i_mode)) { 7031 ret = -EUCLEAN; 7032 btrfs_crit(fs_info, 7033 "regular/prealloc extent found for non-regular inode %llu", 7034 btrfs_ino(inode)); 7035 goto out; 7036 } 7037 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 7038 extent_start); 7039 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 7040 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 7041 path->slots[0], 7042 extent_start); 7043 } 7044 next: 7045 if (start >= extent_end) { 7046 path->slots[0]++; 7047 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 7048 ret = btrfs_next_leaf(root, path); 7049 if (ret < 0) 7050 goto out; 7051 else if (ret > 0) 7052 goto not_found; 7053 7054 leaf = path->nodes[0]; 7055 } 7056 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 7057 if (found_key.objectid != objectid || 7058 found_key.type != BTRFS_EXTENT_DATA_KEY) 7059 goto not_found; 7060 if (start + len <= found_key.offset) 7061 goto not_found; 7062 if (start > found_key.offset) 7063 goto next; 7064 7065 /* New extent overlaps with existing one */ 7066 em->start = start; 7067 em->orig_start = start; 7068 em->len = found_key.offset - start; 7069 em->block_start = EXTENT_MAP_HOLE; 7070 goto insert; 7071 } 7072 7073 btrfs_extent_item_to_extent_map(inode, path, item, em); 7074 7075 if (extent_type == BTRFS_FILE_EXTENT_REG || 7076 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 7077 goto insert; 7078 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 7079 /* 7080 * An inline extent can only exist at file offset 0. This is 7081 * ensured by the tree-checker and the inline extent creation 7082 * path. Thus all members representing file offsets should be zero. 7083 */ 7084 ASSERT(pg_offset == 0); 7085 ASSERT(extent_start == 0); 7086 ASSERT(em->start == 0); 7087 7088 /* 7089 * btrfs_extent_item_to_extent_map() should have properly 7090 * initialized em members already. 7091 * 7092 * Other members are not utilized for inline extents.
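* For example, on a filesystem with a 4K sectorsize the cached inline mapping always covers exactly the range [0, 4K), which is what the assertions below check.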
7093 */ 7094 ASSERT(em->block_start == EXTENT_MAP_INLINE); 7095 ASSERT(em->len == fs_info->sectorsize); 7096 7097 ret = read_inline_extent(inode, path, page); 7098 if (ret < 0) 7099 goto out; 7100 goto insert; 7101 } 7102 not_found: 7103 em->start = start; 7104 em->orig_start = start; 7105 em->len = len; 7106 em->block_start = EXTENT_MAP_HOLE; 7107 insert: 7108 ret = 0; 7109 btrfs_release_path(path); 7110 if (em->start > start || extent_map_end(em) <= start) { 7111 btrfs_err(fs_info, 7112 "bad extent! em: [%llu %llu] passed [%llu %llu]", 7113 em->start, em->len, start, len); 7114 ret = -EIO; 7115 goto out; 7116 } 7117 7118 write_lock(&em_tree->lock); 7119 ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); 7120 write_unlock(&em_tree->lock); 7121 out: 7122 btrfs_free_path(path); 7123 7124 trace_btrfs_get_extent(root, inode, em); 7125 7126 if (ret) { 7127 free_extent_map(em); 7128 return ERR_PTR(ret); 7129 } 7130 return em; 7131 } 7132 7133 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, 7134 const u64 start, 7135 const u64 len, 7136 const u64 orig_start, 7137 const u64 block_start, 7138 const u64 block_len, 7139 const u64 orig_block_len, 7140 const u64 ram_bytes, 7141 const int type) 7142 { 7143 struct extent_map *em = NULL; 7144 int ret; 7145 7146 if (type != BTRFS_ORDERED_NOCOW) { 7147 em = create_io_em(inode, start, len, orig_start, block_start, 7148 block_len, orig_block_len, ram_bytes, 7149 BTRFS_COMPRESS_NONE, /* compress_type */ 7150 type); 7151 if (IS_ERR(em)) 7152 goto out; 7153 } 7154 ret = btrfs_add_ordered_extent(inode, start, len, len, block_start, 7155 block_len, 0, 7156 (1 << type) | 7157 (1 << BTRFS_ORDERED_DIRECT), 7158 BTRFS_COMPRESS_NONE); 7159 if (ret) { 7160 if (em) { 7161 free_extent_map(em); 7162 btrfs_drop_extent_map_range(inode, start, 7163 start + len - 1, false); 7164 } 7165 em = ERR_PTR(ret); 7166 } 7167 out: 7168 7169 return em; 7170 } 7171 7172 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, 7173 u64 start, u64 len) 7174 { 7175 struct btrfs_root *root = inode->root; 7176 struct btrfs_fs_info *fs_info = root->fs_info; 7177 struct extent_map *em; 7178 struct btrfs_key ins; 7179 u64 alloc_hint; 7180 int ret; 7181 7182 alloc_hint = get_extent_allocation_hint(inode, start, len); 7183 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 7184 0, alloc_hint, &ins, 1, 1); 7185 if (ret) 7186 return ERR_PTR(ret); 7187 7188 em = btrfs_create_dio_extent(inode, start, ins.offset, start, 7189 ins.objectid, ins.offset, ins.offset, 7190 ins.offset, BTRFS_ORDERED_REGULAR); 7191 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 7192 if (IS_ERR(em)) 7193 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 7194 1); 7195 7196 return em; 7197 } 7198 7199 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) 7200 { 7201 struct btrfs_block_group *block_group; 7202 bool readonly = false; 7203 7204 block_group = btrfs_lookup_block_group(fs_info, bytenr); 7205 if (!block_group || block_group->ro) 7206 readonly = true; 7207 if (block_group) 7208 btrfs_put_block_group(block_group); 7209 return readonly; 7210 } 7211 7212 /* 7213 * Check if we can do a nocow write into the range [@offset, @offset + @len) 7214 * 7215 * @offset: File offset 7216 * @len: The length to write, will be updated to the nocow writeable 7217 * range 7218 * @orig_start: (optional) Return the original file offset of the file extent 7219 * @orig_block_len: (optional) Return the original on-disk length of
the file extent 7220 * @ram_bytes: (optional) Return the ram_bytes of the file extent 7221 * @strict: if true, omit optimizations that might force us into unnecessary 7222 * cow. e.g., don't trust generation number. 7223 * 7224 * Return: 7225 * >0 and update @len if we can do nocow write 7226 * 0 if we can't do nocow write 7227 * <0 if error happened 7228 * 7229 * NOTE: This only checks the file extents, caller is responsible to wait for 7230 * any ordered extents. 7231 */ 7232 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7233 u64 *orig_start, u64 *orig_block_len, 7234 u64 *ram_bytes, bool nowait, bool strict) 7235 { 7236 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7237 struct can_nocow_file_extent_args nocow_args = { 0 }; 7238 struct btrfs_path *path; 7239 int ret; 7240 struct extent_buffer *leaf; 7241 struct btrfs_root *root = BTRFS_I(inode)->root; 7242 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7243 struct btrfs_file_extent_item *fi; 7244 struct btrfs_key key; 7245 int found_type; 7246 7247 path = btrfs_alloc_path(); 7248 if (!path) 7249 return -ENOMEM; 7250 path->nowait = nowait; 7251 7252 ret = btrfs_lookup_file_extent(NULL, root, path, 7253 btrfs_ino(BTRFS_I(inode)), offset, 0); 7254 if (ret < 0) 7255 goto out; 7256 7257 if (ret == 1) { 7258 if (path->slots[0] == 0) { 7259 /* can't find the item, must cow */ 7260 ret = 0; 7261 goto out; 7262 } 7263 path->slots[0]--; 7264 } 7265 ret = 0; 7266 leaf = path->nodes[0]; 7267 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 7268 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7269 key.type != BTRFS_EXTENT_DATA_KEY) { 7270 /* not our file or wrong item type, must cow */ 7271 goto out; 7272 } 7273 7274 if (key.offset > offset) { 7275 /* Wrong offset, must cow */ 7276 goto out; 7277 } 7278 7279 if (btrfs_file_extent_end(path) <= offset) 7280 goto out; 7281 7282 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 7283 found_type = btrfs_file_extent_type(leaf, fi); 7284 if (ram_bytes) 7285 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7286 7287 nocow_args.start = offset; 7288 nocow_args.end = offset + *len - 1; 7289 nocow_args.strict = strict; 7290 nocow_args.free_path = true; 7291 7292 ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args); 7293 /* can_nocow_file_extent() has freed the path. */ 7294 path = NULL; 7295 7296 if (ret != 1) { 7297 /* Treat errors as not being able to NOCOW. 
*/ 7298 ret = 0; 7299 goto out; 7300 } 7301 7302 ret = 0; 7303 if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr)) 7304 goto out; 7305 7306 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7307 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7308 u64 range_end; 7309 7310 range_end = round_up(offset + nocow_args.num_bytes, 7311 root->fs_info->sectorsize) - 1; 7312 ret = test_range_bit(io_tree, offset, range_end, 7313 EXTENT_DELALLOC, 0, NULL); 7314 if (ret) { 7315 ret = -EAGAIN; 7316 goto out; 7317 } 7318 } 7319 7320 if (orig_start) 7321 *orig_start = key.offset - nocow_args.extent_offset; 7322 if (orig_block_len) 7323 *orig_block_len = nocow_args.disk_num_bytes; 7324 7325 *len = nocow_args.num_bytes; 7326 ret = 1; 7327 out: 7328 btrfs_free_path(path); 7329 return ret; 7330 } 7331 7332 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7333 struct extent_state **cached_state, 7334 unsigned int iomap_flags) 7335 { 7336 const bool writing = (iomap_flags & IOMAP_WRITE); 7337 const bool nowait = (iomap_flags & IOMAP_NOWAIT); 7338 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7339 struct btrfs_ordered_extent *ordered; 7340 int ret = 0; 7341 7342 while (1) { 7343 if (nowait) { 7344 if (!try_lock_extent(io_tree, lockstart, lockend, 7345 cached_state)) 7346 return -EAGAIN; 7347 } else { 7348 lock_extent(io_tree, lockstart, lockend, cached_state); 7349 } 7350 /* 7351 * We're concerned with the entire range that we're going to be 7352 * doing DIO to, so we need to make sure there are no ordered 7353 * extents in this range. 7354 */ 7355 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, 7356 lockend - lockstart + 1); 7357 7358 /* 7359 * We need to make sure there are no buffered pages in this 7360 * range either; we could have raced between the invalidate in 7361 * generic_file_direct_write and locking the extent. The 7362 * invalidate needs to happen so that reads after a write do not 7363 * get stale data. 7364 */ 7365 if (!ordered && 7366 (!writing || !filemap_range_has_page(inode->i_mapping, 7367 lockstart, lockend))) 7368 break; 7369 7370 unlock_extent(io_tree, lockstart, lockend, cached_state); 7371 7372 if (ordered) { 7373 if (nowait) { 7374 btrfs_put_ordered_extent(ordered); 7375 ret = -EAGAIN; 7376 break; 7377 } 7378 /* 7379 * If we are doing a DIO read and the ordered extent we 7380 * found is for a buffered write, we cannot wait for it 7381 * to complete and retry, because if we do so we can 7382 * deadlock with concurrent buffered writes on page 7383 * locks. This happens only if our DIO read covers more 7384 * than one extent map, if at this point it has already 7385 * created an ordered extent for a previous extent map 7386 * and locked its range in the inode's io tree, and a 7387 * concurrent write against that previous extent map's 7388 * range and this range has started (we unlock the ranges 7389 * in the io tree only when the bios complete and 7390 * buffered writes always lock pages before attempting 7391 * to lock the range in the io tree). 7392 */ 7393 if (writing || 7394 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) 7395 btrfs_start_ordered_extent(ordered, 1); 7396 else 7397 ret = nowait ?
-EAGAIN : -ENOTBLK;
7398 btrfs_put_ordered_extent(ordered);
7399 } else {
7400 /*
7401 * We could trigger writeback for this range (and wait
7402 * for it to complete) and then invalidate the pages for
7403 * this range (through invalidate_inode_pages2_range()),
7404 * but that can lead us to a deadlock with a concurrent
7405 * call to readahead (a buffered read or a defrag call
7406 * triggered a readahead) on a page lock due to an
7407 * ordered dio extent we created before but did not yet
7408 * have a corresponding bio submitted (whence it cannot
7409 * complete), which makes readahead wait for that
7410 * ordered extent to complete while holding a lock on
7411 * that page.
7412 */
7413 ret = nowait ? -EAGAIN : -ENOTBLK;
7414 }
7415
7416 if (ret)
7417 break;
7418
7419 cond_resched();
7420 }
7421
7422 return ret;
7423 }
7424
7425 /* The callers of this must take lock_extent() */
7426 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
7427 u64 len, u64 orig_start, u64 block_start,
7428 u64 block_len, u64 orig_block_len,
7429 u64 ram_bytes, int compress_type,
7430 int type)
7431 {
7432 struct extent_map *em;
7433 int ret;
7434
7435 ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7436 type == BTRFS_ORDERED_COMPRESSED ||
7437 type == BTRFS_ORDERED_NOCOW ||
7438 type == BTRFS_ORDERED_REGULAR);
7439
7440 em = alloc_extent_map();
7441 if (!em)
7442 return ERR_PTR(-ENOMEM);
7443
7444 em->start = start;
7445 em->orig_start = orig_start;
7446 em->len = len;
7447 em->block_len = block_len;
7448 em->block_start = block_start;
7449 em->orig_block_len = orig_block_len;
7450 em->ram_bytes = ram_bytes;
7451 em->generation = -1;
7452 set_bit(EXTENT_FLAG_PINNED, &em->flags);
7453 if (type == BTRFS_ORDERED_PREALLOC) {
7454 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7455 } else if (type == BTRFS_ORDERED_COMPRESSED) {
7456 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7457 em->compress_type = compress_type;
7458 }
7459
7460 ret = btrfs_replace_extent_map_range(inode, em, true);
7461 if (ret) {
7462 free_extent_map(em);
7463 return ERR_PTR(ret);
7464 }
7465
7466 /* The em got 2 refs now, the caller needs to do free_extent_map() once. */
7467 return em;
7468 }
7469
7470
7471 static int btrfs_get_blocks_direct_write(struct extent_map **map,
7472 struct inode *inode,
7473 struct btrfs_dio_data *dio_data,
7474 u64 start, u64 len,
7475 unsigned int iomap_flags)
7476 {
7477 const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7478 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7479 struct extent_map *em = *map;
7480 int type;
7481 u64 block_start, orig_start, orig_block_len, ram_bytes;
7482 struct btrfs_block_group *bg;
7483 bool can_nocow = false;
7484 bool space_reserved = false;
7485 u64 prev_len;
7486 int ret = 0;
7487
7488 /*
7489 * We don't allocate a new extent in the following cases
7490 *
7491 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7492 * existing extent.
7493 * 2) The extent is marked as PREALLOC. We're good to go here and can
7494 * just use the extent.
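 *
 * In both cases we still call can_nocow_extent() below to confirm,
 * at file extent item granularity, that an in-place (NOCOW) write
 * is actually allowed for the range.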
7495 * 7496 */ 7497 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 7498 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7499 em->block_start != EXTENT_MAP_HOLE)) { 7500 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7501 type = BTRFS_ORDERED_PREALLOC; 7502 else 7503 type = BTRFS_ORDERED_NOCOW; 7504 len = min(len, em->len - (start - em->start)); 7505 block_start = em->block_start + (start - em->start); 7506 7507 if (can_nocow_extent(inode, start, &len, &orig_start, 7508 &orig_block_len, &ram_bytes, false, false) == 1) { 7509 bg = btrfs_inc_nocow_writers(fs_info, block_start); 7510 if (bg) 7511 can_nocow = true; 7512 } 7513 } 7514 7515 prev_len = len; 7516 if (can_nocow) { 7517 struct extent_map *em2; 7518 7519 /* We can NOCOW, so only need to reserve metadata space. */ 7520 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, 7521 nowait); 7522 if (ret < 0) { 7523 /* Our caller expects us to free the input extent map. */ 7524 free_extent_map(em); 7525 *map = NULL; 7526 btrfs_dec_nocow_writers(bg); 7527 if (nowait && (ret == -ENOSPC || ret == -EDQUOT)) 7528 ret = -EAGAIN; 7529 goto out; 7530 } 7531 space_reserved = true; 7532 7533 em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len, 7534 orig_start, block_start, 7535 len, orig_block_len, 7536 ram_bytes, type); 7537 btrfs_dec_nocow_writers(bg); 7538 if (type == BTRFS_ORDERED_PREALLOC) { 7539 free_extent_map(em); 7540 *map = em2; 7541 em = em2; 7542 } 7543 7544 if (IS_ERR(em2)) { 7545 ret = PTR_ERR(em2); 7546 goto out; 7547 } 7548 7549 dio_data->nocow_done = true; 7550 } else { 7551 /* Our caller expects us to free the input extent map. */ 7552 free_extent_map(em); 7553 *map = NULL; 7554 7555 if (nowait) 7556 return -EAGAIN; 7557 7558 /* 7559 * If we could not allocate data space before locking the file 7560 * range and we can't do a NOCOW write, then we have to fail. 7561 */ 7562 if (!dio_data->data_space_reserved) 7563 return -ENOSPC; 7564 7565 /* 7566 * We have to COW and we have already reserved data space before, 7567 * so now we reserve only metadata. 7568 */ 7569 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, 7570 false); 7571 if (ret < 0) 7572 goto out; 7573 space_reserved = true; 7574 7575 em = btrfs_new_extent_direct(BTRFS_I(inode), start, len); 7576 if (IS_ERR(em)) { 7577 ret = PTR_ERR(em); 7578 goto out; 7579 } 7580 *map = em; 7581 len = min(len, em->len - (start - em->start)); 7582 if (len < prev_len) 7583 btrfs_delalloc_release_metadata(BTRFS_I(inode), 7584 prev_len - len, true); 7585 } 7586 7587 /* 7588 * We have created our ordered extent, so we can now release our reservation 7589 * for an outstanding extent. 7590 */ 7591 btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len); 7592 7593 /* 7594 * Need to update the i_size under the extent lock so buffered 7595 * readers will get the updated i_size when we unlock. 
7596 */
7597 if (start + len > i_size_read(inode))
7598 i_size_write(inode, start + len);
7599 out:
7600 if (ret && space_reserved) {
7601 btrfs_delalloc_release_extents(BTRFS_I(inode), len);
7602 btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
7603 }
7604 return ret;
7605 }
7606
7607 static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
7608 loff_t length, unsigned int flags, struct iomap *iomap,
7609 struct iomap *srcmap)
7610 {
7611 struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7612 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7613 struct extent_map *em;
7614 struct extent_state *cached_state = NULL;
7615 struct btrfs_dio_data *dio_data = iter->private;
7616 u64 lockstart, lockend;
7617 const bool write = !!(flags & IOMAP_WRITE);
7618 int ret = 0;
7619 u64 len = length;
7620 const u64 data_alloc_len = length;
7621 bool unlock_extents = false;
7622
7623 /*
7624 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
7625 * we're NOWAIT we may submit a bio for a partial range and return
7626 * EIOCBQUEUED, which would result in an errant short read.
7627 *
7628 * The best way to handle this would be to allow for partial completions
7629 * of iocbs, so we could submit the partial bio, return and fault in
7630 * the rest of the pages, and then submit the io for the rest of the
7631 * range. However we don't have that currently, so simply return
7632 * -EAGAIN at this point so that the normal path is used.
7633 */
7634 if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
7635 return -EAGAIN;
7636
7637 /*
7638 * Cap the size of reads to that usually seen in buffered I/O as we need
7639 * to allocate a contiguous array for the checksums.
7640 */
7641 if (!write)
7642 len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);
7643
7644 lockstart = start;
7645 lockend = start + len - 1;
7646
7647 /*
7648 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
7649 * enough if we've written compressed pages to this area, so we need to
7650 * flush the dirty pages again to make absolutely sure that any
7651 * outstanding dirty pages are on disk - the first flush only starts
7652 * compression on the data, while keeping the pages locked, so by the
7653 * time the second flush returns we know bios for the compressed pages
7654 * were submitted and finished, and the pages are no longer under writeback.
7655 *
7656 * If we have a NOWAIT request and we have any pages in the range that
7657 * are locked, likely due to compression still in progress, we don't want
7658 * to block on page locks. We also don't want to block on pages marked as
7659 * dirty or under writeback (same as for the non-compression case).
7660 * iomap_dio_rw() did the same check, but after that and before we got
7661 * here, mmap'ed writes may have happened or buffered reads started
7662 * (readpage() and readahead(), which lock pages), as we haven't locked
7663 * the file range yet.
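 *
 * That is why, for NOWAIT, we re-check with
 * filemap_range_needs_writeback() below and return -EAGAIN instead
 * of risking a blocking flush or wait.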
7664 */ 7665 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 7666 &BTRFS_I(inode)->runtime_flags)) { 7667 if (flags & IOMAP_NOWAIT) { 7668 if (filemap_range_needs_writeback(inode->i_mapping, 7669 lockstart, lockend)) 7670 return -EAGAIN; 7671 } else { 7672 ret = filemap_fdatawrite_range(inode->i_mapping, start, 7673 start + length - 1); 7674 if (ret) 7675 return ret; 7676 } 7677 } 7678 7679 memset(dio_data, 0, sizeof(*dio_data)); 7680 7681 /* 7682 * We always try to allocate data space and must do it before locking 7683 * the file range, to avoid deadlocks with concurrent writes to the same 7684 * range if the range has several extents and the writes don't expand the 7685 * current i_size (the inode lock is taken in shared mode). If we fail to 7686 * allocate data space here we continue and later, after locking the 7687 * file range, we fail with ENOSPC only if we figure out we can not do a 7688 * NOCOW write. 7689 */ 7690 if (write && !(flags & IOMAP_NOWAIT)) { 7691 ret = btrfs_check_data_free_space(BTRFS_I(inode), 7692 &dio_data->data_reserved, 7693 start, data_alloc_len, false); 7694 if (!ret) 7695 dio_data->data_space_reserved = true; 7696 else if (ret && !(BTRFS_I(inode)->flags & 7697 (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) 7698 goto err; 7699 } 7700 7701 /* 7702 * If this errors out it's because we couldn't invalidate pagecache for 7703 * this range and we need to fallback to buffered IO, or we are doing a 7704 * NOWAIT read/write and we need to block. 7705 */ 7706 ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags); 7707 if (ret < 0) 7708 goto err; 7709 7710 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); 7711 if (IS_ERR(em)) { 7712 ret = PTR_ERR(em); 7713 goto unlock_err; 7714 } 7715 7716 /* 7717 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered 7718 * io. INLINE is special, and we could probably kludge it in here, but 7719 * it's still buffered so for safety lets just fall back to the generic 7720 * buffered path. 7721 * 7722 * For COMPRESSED we _have_ to read the entire extent in so we can 7723 * decompress it, so there will be buffering required no matter what we 7724 * do, so go ahead and fallback to buffered. 7725 * 7726 * We return -ENOTBLK because that's what makes DIO go ahead and go back 7727 * to buffered IO. Don't blame me, this is the price we pay for using 7728 * the generic code. 7729 */ 7730 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 7731 em->block_start == EXTENT_MAP_INLINE) { 7732 free_extent_map(em); 7733 /* 7734 * If we are in a NOWAIT context, return -EAGAIN in order to 7735 * fallback to buffered IO. This is not only because we can 7736 * block with buffered IO (no support for NOWAIT semantics at 7737 * the moment) but also to avoid returning short reads to user 7738 * space - this happens if we were able to read some data from 7739 * previous non-compressed extents and then when we fallback to 7740 * buffered IO, at btrfs_file_read_iter() by calling 7741 * filemap_read(), we fail to fault in pages for the read buffer, 7742 * in which case filemap_read() returns a short read (the number 7743 * of bytes previously read is > 0, so it does not return -EFAULT). 7744 */ 7745 ret = (flags & IOMAP_NOWAIT) ? 
-EAGAIN : -ENOTBLK;
7746 goto unlock_err;
7747 }
7748
7749 len = min(len, em->len - (start - em->start));
7750
7751 /*
7752 * If we have a NOWAIT request and the range contains multiple extents
7753 * (or a mix of extents and holes), then we return -EAGAIN to make the
7754 * caller fallback to a context where it can do a blocking (without
7755 * NOWAIT) request. This way we avoid doing partial IO and returning
7756 * success to the caller, which is not optimal for writes and for reads
7757 * it can result in unexpected behaviour for an application.
7758 *
7759 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
7760 * iomap_dio_rw(), we can end up returning less data than what the caller
7761 * asked for, resulting in an unexpected, and incorrect, short read.
7762 * That is, the caller asked to read N bytes and we return less than that,
7763 * which is wrong unless we are crossing EOF. This happens if we get a
7764 * page fault error when trying to fault in pages for the buffer that is
7765 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
7766 * have previously submitted bios for other extents in the range, in
7767 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
7768 * those bios have completed by the time we get the page fault error,
7769 * which we return back to our caller - we should only return EIOCBQUEUED
7770 * after we have submitted bios for all the extents in the range.
7771 */
7772 if ((flags & IOMAP_NOWAIT) && len < length) {
7773 free_extent_map(em);
7774 ret = -EAGAIN;
7775 goto unlock_err;
7776 }
7777
7778 if (write) {
7779 ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
7780 start, len, flags);
7781 if (ret < 0)
7782 goto unlock_err;
7783 unlock_extents = true;
7784 /* Recalc len in case the new em is smaller than requested */
7785 len = min(len, em->len - (start - em->start));
7786 if (dio_data->data_space_reserved) {
7787 u64 release_offset;
7788 u64 release_len = 0;
7789
7790 if (dio_data->nocow_done) {
7791 release_offset = start;
7792 release_len = data_alloc_len;
7793 } else if (len < data_alloc_len) {
7794 release_offset = start + len;
7795 release_len = data_alloc_len - len;
7796 }
7797
7798 if (release_len > 0)
7799 btrfs_free_reserved_data_space(BTRFS_I(inode),
7800 dio_data->data_reserved,
7801 release_offset,
7802 release_len);
7803 }
7804 } else {
7805 /*
7806 * We need to unlock only the end area that we aren't using.
7807 * The rest is going to be unlocked by the endio routine.
7808 */
7809 lockstart = start + len;
7810 if (lockstart < lockend)
7811 unlock_extents = true;
7812 }
7813
7814 if (unlock_extents)
7815 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7816 &cached_state);
7817 else
7818 free_extent_state(cached_state);
7819
7820 /*
7821 * Translate extent map information to iomap.
7822 * We trim the extents (and move the addr) even though iomap code does
7823 * that, since we have locked only the parts we are performing I/O in.
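 *
 * For example, if the extent map covers a range starting before our
 * locked range, iomap->addr below is advanced by (start - em->start)
 * so that it points at the disk location backing the start of the
 * locked range.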
7824 */ 7825 if ((em->block_start == EXTENT_MAP_HOLE) || 7826 (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { 7827 iomap->addr = IOMAP_NULL_ADDR; 7828 iomap->type = IOMAP_HOLE; 7829 } else { 7830 iomap->addr = em->block_start + (start - em->start); 7831 iomap->type = IOMAP_MAPPED; 7832 } 7833 iomap->offset = start; 7834 iomap->bdev = fs_info->fs_devices->latest_dev->bdev; 7835 iomap->length = len; 7836 7837 if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start)) 7838 iomap->flags |= IOMAP_F_ZONE_APPEND; 7839 7840 free_extent_map(em); 7841 7842 return 0; 7843 7844 unlock_err: 7845 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7846 &cached_state); 7847 err: 7848 if (dio_data->data_space_reserved) { 7849 btrfs_free_reserved_data_space(BTRFS_I(inode), 7850 dio_data->data_reserved, 7851 start, data_alloc_len); 7852 extent_changeset_free(dio_data->data_reserved); 7853 } 7854 7855 return ret; 7856 } 7857 7858 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, 7859 ssize_t written, unsigned int flags, struct iomap *iomap) 7860 { 7861 struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); 7862 struct btrfs_dio_data *dio_data = iter->private; 7863 size_t submitted = dio_data->submitted; 7864 const bool write = !!(flags & IOMAP_WRITE); 7865 int ret = 0; 7866 7867 if (!write && (iomap->type == IOMAP_HOLE)) { 7868 /* If reading from a hole, unlock and return */ 7869 unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1, 7870 NULL); 7871 return 0; 7872 } 7873 7874 if (submitted < length) { 7875 pos += submitted; 7876 length -= submitted; 7877 if (write) 7878 btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL, 7879 pos, length, false); 7880 else 7881 unlock_extent(&BTRFS_I(inode)->io_tree, pos, 7882 pos + length - 1, NULL); 7883 ret = -ENOTBLK; 7884 } 7885 7886 if (write) 7887 extent_changeset_free(dio_data->data_reserved); 7888 return ret; 7889 } 7890 7891 static void btrfs_dio_private_put(struct btrfs_dio_private *dip) 7892 { 7893 /* 7894 * This implies a barrier so that stores to dio_bio->bi_status before 7895 * this and loads of dio_bio->bi_status after this are fully ordered. 
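 *
 * (refcount_dec_and_test() provides release ordering on the
 * decrement and acquire ordering for the thread that observes the
 * count drop to zero, which is what gives us that barrier.)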
7896 */ 7897 if (!refcount_dec_and_test(&dip->refs)) 7898 return; 7899 7900 if (btrfs_op(&dip->bio) == BTRFS_MAP_WRITE) { 7901 btrfs_mark_ordered_io_finished(dip->inode, NULL, 7902 dip->file_offset, dip->bytes, 7903 !dip->bio.bi_status); 7904 } else { 7905 unlock_extent(&dip->inode->io_tree, 7906 dip->file_offset, 7907 dip->file_offset + dip->bytes - 1, NULL); 7908 } 7909 7910 kfree(dip->csums); 7911 bio_endio(&dip->bio); 7912 } 7913 7914 void btrfs_submit_dio_repair_bio(struct btrfs_inode *inode, struct bio *bio, int mirror_num) 7915 { 7916 struct btrfs_dio_private *dip = btrfs_bio(bio)->private; 7917 7918 BUG_ON(bio_op(bio) == REQ_OP_WRITE); 7919 7920 refcount_inc(&dip->refs); 7921 btrfs_submit_bio(inode->root->fs_info, bio, mirror_num); 7922 } 7923 7924 static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip, 7925 struct btrfs_bio *bbio, 7926 const bool uptodate) 7927 { 7928 struct inode *inode = &dip->inode->vfs_inode; 7929 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 7930 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); 7931 blk_status_t err = BLK_STS_OK; 7932 struct bvec_iter iter; 7933 struct bio_vec bv; 7934 u32 offset; 7935 7936 btrfs_bio_for_each_sector(fs_info, bv, bbio, iter, offset) { 7937 u64 start = bbio->file_offset + offset; 7938 7939 if (uptodate && 7940 (!csum || !btrfs_check_data_csum(BTRFS_I(inode), bbio, offset, 7941 bv.bv_page, bv.bv_offset))) { 7942 btrfs_clean_io_failure(BTRFS_I(inode), start, 7943 bv.bv_page, bv.bv_offset); 7944 } else { 7945 int ret; 7946 7947 ret = btrfs_repair_one_sector(BTRFS_I(inode), bbio, offset, 7948 bv.bv_page, bv.bv_offset, false); 7949 if (ret) 7950 err = errno_to_blk_status(ret); 7951 } 7952 } 7953 7954 return err; 7955 } 7956 7957 blk_status_t btrfs_submit_bio_start_direct_io(struct btrfs_inode *inode, 7958 struct bio *bio, 7959 u64 dio_file_offset) 7960 { 7961 return btrfs_csum_one_bio(inode, bio, dio_file_offset, false); 7962 } 7963 7964 static void btrfs_end_dio_bio(struct btrfs_bio *bbio) 7965 { 7966 struct btrfs_dio_private *dip = bbio->private; 7967 struct bio *bio = &bbio->bio; 7968 blk_status_t err = bio->bi_status; 7969 7970 if (err) 7971 btrfs_warn(dip->inode->root->fs_info, 7972 "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d", 7973 btrfs_ino(dip->inode), bio_op(bio), 7974 bio->bi_opf, bio->bi_iter.bi_sector, 7975 bio->bi_iter.bi_size, err); 7976 7977 if (bio_op(bio) == REQ_OP_READ) 7978 err = btrfs_check_read_dio_bio(dip, bbio, !err); 7979 7980 if (err) 7981 dip->bio.bi_status = err; 7982 7983 btrfs_record_physical_zoned(&dip->inode->vfs_inode, bbio->file_offset, bio); 7984 7985 bio_put(bio); 7986 btrfs_dio_private_put(dip); 7987 } 7988 7989 static void btrfs_submit_dio_bio(struct bio *bio, struct btrfs_inode *inode, 7990 u64 file_offset, int async_submit) 7991 { 7992 struct btrfs_fs_info *fs_info = inode->root->fs_info; 7993 struct btrfs_dio_private *dip = btrfs_bio(bio)->private; 7994 blk_status_t ret; 7995 7996 /* Save the original iter for read repair */ 7997 if (btrfs_op(bio) == BTRFS_MAP_READ) 7998 btrfs_bio(bio)->iter = bio->bi_iter; 7999 8000 if (inode->flags & BTRFS_INODE_NODATASUM) 8001 goto map; 8002 8003 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 8004 /* Check btrfs_submit_data_write_bio() for async submit rules */ 8005 if (async_submit && !atomic_read(&inode->sync_writers) && 8006 btrfs_wq_submit_bio(inode, bio, 0, file_offset, 8007 WQ_SUBMIT_DATA_DIO)) 8008 return; 8009 8010 /* 8011 * If we aren't doing async submit, calculate the csum of 
the
8012 * bio now.
8013 */
8014 ret = btrfs_csum_one_bio(inode, bio, file_offset, false);
8015 if (ret) {
8016 btrfs_bio_end_io(btrfs_bio(bio), ret);
8017 return;
8018 }
8019 } else {
8020 btrfs_bio(bio)->csum = btrfs_csum_ptr(fs_info, dip->csums,
8021 file_offset - dip->file_offset);
8022 }
8023 map:
8024 btrfs_submit_bio(fs_info, bio, 0);
8025 }
8026
8027 static void btrfs_submit_direct(const struct iomap_iter *iter,
8028 struct bio *dio_bio, loff_t file_offset)
8029 {
8030 struct btrfs_dio_private *dip =
8031 container_of(dio_bio, struct btrfs_dio_private, bio);
8032 struct inode *inode = iter->inode;
8033 const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
8034 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8035 const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
8036 BTRFS_BLOCK_GROUP_RAID56_MASK);
8037 struct bio *bio;
8038 u64 start_sector;
8039 int async_submit = 0;
8040 u64 submit_len;
8041 u64 clone_offset = 0;
8042 u64 clone_len;
8043 u64 logical;
8044 int ret;
8045 blk_status_t status;
8046 struct btrfs_io_geometry geom;
8047 struct btrfs_dio_data *dio_data = iter->private;
8048 struct extent_map *em = NULL;
8049
8050 dip->inode = BTRFS_I(inode);
8051 dip->file_offset = file_offset;
8052 dip->bytes = dio_bio->bi_iter.bi_size;
8053 refcount_set(&dip->refs, 1);
8054 dip->csums = NULL;
8055
8056 if (!write && !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
8057 unsigned int nr_sectors =
8058 (dio_bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
8059
8060 /*
8061 * Load the csums up front to reduce csum tree searches and
8062 * contention when submitting bios.
8063 */
8064 status = BLK_STS_RESOURCE;
8065 dip->csums = kcalloc(nr_sectors, fs_info->csum_size, GFP_NOFS);
8066 if (!dip->csums)
8067 goto out_err;
8068
8069 status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums);
8070 if (status != BLK_STS_OK)
8071 goto out_err;
8072 }
8073
8074 start_sector = dio_bio->bi_iter.bi_sector;
8075 submit_len = dio_bio->bi_iter.bi_size;
8076
8077 do {
8078 logical = start_sector << 9;
8079 em = btrfs_get_chunk_map(fs_info, logical, submit_len);
8080 if (IS_ERR(em)) {
8081 status = errno_to_blk_status(PTR_ERR(em));
8082 em = NULL;
8083 goto out_err_em;
8084 }
8085 ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio),
8086 logical, &geom);
8087 if (ret) {
8088 status = errno_to_blk_status(ret);
8089 goto out_err_em;
8090 }
8091
8092 clone_len = min(submit_len, geom.len);
8093 ASSERT(clone_len <= UINT_MAX);
8094
8095 /*
8096 * This will never fail as it's passing GFP_NOFS and
8097 * the allocation is backed by btrfs_bioset.
8098 */
8099 bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len,
8100 btrfs_end_dio_bio, dip);
8101 btrfs_bio(bio)->file_offset = file_offset;
8102
8103 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
8104 status = extract_ordered_extent(BTRFS_I(inode), bio,
8105 file_offset);
8106 if (status) {
8107 bio_put(bio);
8108 goto out_err;
8109 }
8110 }
8111
8112 ASSERT(submit_len >= clone_len);
8113 submit_len -= clone_len;
8114
8115 /*
8116 * Increase the count before we submit the bio so we know
8117 * the end IO handler won't happen before we increase the
8118 * count. Otherwise, the dip might get freed before we're
8119 * done setting it up.
8120 *
8121 * We transfer the initial reference to the last bio, so we
8122 * don't need to increment the reference count for the last one.
8123 */
8124 if (submit_len > 0) {
8125 refcount_inc(&dip->refs);
8126 /*
8127 * If we are submitting more than one bio, submit them
8128 * all asynchronously.
The exception is RAID 5 or 6, as
8129 * asynchronous checksums make it difficult to collect
8130 * full stripe writes.
8131 */
8132 if (!raid56)
8133 async_submit = 1;
8134 }
8135
8136 btrfs_submit_dio_bio(bio, BTRFS_I(inode), file_offset, async_submit);
8137
8138 dio_data->submitted += clone_len;
8139 clone_offset += clone_len;
8140 start_sector += clone_len >> 9;
8141 file_offset += clone_len;
8142
8143 free_extent_map(em);
8144 } while (submit_len > 0);
8145 return;
8146
8147 out_err_em:
8148 free_extent_map(em);
8149 out_err:
8150 dio_bio->bi_status = status;
8151 btrfs_dio_private_put(dip);
8152 }
8153
8154 static const struct iomap_ops btrfs_dio_iomap_ops = {
8155 .iomap_begin = btrfs_dio_iomap_begin,
8156 .iomap_end = btrfs_dio_iomap_end,
8157 };
8158
8159 static const struct iomap_dio_ops btrfs_dio_ops = {
8160 .submit_io = btrfs_submit_direct,
8161 .bio_set = &btrfs_dio_bioset,
8162 };
8163
8164 ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
8165 {
8166 struct btrfs_dio_data data;
8167
8168 return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
8169 IOMAP_DIO_PARTIAL, &data, done_before);
8170 }
8171
8172 struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
8173 size_t done_before)
8174 {
8175 struct btrfs_dio_data data;
8176
8177 return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
8178 IOMAP_DIO_PARTIAL, &data, done_before);
8179 }
8180
8181 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8182 u64 start, u64 len)
8183 {
8184 int ret;
8185
8186 ret = fiemap_prep(inode, fieinfo, start, &len, 0);
8187 if (ret)
8188 return ret;
8189
8190 /*
8191 * fiemap_prep() called filemap_write_and_wait() for the whole possible
8192 * file range (0 to LLONG_MAX), but that is not enough if we have
8193 * compression enabled. The first filemap_fdatawrite_range() only kicks
8194 * in the compression of data (in an async thread) and will return
8195 * before the compression is done and writeback is started. A second
8196 * filemap_fdatawrite_range() is needed to wait for the compression to
8197 * complete and writeback to start. We also need to wait for ordered
8198 * extents to complete, because our fiemap implementation uses mainly
8199 * file extent items to list the extents, searching for extent maps
8200 * only for file ranges with holes or prealloc extents to figure out
8201 * if we have delalloc in those ranges.
8202 */
8203 if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
8204 ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
8205 if (ret)
8206 return ret;
8207 }
8208
8209 return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
8210 }
8211
8212 static int btrfs_writepages(struct address_space *mapping,
8213 struct writeback_control *wbc)
8214 {
8215 return extent_writepages(mapping, wbc);
8216 }
8217
8218 static void btrfs_readahead(struct readahead_control *rac)
8219 {
8220 extent_readahead(rac);
8221 }
8222
8223 /*
8224 * For release_folio() and invalidate_folio() we have a race window where
8225 * folio_end_writeback() is called but the subpage spinlock is not yet released.
8226 * If we continue to release/invalidate the page, we could cause a
8227 * use-after-free on the subpage spinlock. So this function spins and
8228 * waits for the subpage spinlock to be released.
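 *
 * (See wait_subpage_spinlock() below: it simply takes and drops
 * subpage->lock, so once it returns any endio that was still inside
 * the locked section is known to have exited.)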
8229 */
8230 static void wait_subpage_spinlock(struct page *page)
8231 {
8232 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
8233 struct btrfs_subpage *subpage;
8234
8235 if (!btrfs_is_subpage(fs_info, page))
8236 return;
8237
8238 ASSERT(PagePrivate(page) && page->private);
8239 subpage = (struct btrfs_subpage *)page->private;
8240
8241 /*
8242 * This may look insane as we just acquire the spinlock and release it,
8243 * without doing anything. But we just want to make sure no one is
8244 * still holding the subpage spinlock.
8245 * And since the page is neither dirty nor under writeback, and we have
8246 * the page locked, the only possible way to hold the spinlock is from
8247 * the endio function clearing page writeback.
8248 *
8249 * Here we just acquire the spinlock so that all existing holders
8250 * have exited and we're safe to release/invalidate the page.
8251 */
8252 spin_lock_irq(&subpage->lock);
8253 spin_unlock_irq(&subpage->lock);
8254 }
8255
8256 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
8257 {
8258 int ret = try_release_extent_mapping(&folio->page, gfp_flags);
8259
8260 if (ret == 1) {
8261 wait_subpage_spinlock(&folio->page);
8262 clear_page_extent_mapped(&folio->page);
8263 }
8264 return ret;
8265 }
8266
8267 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
8268 {
8269 if (folio_test_writeback(folio) || folio_test_dirty(folio))
8270 return false;
8271 return __btrfs_release_folio(folio, gfp_flags);
8272 }
8273
8274 #ifdef CONFIG_MIGRATION
8275 static int btrfs_migrate_folio(struct address_space *mapping,
8276 struct folio *dst, struct folio *src,
8277 enum migrate_mode mode)
8278 {
8279 int ret = filemap_migrate_folio(mapping, dst, src, mode);
8280
8281 if (ret != MIGRATEPAGE_SUCCESS)
8282 return ret;
8283
8284 if (folio_test_ordered(src)) {
8285 folio_clear_ordered(src);
8286 folio_set_ordered(dst);
8287 }
8288
8289 return MIGRATEPAGE_SUCCESS;
8290 }
8291 #else
8292 #define btrfs_migrate_folio NULL
8293 #endif
8294
8295 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
8296 size_t length)
8297 {
8298 struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
8299 struct btrfs_fs_info *fs_info = inode->root->fs_info;
8300 struct extent_io_tree *tree = &inode->io_tree;
8301 struct extent_state *cached_state = NULL;
8302 u64 page_start = folio_pos(folio);
8303 u64 page_end = page_start + folio_size(folio) - 1;
8304 u64 cur;
8305 int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
8306
8307 /*
8308 * We have the folio locked, so no new ordered extent can be created on
8309 * this page, nor can any bio be submitted for this folio.
8310 *
8311 * But an already submitted bio can still be finished on this folio.
8312 * Furthermore, the endio function won't skip a folio that has Ordered
8313 * (Private2) already cleared, so it's possible for endio and
8314 * invalidate_folio to do the same ordered extent accounting twice
8315 * on one folio.
8316 *
8317 * So here we wait for any submitted bios to finish, so that we won't
8318 * do double ordered extent accounting on the same folio.
8319 */
8320 folio_wait_writeback(folio);
8321 wait_subpage_spinlock(&folio->page);
8322
8323 /*
8324 * For the subpage case, we have call sites like
8325 * btrfs_punch_hole_lock_range() which pass a range not aligned to
8326 * the sectorsize.
8327 * If the range doesn't cover the full folio, we don't need to and
8328 * shouldn't clear page extent mapped, as folio->private can still
8329 * record subpage dirty bits for other parts of the range.
8330 *
8331 * For cases that invalidate the full folio even when the range doesn't
8332 * cover the full folio, like invalidating the last folio, we're
8333 * still safe to wait for the ordered extent to finish.
8334 */
8335 if (!(offset == 0 && length == folio_size(folio))) {
8336 btrfs_release_folio(folio, GFP_NOFS);
8337 return;
8338 }
8339
8340 if (!inode_evicting)
8341 lock_extent(tree, page_start, page_end, &cached_state);
8342
8343 cur = page_start;
8344 while (cur < page_end) {
8345 struct btrfs_ordered_extent *ordered;
8346 u64 range_end;
8347 u32 range_len;
8348 u32 extra_flags = 0;
8349
8350 ordered = btrfs_lookup_first_ordered_range(inode, cur,
8351 page_end + 1 - cur);
8352 if (!ordered) {
8353 range_end = page_end;
8354 /*
8355 * No ordered extent covering this range, we are safe
8356 * to delete all extent states in the range.
8357 */
8358 extra_flags = EXTENT_CLEAR_ALL_BITS;
8359 goto next;
8360 }
8361 if (ordered->file_offset > cur) {
8362 /*
8363 * There is a range between [cur, oe->file_offset) not
8364 * covered by any ordered extent.
8365 * We are safe to delete all extent states, and handle
8366 * the ordered extent in the next iteration.
8367 */
8368 range_end = ordered->file_offset - 1;
8369 extra_flags = EXTENT_CLEAR_ALL_BITS;
8370 goto next;
8371 }
8372
8373 range_end = min(ordered->file_offset + ordered->num_bytes - 1,
8374 page_end);
8375 ASSERT(range_end + 1 - cur < U32_MAX);
8376 range_len = range_end + 1 - cur;
8377 if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
8378 /*
8379 * If Ordered (Private2) is cleared, it means endio has
8380 * already been executed for the range.
8381 * We can't delete the extent states as
8382 * btrfs_finish_ordered_io() may still use some of them.
8383 */
8384 goto next;
8385 }
8386 btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);
8387
8388 /*
8389 * IO on this page will never be started, so we need to account
8390 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
8391 * here, we must leave that up to the ordered extent completion.
8392 *
8393 * This will also unlock the range for incoming
8394 * btrfs_finish_ordered_io().
8395 */
8396 if (!inode_evicting)
8397 clear_extent_bit(tree, cur, range_end,
8398 EXTENT_DELALLOC |
8399 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8400 EXTENT_DEFRAG, &cached_state);
8401
8402 spin_lock_irq(&inode->ordered_tree.lock);
8403 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8404 ordered->truncated_len = min(ordered->truncated_len,
8405 cur - ordered->file_offset);
8406 spin_unlock_irq(&inode->ordered_tree.lock);
8407
8408 /*
8409 * If the ordered extent has finished, we're safe to delete all
8410 * the extent states of the range, otherwise
8411 * btrfs_finish_ordered_io() will get executed by endio for
8412 * other pages, so we can't delete extent states.
8413 */
8414 if (btrfs_dec_test_ordered_pending(inode, &ordered,
8415 cur, range_end + 1 - cur)) {
8416 btrfs_finish_ordered_io(ordered);
8417 /*
8418 * The ordered extent has finished, now we're again
8419 * safe to delete all extent states of the range.
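 * (btrfs_dec_test_ordered_pending() returned true, meaning the
 * whole ordered extent has now completed and it was up to us to
 * run btrfs_finish_ordered_io() above.)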
8420 */ 8421 extra_flags = EXTENT_CLEAR_ALL_BITS; 8422 } 8423 next: 8424 if (ordered) 8425 btrfs_put_ordered_extent(ordered); 8426 /* 8427 * Qgroup reserved space handler 8428 * Sector(s) here will be either: 8429 * 8430 * 1) Already written to disk or bio already finished 8431 * Then its QGROUP_RESERVED bit in io_tree is already cleared. 8432 * Qgroup will be handled by its qgroup_record then. 8433 * btrfs_qgroup_free_data() call will do nothing here. 8434 * 8435 * 2) Not written to disk yet 8436 * Then btrfs_qgroup_free_data() call will clear the 8437 * QGROUP_RESERVED bit of its io_tree, and free the qgroup 8438 * reserved data space. 8439 * Since the IO will never happen for this page. 8440 */ 8441 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur); 8442 if (!inode_evicting) { 8443 clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED | 8444 EXTENT_DELALLOC | EXTENT_UPTODATE | 8445 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG | 8446 extra_flags, &cached_state); 8447 } 8448 cur = range_end + 1; 8449 } 8450 /* 8451 * We have iterated through all ordered extents of the page, the page 8452 * should not have Ordered (Private2) anymore, or the above iteration 8453 * did something wrong. 8454 */ 8455 ASSERT(!folio_test_ordered(folio)); 8456 btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio)); 8457 if (!inode_evicting) 8458 __btrfs_release_folio(folio, GFP_NOFS); 8459 clear_page_extent_mapped(&folio->page); 8460 } 8461 8462 /* 8463 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 8464 * called from a page fault handler when a page is first dirtied. Hence we must 8465 * be careful to check for EOF conditions here. We set the page up correctly 8466 * for a written page which means we get ENOSPC checking when writing into 8467 * holes and correct delalloc and unwritten extent mapping on filesystems that 8468 * support these features. 8469 * 8470 * We are not allowed to take the i_mutex here so we have to play games to 8471 * protect against truncate races as the page could now be beyond EOF. Because 8472 * truncate_setsize() writes the inode size before removing pages, once we have 8473 * the page lock we can determine safely if the page is beyond EOF. If it is not 8474 * beyond EOF, then the page is guaranteed safe against truncation until we 8475 * unlock the page. 8476 */ 8477 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) 8478 { 8479 struct page *page = vmf->page; 8480 struct inode *inode = file_inode(vmf->vma->vm_file); 8481 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8482 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 8483 struct btrfs_ordered_extent *ordered; 8484 struct extent_state *cached_state = NULL; 8485 struct extent_changeset *data_reserved = NULL; 8486 unsigned long zero_start; 8487 loff_t size; 8488 vm_fault_t ret; 8489 int ret2; 8490 int reserved = 0; 8491 u64 reserved_space; 8492 u64 page_start; 8493 u64 page_end; 8494 u64 end; 8495 8496 reserved_space = PAGE_SIZE; 8497 8498 sb_start_pagefault(inode->i_sb); 8499 page_start = page_offset(page); 8500 page_end = page_start + PAGE_SIZE - 1; 8501 end = page_end; 8502 8503 /* 8504 * Reserving delalloc space after obtaining the page lock can lead to 8505 * deadlock. 
For example, if a dirty page is locked by this function
8506 * and the call to btrfs_delalloc_reserve_space() ends up triggering
8507 * dirty page write out, then the btrfs_writepages() function could
8508 * end up waiting indefinitely to get a lock on the page currently
8509 * being processed by the btrfs_page_mkwrite() function.
8510 */
8511 ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
8512 page_start, reserved_space);
8513 if (!ret2) {
8514 ret2 = file_update_time(vmf->vma->vm_file);
8515 reserved = 1;
8516 }
8517 if (ret2) {
8518 ret = vmf_error(ret2);
8519 if (reserved)
8520 goto out;
8521 goto out_noreserve;
8522 }
8523
8524 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8525 again:
8526 down_read(&BTRFS_I(inode)->i_mmap_lock);
8527 lock_page(page);
8528 size = i_size_read(inode);
8529
8530 if ((page->mapping != inode->i_mapping) ||
8531 (page_start >= size)) {
8532 /* page got truncated out from underneath us */
8533 goto out_unlock;
8534 }
8535 wait_on_page_writeback(page);
8536
8537 lock_extent(io_tree, page_start, page_end, &cached_state);
8538 ret2 = set_page_extent_mapped(page);
8539 if (ret2 < 0) {
8540 ret = vmf_error(ret2);
8541 unlock_extent(io_tree, page_start, page_end, &cached_state);
8542 goto out_unlock;
8543 }
8544
8545 /*
8546 * We can't set the delalloc bits if there are pending ordered
8547 * extents. Drop our locks and wait for them to finish.
8548 */
8549 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8550 PAGE_SIZE);
8551 if (ordered) {
8552 unlock_extent(io_tree, page_start, page_end, &cached_state);
8553 unlock_page(page);
8554 up_read(&BTRFS_I(inode)->i_mmap_lock);
8555 btrfs_start_ordered_extent(ordered, 1);
8556 btrfs_put_ordered_extent(ordered);
8557 goto again;
8558 }
8559
8560 if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8561 reserved_space = round_up(size - page_start,
8562 fs_info->sectorsize);
8563 if (reserved_space < PAGE_SIZE) {
8564 end = page_start + reserved_space - 1;
8565 btrfs_delalloc_release_space(BTRFS_I(inode),
8566 data_reserved, page_start,
8567 PAGE_SIZE - reserved_space, true);
8568 }
8569 }
8570
8571 /*
8572 * page_mkwrite gets called when the page is first dirtied after it's
8573 * faulted in, but write(2) could also dirty a page and set delalloc
8574 * bits, thus in this case, for space accounting reasons, we still need
8575 * to clear any delalloc bits within this page range since we have to
8576 * reserve data&meta space before lock_page() (see above comments).
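 *
 * Clearing those bits first keeps the accounting balanced: the
 * btrfs_set_extent_delalloc() call below re-accounts the range
 * against the reservation made at the top of this function.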
8577 */
8578 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8579 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8580 EXTENT_DEFRAG, &cached_state);
8581
8582 ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8583 &cached_state);
8584 if (ret2) {
8585 unlock_extent(io_tree, page_start, page_end, &cached_state);
8586 ret = VM_FAULT_SIGBUS;
8587 goto out_unlock;
8588 }
8589
8590 /* page is wholly or partially inside EOF */
8591 if (page_start + PAGE_SIZE > size)
8592 zero_start = offset_in_page(size);
8593 else
8594 zero_start = PAGE_SIZE;
8595
8596 if (zero_start != PAGE_SIZE)
8597 memzero_page(page, zero_start, PAGE_SIZE - zero_start);
8598
8599 btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
8600 btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
8601 btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
8602
8603 btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
8604
8605 unlock_extent(io_tree, page_start, page_end, &cached_state);
8606 up_read(&BTRFS_I(inode)->i_mmap_lock);
8607
8608 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8609 sb_end_pagefault(inode->i_sb);
8610 extent_changeset_free(data_reserved);
8611 return VM_FAULT_LOCKED;
8612
8613 out_unlock:
8614 unlock_page(page);
8615 up_read(&BTRFS_I(inode)->i_mmap_lock);
8616 out:
8617 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8618 btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
8619 reserved_space, (ret != 0));
8620 out_noreserve:
8621 sb_end_pagefault(inode->i_sb);
8622 extent_changeset_free(data_reserved);
8623 return ret;
8624 }
8625
8626 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
8627 {
8628 struct btrfs_truncate_control control = {
8629 .inode = inode,
8630 .ino = btrfs_ino(inode),
8631 .min_type = BTRFS_EXTENT_DATA_KEY,
8632 .clear_extent_range = true,
8633 };
8634 struct btrfs_root *root = inode->root;
8635 struct btrfs_fs_info *fs_info = root->fs_info;
8636 struct btrfs_block_rsv *rsv;
8637 int ret;
8638 struct btrfs_trans_handle *trans;
8639 u64 mask = fs_info->sectorsize - 1;
8640 u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8641
8642 if (!skip_writeback) {
8643 ret = btrfs_wait_ordered_range(&inode->vfs_inode,
8644 inode->vfs_inode.i_size & (~mask),
8645 (u64)-1);
8646 if (ret)
8647 return ret;
8648 }
8649
8650 /*
8651 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
8652 * things going on here:
8653 *
8654 * 1) We need to reserve space to update our inode.
8655 *
8656 * 2) We need to have something to cache all the space that is going to
8657 * be freed up by the truncate operation, but also have some slack
8658 * space reserved in case it uses space during the truncate (thank you
8659 * very much snapshotting).
8660 *
8661 * And we need these to be separate. The fact is we can use a lot of
8662 * space doing the truncate, and we have no earthly idea how much space
8663 * we will use, so we need the truncate reservation to be separate so it
8664 * doesn't end up using space reserved for updating the inode. We also
8665 * need to be able to stop the transaction and start a new one, which
8666 * means we need to be able to update the inode several times, and we
8667 * have no way of knowing how many times that will be, so we can't just
8668 * reserve 1 item for the entirety of the operation, so that has to be
8669 * done separately as well.
8670 *
8671 * So that leaves us with
8672 *
8673 * 1) rsv - for the truncate reservation, which we will steal from the
8674 * transaction reservation.
8675 * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for
8676 * updating the inode.
8677 */
8678 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
8679 if (!rsv)
8680 return -ENOMEM;
8681 rsv->size = min_size;
8682 rsv->failfast = true;
8683
8684 /*
8685 * 1 for the truncate slack space
8686 * 1 for updating the inode.
8687 */
8688 trans = btrfs_start_transaction(root, 2);
8689 if (IS_ERR(trans)) {
8690 ret = PTR_ERR(trans);
8691 goto out;
8692 }
8693
8694 /* Migrate the slack space for the truncate to our reserve */
8695 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
8696 min_size, false);
8697 BUG_ON(ret);
8698
8699 trans->block_rsv = rsv;
8700
8701 while (1) {
8702 struct extent_state *cached_state = NULL;
8703 const u64 new_size = inode->vfs_inode.i_size;
8704 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
8705
8706 control.new_size = new_size;
8707 lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
8708 /*
8709 * We want to drop from the next block forward in case this new
8710 * size is not block aligned since we will be keeping the last
8711 * block of the extent just the way it is.
8712 */
8713 btrfs_drop_extent_map_range(inode,
8714 ALIGN(new_size, fs_info->sectorsize),
8715 (u64)-1, false);
8716
8717 ret = btrfs_truncate_inode_items(trans, root, &control);
8718
8719 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
8720 btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
8721
8722 unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
8723
8724 trans->block_rsv = &fs_info->trans_block_rsv;
8725 if (ret != -ENOSPC && ret != -EAGAIN)
8726 break;
8727
8728 ret = btrfs_update_inode(trans, root, inode);
8729 if (ret)
8730 break;
8731
8732 btrfs_end_transaction(trans);
8733 btrfs_btree_balance_dirty(fs_info);
8734
8735 trans = btrfs_start_transaction(root, 2);
8736 if (IS_ERR(trans)) {
8737 ret = PTR_ERR(trans);
8738 trans = NULL;
8739 break;
8740 }
8741
8742 btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
8743 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
8744 rsv, min_size, false);
8745 BUG_ON(ret); /* shouldn't happen */
8746 trans->block_rsv = rsv;
8747 }
8748
8749 /*
8750 * We can't call btrfs_truncate_block inside a trans handle as we could
8751 * deadlock with freeze. If we got BTRFS_NEED_TRUNCATE_BLOCK then we
8752 * know we've truncated everything except the last little bit, and can
8753 * do btrfs_truncate_block and then update the disk_i_size.
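 *
 * (That is why, below, the transaction is ended before calling
 * btrfs_truncate_block() and a fresh one is started afterwards just
 * to write out the updated disk_i_size.)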
8754 */ 8755 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) { 8756 btrfs_end_transaction(trans); 8757 btrfs_btree_balance_dirty(fs_info); 8758 8759 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0); 8760 if (ret) 8761 goto out; 8762 trans = btrfs_start_transaction(root, 1); 8763 if (IS_ERR(trans)) { 8764 ret = PTR_ERR(trans); 8765 goto out; 8766 } 8767 btrfs_inode_safe_disk_i_size_write(inode, 0); 8768 } 8769 8770 if (trans) { 8771 int ret2; 8772 8773 trans->block_rsv = &fs_info->trans_block_rsv; 8774 ret2 = btrfs_update_inode(trans, root, inode); 8775 if (ret2 && !ret) 8776 ret = ret2; 8777 8778 ret2 = btrfs_end_transaction(trans); 8779 if (ret2 && !ret) 8780 ret = ret2; 8781 btrfs_btree_balance_dirty(fs_info); 8782 } 8783 out: 8784 btrfs_free_block_rsv(fs_info, rsv); 8785 /* 8786 * So if we truncate and then write and fsync we normally would just 8787 * write the extents that changed, which is a problem if we need to 8788 * first truncate that entire inode. So set this flag so we write out 8789 * all of the extents in the inode to the sync log so we're completely 8790 * safe. 8791 * 8792 * If no extents were dropped or trimmed we don't need to force the next 8793 * fsync to truncate all the inode's items from the log and re-log them 8794 * all. This means the truncate operation did not change the file size, 8795 * or changed it to a smaller size but there was only an implicit hole 8796 * between the old i_size and the new i_size, and there were no prealloc 8797 * extents beyond i_size to drop. 8798 */ 8799 if (control.extents_found > 0) 8800 btrfs_set_inode_full_sync(inode); 8801 8802 return ret; 8803 } 8804 8805 struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns, 8806 struct inode *dir) 8807 { 8808 struct inode *inode; 8809 8810 inode = new_inode(dir->i_sb); 8811 if (inode) { 8812 /* 8813 * Subvolumes don't inherit the sgid bit or the parent's gid if 8814 * the parent's sgid bit is set. This is probably a bug. 
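 * (inode_init_owner() is called with a NULL dir below, so the usual
 * sgid/gid inheritance logic never sees the parent directory.)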
8815 */ 8816 inode_init_owner(mnt_userns, inode, NULL, 8817 S_IFDIR | (~current_umask() & S_IRWXUGO)); 8818 inode->i_op = &btrfs_dir_inode_operations; 8819 inode->i_fop = &btrfs_dir_file_operations; 8820 } 8821 return inode; 8822 } 8823 8824 struct inode *btrfs_alloc_inode(struct super_block *sb) 8825 { 8826 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 8827 struct btrfs_inode *ei; 8828 struct inode *inode; 8829 8830 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL); 8831 if (!ei) 8832 return NULL; 8833 8834 ei->root = NULL; 8835 ei->generation = 0; 8836 ei->last_trans = 0; 8837 ei->last_sub_trans = 0; 8838 ei->logged_trans = 0; 8839 ei->delalloc_bytes = 0; 8840 ei->new_delalloc_bytes = 0; 8841 ei->defrag_bytes = 0; 8842 ei->disk_i_size = 0; 8843 ei->flags = 0; 8844 ei->ro_flags = 0; 8845 ei->csum_bytes = 0; 8846 ei->index_cnt = (u64)-1; 8847 ei->dir_index = 0; 8848 ei->last_unlink_trans = 0; 8849 ei->last_reflink_trans = 0; 8850 ei->last_log_commit = 0; 8851 8852 spin_lock_init(&ei->lock); 8853 spin_lock_init(&ei->io_failure_lock); 8854 ei->outstanding_extents = 0; 8855 if (sb->s_magic != BTRFS_TEST_MAGIC) 8856 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, 8857 BTRFS_BLOCK_RSV_DELALLOC); 8858 ei->runtime_flags = 0; 8859 ei->prop_compress = BTRFS_COMPRESS_NONE; 8860 ei->defrag_compress = BTRFS_COMPRESS_NONE; 8861 8862 ei->delayed_node = NULL; 8863 8864 ei->i_otime.tv_sec = 0; 8865 ei->i_otime.tv_nsec = 0; 8866 8867 inode = &ei->vfs_inode; 8868 extent_map_tree_init(&ei->extent_tree); 8869 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); 8870 ei->io_tree.inode = ei; 8871 extent_io_tree_init(fs_info, &ei->file_extent_tree, 8872 IO_TREE_INODE_FILE_EXTENT); 8873 ei->io_failure_tree = RB_ROOT; 8874 atomic_set(&ei->sync_writers, 0); 8875 mutex_init(&ei->log_mutex); 8876 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 8877 INIT_LIST_HEAD(&ei->delalloc_inodes); 8878 INIT_LIST_HEAD(&ei->delayed_iput); 8879 RB_CLEAR_NODE(&ei->rb_node); 8880 init_rwsem(&ei->i_mmap_lock); 8881 8882 return inode; 8883 } 8884 8885 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 8886 void btrfs_test_destroy_inode(struct inode *inode) 8887 { 8888 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 8889 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8890 } 8891 #endif 8892 8893 void btrfs_free_inode(struct inode *inode) 8894 { 8895 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8896 } 8897 8898 void btrfs_destroy_inode(struct inode *vfs_inode) 8899 { 8900 struct btrfs_ordered_extent *ordered; 8901 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 8902 struct btrfs_root *root = inode->root; 8903 bool freespace_inode; 8904 8905 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); 8906 WARN_ON(vfs_inode->i_data.nrpages); 8907 WARN_ON(inode->block_rsv.reserved); 8908 WARN_ON(inode->block_rsv.size); 8909 WARN_ON(inode->outstanding_extents); 8910 if (!S_ISDIR(vfs_inode->i_mode)) { 8911 WARN_ON(inode->delalloc_bytes); 8912 WARN_ON(inode->new_delalloc_bytes); 8913 } 8914 WARN_ON(inode->csum_bytes); 8915 WARN_ON(inode->defrag_bytes); 8916 8917 /* 8918 * This can happen where we create an inode, but somebody else also 8919 * created the same inode and we need to destroy the one we already 8920 * created. 8921 */ 8922 if (!root) 8923 return; 8924 8925 /* 8926 * If this is a free space inode do not take the ordered extents lockdep 8927 * map. 
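 * Free space inodes are written out during transaction commit, where
 * acquiring that lockdep map could presumably trigger false positive
 * lockdep warnings, hence the freespace_inode check below.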
8928 */
8929 freespace_inode = btrfs_is_free_space_inode(inode);
8930
8931 while (1) {
8932 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8933 if (!ordered)
8934 break;
8935 else {
8936 btrfs_err(root->fs_info,
8937 "found ordered extent %llu %llu on inode cleanup",
8938 ordered->file_offset, ordered->num_bytes);
8939
8940 if (!freespace_inode)
8941 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
8942
8943 btrfs_remove_ordered_extent(inode, ordered);
8944 btrfs_put_ordered_extent(ordered);
8945 btrfs_put_ordered_extent(ordered);
8946 }
8947 }
8948 btrfs_qgroup_check_reserved_leak(inode);
8949 inode_tree_del(inode);
8950 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8951 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8952 btrfs_put_root(inode->root);
8953 }
8954
8955 int btrfs_drop_inode(struct inode *inode)
8956 {
8957 struct btrfs_root *root = BTRFS_I(inode)->root;
8958
8959 if (root == NULL)
8960 return 1;
8961
8962 /* the snap/subvol tree is being deleted */
8963 if (btrfs_root_refs(&root->root_item) == 0)
8964 return 1;
8965 else
8966 return generic_drop_inode(inode);
8967 }
8968
8969 static void init_once(void *foo)
8970 {
8971 struct btrfs_inode *ei = foo;
8972
8973 inode_init_once(&ei->vfs_inode);
8974 }
8975
8976 void __cold btrfs_destroy_cachep(void)
8977 {
8978 /*
8979 * Make sure all delayed rcu free inodes are flushed before we
8980 * destroy cache.
8981 */
8982 rcu_barrier();
8983 bioset_exit(&btrfs_dio_bioset);
8984 kmem_cache_destroy(btrfs_inode_cachep);
8985 }
8986
8987 int __init btrfs_init_cachep(void)
8988 {
8989 btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8990 sizeof(struct btrfs_inode), 0,
8991 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
8992 init_once);
8993 if (!btrfs_inode_cachep)
8994 goto fail;
8995
8996 if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
8997 offsetof(struct btrfs_dio_private, bio),
8998 BIOSET_NEED_BVECS))
8999 goto fail;
9000
9001 return 0;
9002 fail:
9003 btrfs_destroy_cachep();
9004 return -ENOMEM;
9005 }
9006
9007 static int btrfs_getattr(struct user_namespace *mnt_userns,
9008 const struct path *path, struct kstat *stat,
9009 u32 request_mask, unsigned int flags)
9010 {
9011 u64 delalloc_bytes;
9012 u64 inode_bytes;
9013 struct inode *inode = d_inode(path->dentry);
9014 u32 blocksize = inode->i_sb->s_blocksize;
9015 u32 bi_flags = BTRFS_I(inode)->flags;
9016 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
9017
9018 stat->result_mask |= STATX_BTIME;
9019 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
9020 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
9021 if (bi_flags & BTRFS_INODE_APPEND)
9022 stat->attributes |= STATX_ATTR_APPEND;
9023 if (bi_flags & BTRFS_INODE_COMPRESS)
9024 stat->attributes |= STATX_ATTR_COMPRESSED;
9025 if (bi_flags & BTRFS_INODE_IMMUTABLE)
9026 stat->attributes |= STATX_ATTR_IMMUTABLE;
9027 if (bi_flags & BTRFS_INODE_NODUMP)
9028 stat->attributes |= STATX_ATTR_NODUMP;
9029 if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
9030 stat->attributes |= STATX_ATTR_VERITY;
9031
9032 stat->attributes_mask |= (STATX_ATTR_APPEND |
9033 STATX_ATTR_COMPRESSED |
9034 STATX_ATTR_IMMUTABLE |
9035 STATX_ATTR_NODUMP);
9036
9037 generic_fillattr(mnt_userns, inode, stat);
9038 stat->dev = BTRFS_I(inode)->root->anon_dev;
9039
9040 spin_lock(&BTRFS_I(inode)->lock);
9041 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
9042 inode_bytes = inode_get_bytes(inode);
9043 spin_unlock(&BTRFS_I(inode)->lock);
9044 stat->blocks = (ALIGN(inode_bytes, blocksize) +
9045
ALIGN(delalloc_bytes, blocksize)) >> 9; 9046 return 0; 9047 } 9048 9049 static int btrfs_rename_exchange(struct inode *old_dir, 9050 struct dentry *old_dentry, 9051 struct inode *new_dir, 9052 struct dentry *new_dentry) 9053 { 9054 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 9055 struct btrfs_trans_handle *trans; 9056 unsigned int trans_num_items; 9057 struct btrfs_root *root = BTRFS_I(old_dir)->root; 9058 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 9059 struct inode *new_inode = new_dentry->d_inode; 9060 struct inode *old_inode = old_dentry->d_inode; 9061 struct timespec64 ctime = current_time(old_inode); 9062 struct btrfs_rename_ctx old_rename_ctx; 9063 struct btrfs_rename_ctx new_rename_ctx; 9064 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 9065 u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); 9066 u64 old_idx = 0; 9067 u64 new_idx = 0; 9068 int ret; 9069 int ret2; 9070 bool need_abort = false; 9071 struct fscrypt_name old_fname, new_fname; 9072 struct fscrypt_str *old_name, *new_name; 9073 9074 /* 9075 * For non-subvolumes allow exchange only within one subvolume, in the 9076 * same inode namespace. Two subvolumes (represented as directory) can 9077 * be exchanged as they're a logical link and have a fixed inode number. 9078 */ 9079 if (root != dest && 9080 (old_ino != BTRFS_FIRST_FREE_OBJECTID || 9081 new_ino != BTRFS_FIRST_FREE_OBJECTID)) 9082 return -EXDEV; 9083 9084 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 9085 if (ret) 9086 return ret; 9087 9088 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 9089 if (ret) { 9090 fscrypt_free_filename(&old_fname); 9091 return ret; 9092 } 9093 9094 old_name = &old_fname.disk_name; 9095 new_name = &new_fname.disk_name; 9096 9097 /* close the race window with snapshot create/destroy ioctl */ 9098 if (old_ino == BTRFS_FIRST_FREE_OBJECTID || 9099 new_ino == BTRFS_FIRST_FREE_OBJECTID) 9100 down_read(&fs_info->subvol_sem); 9101 9102 /* 9103 * For each inode: 9104 * 1 to remove old dir item 9105 * 1 to remove old dir index 9106 * 1 to add new dir item 9107 * 1 to add new dir index 9108 * 1 to update parent inode 9109 * 9110 * If the parents are the same, we only need to account for one 9111 */ 9112 trans_num_items = (old_dir == new_dir ? 9 : 10); 9113 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9114 /* 9115 * 1 to remove old root ref 9116 * 1 to remove old root backref 9117 * 1 to add new root ref 9118 * 1 to add new root backref 9119 */ 9120 trans_num_items += 4; 9121 } else { 9122 /* 9123 * 1 to update inode item 9124 * 1 to remove old inode ref 9125 * 1 to add new inode ref 9126 */ 9127 trans_num_items += 3; 9128 } 9129 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 9130 trans_num_items += 4; 9131 else 9132 trans_num_items += 3; 9133 trans = btrfs_start_transaction(root, trans_num_items); 9134 if (IS_ERR(trans)) { 9135 ret = PTR_ERR(trans); 9136 goto out_notrans; 9137 } 9138 9139 if (dest != root) { 9140 ret = btrfs_record_root_in_trans(trans, dest); 9141 if (ret) 9142 goto out_fail; 9143 } 9144 9145 /* 9146 * We need to find a free sequence number both in the source and 9147 * in the destination directory for the exchange. 9148 */ 9149 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); 9150 if (ret) 9151 goto out_fail; 9152 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); 9153 if (ret) 9154 goto out_fail; 9155 9156 BTRFS_I(old_inode)->dir_index = 0ULL; 9157 BTRFS_I(new_inode)->dir_index = 0ULL; 9158 9159 /* Reference for the source. 
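 * Insert the ref in the destination tree first: if the second insert
 * (for the dest inode, below) fails, need_abort tells us the whole
 * transaction must be aborted, since this first ref cannot simply be
 * rolled back.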
*/ 9160 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9161 /* force full log commit if subvolume involved. */ 9162 btrfs_set_log_full_commit(trans); 9163 } else { 9164 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino, 9165 btrfs_ino(BTRFS_I(new_dir)), 9166 old_idx); 9167 if (ret) 9168 goto out_fail; 9169 need_abort = true; 9170 } 9171 9172 /* And now for the dest. */ 9173 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 9174 /* force full log commit if subvolume involved. */ 9175 btrfs_set_log_full_commit(trans); 9176 } else { 9177 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino, 9178 btrfs_ino(BTRFS_I(old_dir)), 9179 new_idx); 9180 if (ret) { 9181 if (need_abort) 9182 btrfs_abort_transaction(trans, ret); 9183 goto out_fail; 9184 } 9185 } 9186 9187 /* Update inode version and ctime/mtime. */ 9188 inode_inc_iversion(old_dir); 9189 inode_inc_iversion(new_dir); 9190 inode_inc_iversion(old_inode); 9191 inode_inc_iversion(new_inode); 9192 old_dir->i_mtime = ctime; 9193 old_dir->i_ctime = ctime; 9194 new_dir->i_mtime = ctime; 9195 new_dir->i_ctime = ctime; 9196 old_inode->i_ctime = ctime; 9197 new_inode->i_ctime = ctime; 9198 9199 if (old_dentry->d_parent != new_dentry->d_parent) { 9200 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9201 BTRFS_I(old_inode), 1); 9202 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), 9203 BTRFS_I(new_inode), 1); 9204 } 9205 9206 /* src is a subvolume */ 9207 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9208 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 9209 } else { /* src is an inode */ 9210 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 9211 BTRFS_I(old_dentry->d_inode), 9212 old_name, &old_rename_ctx); 9213 if (!ret) 9214 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 9215 } 9216 if (ret) { 9217 btrfs_abort_transaction(trans, ret); 9218 goto out_fail; 9219 } 9220 9221 /* dest is a subvolume */ 9222 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 9223 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 9224 } else { /* dest is an inode */ 9225 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir), 9226 BTRFS_I(new_dentry->d_inode), 9227 new_name, &new_rename_ctx); 9228 if (!ret) 9229 ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode)); 9230 } 9231 if (ret) { 9232 btrfs_abort_transaction(trans, ret); 9233 goto out_fail; 9234 } 9235 9236 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9237 new_name, 0, old_idx); 9238 if (ret) { 9239 btrfs_abort_transaction(trans, ret); 9240 goto out_fail; 9241 } 9242 9243 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), 9244 old_name, 0, new_idx); 9245 if (ret) { 9246 btrfs_abort_transaction(trans, ret); 9247 goto out_fail; 9248 } 9249 9250 if (old_inode->i_nlink == 1) 9251 BTRFS_I(old_inode)->dir_index = old_idx; 9252 if (new_inode->i_nlink == 1) 9253 BTRFS_I(new_inode)->dir_index = new_idx; 9254 9255 /* 9256 * Now pin the logs of the roots. We do it to ensure that no other task 9257 * can sync the logs while we are in progress with the rename, because 9258 * that could result in an inconsistency in case any of the inodes that 9259 * are part of this rename operation were logged before. 9260 */ 9261 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9262 btrfs_pin_log_trans(root); 9263 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 9264 btrfs_pin_log_trans(dest); 9265 9266 /* Do the log updates for all inodes. 
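 * Subvolume roots were already handled above by forcing a full
 * transaction commit, so only regular inodes get their new name
 * recorded in the log tree here. Because both logs are pinned at this
 * point, a concurrent fsync cannot sync a log that reflects only half
 * of the exchange.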
*/ 9267 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9268 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 9269 old_rename_ctx.index, new_dentry->d_parent); 9270 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 9271 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir), 9272 new_rename_ctx.index, old_dentry->d_parent); 9273 9274 /* Now unpin the logs. */ 9275 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9276 btrfs_end_log_trans(root); 9277 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 9278 btrfs_end_log_trans(dest); 9279 out_fail: 9280 ret2 = btrfs_end_transaction(trans); 9281 ret = ret ? ret : ret2; 9282 out_notrans: 9283 if (new_ino == BTRFS_FIRST_FREE_OBJECTID || 9284 old_ino == BTRFS_FIRST_FREE_OBJECTID) 9285 up_read(&fs_info->subvol_sem); 9286 9287 fscrypt_free_filename(&new_fname); 9288 fscrypt_free_filename(&old_fname); 9289 return ret; 9290 } 9291 9292 static struct inode *new_whiteout_inode(struct user_namespace *mnt_userns, 9293 struct inode *dir) 9294 { 9295 struct inode *inode; 9296 9297 inode = new_inode(dir->i_sb); 9298 if (inode) { 9299 inode_init_owner(mnt_userns, inode, dir, 9300 S_IFCHR | WHITEOUT_MODE); 9301 inode->i_op = &btrfs_special_inode_operations; 9302 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); 9303 } 9304 return inode; 9305 } 9306 9307 static int btrfs_rename(struct user_namespace *mnt_userns, 9308 struct inode *old_dir, struct dentry *old_dentry, 9309 struct inode *new_dir, struct dentry *new_dentry, 9310 unsigned int flags) 9311 { 9312 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 9313 struct btrfs_new_inode_args whiteout_args = { 9314 .dir = old_dir, 9315 .dentry = old_dentry, 9316 }; 9317 struct btrfs_trans_handle *trans; 9318 unsigned int trans_num_items; 9319 struct btrfs_root *root = BTRFS_I(old_dir)->root; 9320 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 9321 struct inode *new_inode = d_inode(new_dentry); 9322 struct inode *old_inode = d_inode(old_dentry); 9323 struct btrfs_rename_ctx rename_ctx; 9324 u64 index = 0; 9325 int ret; 9326 int ret2; 9327 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 9328 struct fscrypt_name old_fname, new_fname; 9329 9330 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 9331 return -EPERM; 9332 9333 /* we only allow rename subvolume link between subvolumes */ 9334 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9335 return -EXDEV; 9336 9337 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 9338 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) 9339 return -ENOTEMPTY; 9340 9341 if (S_ISDIR(old_inode->i_mode) && new_inode && 9342 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 9343 return -ENOTEMPTY; 9344 9345 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 9346 if (ret) 9347 return ret; 9348 9349 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 9350 if (ret) { 9351 fscrypt_free_filename(&old_fname); 9352 return ret; 9353 } 9354 9355 /* check for collisions, even if the name isn't there */ 9356 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name); 9357 if (ret) { 9358 if (ret == -EEXIST) { 9359 /* we shouldn't get 9360 * eexist without a new_inode */ 9361 if (WARN_ON(!new_inode)) { 9362 goto out_fscrypt_names; 9363 } 9364 } else { 9365 /* maybe -EOVERFLOW */ 9366 goto out_fscrypt_names; 9367 } 9368 } 9369 ret = 0; 9370 9371 /* 9372 * we're using rename to replace one file with another. 
Start IO on it 9373 * now so we don't add too much work to the end of the transaction 9374 */ 9375 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 9376 filemap_flush(old_inode->i_mapping); 9377 9378 if (flags & RENAME_WHITEOUT) { 9379 whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir); 9380 if (!whiteout_args.inode) { 9381 ret = -ENOMEM; 9382 goto out_fscrypt_names; 9383 } 9384 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); 9385 if (ret) 9386 goto out_whiteout_inode; 9387 } else { 9388 /* 1 to update the old parent inode. */ 9389 trans_num_items = 1; 9390 } 9391 9392 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9393 /* Close the race window with snapshot create/destroy ioctl */ 9394 down_read(&fs_info->subvol_sem); 9395 /* 9396 * 1 to remove old root ref 9397 * 1 to remove old root backref 9398 * 1 to add new root ref 9399 * 1 to add new root backref 9400 */ 9401 trans_num_items += 4; 9402 } else { 9403 /* 9404 * 1 to update inode 9405 * 1 to remove old inode ref 9406 * 1 to add new inode ref 9407 */ 9408 trans_num_items += 3; 9409 } 9410 /* 9411 * 1 to remove old dir item 9412 * 1 to remove old dir index 9413 * 1 to add new dir item 9414 * 1 to add new dir index 9415 */ 9416 trans_num_items += 4; 9417 /* 1 to update new parent inode if it's not the same as the old parent */ 9418 if (new_dir != old_dir) 9419 trans_num_items++; 9420 if (new_inode) { 9421 /* 9422 * 1 to update inode 9423 * 1 to remove inode ref 9424 * 1 to remove dir item 9425 * 1 to remove dir index 9426 * 1 to possibly add orphan item 9427 */ 9428 trans_num_items += 5; 9429 } 9430 trans = btrfs_start_transaction(root, trans_num_items); 9431 if (IS_ERR(trans)) { 9432 ret = PTR_ERR(trans); 9433 goto out_notrans; 9434 } 9435 9436 if (dest != root) { 9437 ret = btrfs_record_root_in_trans(trans, dest); 9438 if (ret) 9439 goto out_fail; 9440 } 9441 9442 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 9443 if (ret) 9444 goto out_fail; 9445 9446 BTRFS_I(old_inode)->dir_index = 0ULL; 9447 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9448 /* force full log commit if subvolume involved. 
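 * Renaming a subvolume updates root refs and backrefs in the tree of
 * tree roots, which the per-subvolume log tree cannot describe, so the
 * next fsync of this inode has to fall back to committing the whole
 * transaction.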
*/ 9449 btrfs_set_log_full_commit(trans); 9450 } else { 9451 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name, 9452 old_ino, btrfs_ino(BTRFS_I(new_dir)), 9453 index); 9454 if (ret) 9455 goto out_fail; 9456 } 9457 9458 inode_inc_iversion(old_dir); 9459 inode_inc_iversion(new_dir); 9460 inode_inc_iversion(old_inode); 9461 old_dir->i_mtime = current_time(old_dir); 9462 old_dir->i_ctime = old_dir->i_mtime; 9463 new_dir->i_mtime = old_dir->i_mtime; 9464 new_dir->i_ctime = old_dir->i_mtime; 9465 old_inode->i_ctime = old_dir->i_mtime; 9466 9467 if (old_dentry->d_parent != new_dentry->d_parent) 9468 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9469 BTRFS_I(old_inode), 1); 9470 9471 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9472 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 9473 } else { 9474 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 9475 BTRFS_I(d_inode(old_dentry)), 9476 &old_fname.disk_name, &rename_ctx); 9477 if (!ret) 9478 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 9479 } 9480 if (ret) { 9481 btrfs_abort_transaction(trans, ret); 9482 goto out_fail; 9483 } 9484 9485 if (new_inode) { 9486 inode_inc_iversion(new_inode); 9487 new_inode->i_ctime = current_time(new_inode); 9488 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 9489 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9490 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 9491 BUG_ON(new_inode->i_nlink == 0); 9492 } else { 9493 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), 9494 BTRFS_I(d_inode(new_dentry)), 9495 &new_fname.disk_name); 9496 } 9497 if (!ret && new_inode->i_nlink == 0) 9498 ret = btrfs_orphan_add(trans, 9499 BTRFS_I(d_inode(new_dentry))); 9500 if (ret) { 9501 btrfs_abort_transaction(trans, ret); 9502 goto out_fail; 9503 } 9504 } 9505 9506 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9507 &new_fname.disk_name, 0, index); 9508 if (ret) { 9509 btrfs_abort_transaction(trans, ret); 9510 goto out_fail; 9511 } 9512 9513 if (old_inode->i_nlink == 1) 9514 BTRFS_I(old_inode)->dir_index = index; 9515 9516 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9517 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 9518 rename_ctx.index, new_dentry->d_parent); 9519 9520 if (flags & RENAME_WHITEOUT) { 9521 ret = btrfs_create_new_inode(trans, &whiteout_args); 9522 if (ret) { 9523 btrfs_abort_transaction(trans, ret); 9524 goto out_fail; 9525 } else { 9526 unlock_new_inode(whiteout_args.inode); 9527 iput(whiteout_args.inode); 9528 whiteout_args.inode = NULL; 9529 } 9530 } 9531 out_fail: 9532 ret2 = btrfs_end_transaction(trans); 9533 ret = ret ? 
ret : ret2; 9534 out_notrans: 9535 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9536 up_read(&fs_info->subvol_sem); 9537 if (flags & RENAME_WHITEOUT) 9538 btrfs_new_inode_args_destroy(&whiteout_args); 9539 out_whiteout_inode: 9540 if (flags & RENAME_WHITEOUT) 9541 iput(whiteout_args.inode); 9542 out_fscrypt_names: 9543 fscrypt_free_filename(&old_fname); 9544 fscrypt_free_filename(&new_fname); 9545 return ret; 9546 } 9547 9548 static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir, 9549 struct dentry *old_dentry, struct inode *new_dir, 9550 struct dentry *new_dentry, unsigned int flags) 9551 { 9552 int ret; 9553 9554 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 9555 return -EINVAL; 9556 9557 if (flags & RENAME_EXCHANGE) 9558 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir, 9559 new_dentry); 9560 else 9561 ret = btrfs_rename(mnt_userns, old_dir, old_dentry, new_dir, 9562 new_dentry, flags); 9563 9564 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); 9565 9566 return ret; 9567 } 9568 9569 struct btrfs_delalloc_work { 9570 struct inode *inode; 9571 struct completion completion; 9572 struct list_head list; 9573 struct btrfs_work work; 9574 }; 9575 9576 static void btrfs_run_delalloc_work(struct btrfs_work *work) 9577 { 9578 struct btrfs_delalloc_work *delalloc_work; 9579 struct inode *inode; 9580 9581 delalloc_work = container_of(work, struct btrfs_delalloc_work, 9582 work); 9583 inode = delalloc_work->inode; 9584 filemap_flush(inode->i_mapping); 9585 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9586 &BTRFS_I(inode)->runtime_flags)) 9587 filemap_flush(inode->i_mapping); 9588 9589 iput(inode); 9590 complete(&delalloc_work->completion); 9591 } 9592 9593 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 9594 { 9595 struct btrfs_delalloc_work *work; 9596 9597 work = kmalloc(sizeof(*work), GFP_NOFS); 9598 if (!work) 9599 return NULL; 9600 9601 init_completion(&work->completion); 9602 INIT_LIST_HEAD(&work->list); 9603 work->inode = inode; 9604 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); 9605 9606 return work; 9607 } 9608 9609 /* 9610 * some fairly slow code that needs optimization. This walks the list 9611 * of all the inodes with pending delalloc and forces them to disk. 
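 * Roughly: for a full flush (wbc->nr_to_write == LONG_MAX) every inode
 * gets its own btrfs_delalloc_work queued on the flush_workers
 * workqueue and we wait for all of them at the end; for a bounded
 * flush, writeback is issued synchronously via filemap_fdatawrite_wbc()
 * until the nr_to_write budget is used up.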
9612 */ 9613 static int start_delalloc_inodes(struct btrfs_root *root, 9614 struct writeback_control *wbc, bool snapshot, 9615 bool in_reclaim_context) 9616 { 9617 struct btrfs_inode *binode; 9618 struct inode *inode; 9619 struct btrfs_delalloc_work *work, *next; 9620 struct list_head works; 9621 struct list_head splice; 9622 int ret = 0; 9623 bool full_flush = wbc->nr_to_write == LONG_MAX; 9624 9625 INIT_LIST_HEAD(&works); 9626 INIT_LIST_HEAD(&splice); 9627 9628 mutex_lock(&root->delalloc_mutex); 9629 spin_lock(&root->delalloc_lock); 9630 list_splice_init(&root->delalloc_inodes, &splice); 9631 while (!list_empty(&splice)) { 9632 binode = list_entry(splice.next, struct btrfs_inode, 9633 delalloc_inodes); 9634 9635 list_move_tail(&binode->delalloc_inodes, 9636 &root->delalloc_inodes); 9637 9638 if (in_reclaim_context && 9639 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) 9640 continue; 9641 9642 inode = igrab(&binode->vfs_inode); 9643 if (!inode) { 9644 cond_resched_lock(&root->delalloc_lock); 9645 continue; 9646 } 9647 spin_unlock(&root->delalloc_lock); 9648 9649 if (snapshot) 9650 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 9651 &binode->runtime_flags); 9652 if (full_flush) { 9653 work = btrfs_alloc_delalloc_work(inode); 9654 if (!work) { 9655 iput(inode); 9656 ret = -ENOMEM; 9657 goto out; 9658 } 9659 list_add_tail(&work->list, &works); 9660 btrfs_queue_work(root->fs_info->flush_workers, 9661 &work->work); 9662 } else { 9663 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); 9664 btrfs_add_delayed_iput(BTRFS_I(inode)); 9665 if (ret || wbc->nr_to_write <= 0) 9666 goto out; 9667 } 9668 cond_resched(); 9669 spin_lock(&root->delalloc_lock); 9670 } 9671 spin_unlock(&root->delalloc_lock); 9672 9673 out: 9674 list_for_each_entry_safe(work, next, &works, list) { 9675 list_del_init(&work->list); 9676 wait_for_completion(&work->completion); 9677 kfree(work); 9678 } 9679 9680 if (!list_empty(&splice)) { 9681 spin_lock(&root->delalloc_lock); 9682 list_splice_tail(&splice, &root->delalloc_inodes); 9683 spin_unlock(&root->delalloc_lock); 9684 } 9685 mutex_unlock(&root->delalloc_mutex); 9686 return ret; 9687 } 9688 9689 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context) 9690 { 9691 struct writeback_control wbc = { 9692 .nr_to_write = LONG_MAX, 9693 .sync_mode = WB_SYNC_NONE, 9694 .range_start = 0, 9695 .range_end = LLONG_MAX, 9696 }; 9697 struct btrfs_fs_info *fs_info = root->fs_info; 9698 9699 if (BTRFS_FS_ERROR(fs_info)) 9700 return -EROFS; 9701 9702 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context); 9703 } 9704 9705 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, 9706 bool in_reclaim_context) 9707 { 9708 struct writeback_control wbc = { 9709 .nr_to_write = nr, 9710 .sync_mode = WB_SYNC_NONE, 9711 .range_start = 0, 9712 .range_end = LLONG_MAX, 9713 }; 9714 struct btrfs_root *root; 9715 struct list_head splice; 9716 int ret; 9717 9718 if (BTRFS_FS_ERROR(fs_info)) 9719 return -EROFS; 9720 9721 INIT_LIST_HEAD(&splice); 9722 9723 mutex_lock(&fs_info->delalloc_root_mutex); 9724 spin_lock(&fs_info->delalloc_root_lock); 9725 list_splice_init(&fs_info->delalloc_roots, &splice); 9726 while (!list_empty(&splice)) { 9727 /* 9728 * Reset nr_to_write here so we know that we're doing a full 9729 * flush. 
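 * start_delalloc_inodes() consumes wbc.nr_to_write as it issues
 * writeback, so without this reset the second and subsequent roots
 * would see an exhausted budget and flush nothing.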
9730 */ 9731 if (nr == LONG_MAX) 9732 wbc.nr_to_write = LONG_MAX; 9733 9734 root = list_first_entry(&splice, struct btrfs_root, 9735 delalloc_root); 9736 root = btrfs_grab_root(root); 9737 BUG_ON(!root); 9738 list_move_tail(&root->delalloc_root, 9739 &fs_info->delalloc_roots); 9740 spin_unlock(&fs_info->delalloc_root_lock); 9741 9742 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context); 9743 btrfs_put_root(root); 9744 if (ret < 0 || wbc.nr_to_write <= 0) 9745 goto out; 9746 spin_lock(&fs_info->delalloc_root_lock); 9747 } 9748 spin_unlock(&fs_info->delalloc_root_lock); 9749 9750 ret = 0; 9751 out: 9752 if (!list_empty(&splice)) { 9753 spin_lock(&fs_info->delalloc_root_lock); 9754 list_splice_tail(&splice, &fs_info->delalloc_roots); 9755 spin_unlock(&fs_info->delalloc_root_lock); 9756 } 9757 mutex_unlock(&fs_info->delalloc_root_mutex); 9758 return ret; 9759 } 9760 9761 static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, 9762 struct dentry *dentry, const char *symname) 9763 { 9764 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 9765 struct btrfs_trans_handle *trans; 9766 struct btrfs_root *root = BTRFS_I(dir)->root; 9767 struct btrfs_path *path; 9768 struct btrfs_key key; 9769 struct inode *inode; 9770 struct btrfs_new_inode_args new_inode_args = { 9771 .dir = dir, 9772 .dentry = dentry, 9773 }; 9774 unsigned int trans_num_items; 9775 int err; 9776 int name_len; 9777 int datasize; 9778 unsigned long ptr; 9779 struct btrfs_file_extent_item *ei; 9780 struct extent_buffer *leaf; 9781 9782 name_len = strlen(symname); 9783 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 9784 return -ENAMETOOLONG; 9785 9786 inode = new_inode(dir->i_sb); 9787 if (!inode) 9788 return -ENOMEM; 9789 inode_init_owner(mnt_userns, inode, dir, S_IFLNK | S_IRWXUGO); 9790 inode->i_op = &btrfs_symlink_inode_operations; 9791 inode_nohighmem(inode); 9792 inode->i_mapping->a_ops = &btrfs_aops; 9793 btrfs_i_size_write(BTRFS_I(inode), name_len); 9794 inode_set_bytes(inode, name_len); 9795 9796 new_inode_args.inode = inode; 9797 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9798 if (err) 9799 goto out_inode; 9800 /* 1 additional item for the inline extent */ 9801 trans_num_items++; 9802 9803 trans = btrfs_start_transaction(root, trans_num_items); 9804 if (IS_ERR(trans)) { 9805 err = PTR_ERR(trans); 9806 goto out_new_inode_args; 9807 } 9808 9809 err = btrfs_create_new_inode(trans, &new_inode_args); 9810 if (err) 9811 goto out; 9812 9813 path = btrfs_alloc_path(); 9814 if (!path) { 9815 err = -ENOMEM; 9816 btrfs_abort_transaction(trans, err); 9817 discard_new_inode(inode); 9818 inode = NULL; 9819 goto out; 9820 } 9821 key.objectid = btrfs_ino(BTRFS_I(inode)); 9822 key.offset = 0; 9823 key.type = BTRFS_EXTENT_DATA_KEY; 9824 datasize = btrfs_file_extent_calc_inline_size(name_len); 9825 err = btrfs_insert_empty_item(trans, root, path, &key, 9826 datasize); 9827 if (err) { 9828 btrfs_abort_transaction(trans, err); 9829 btrfs_free_path(path); 9830 discard_new_inode(inode); 9831 inode = NULL; 9832 goto out; 9833 } 9834 leaf = path->nodes[0]; 9835 ei = btrfs_item_ptr(leaf, path->slots[0], 9836 struct btrfs_file_extent_item); 9837 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 9838 btrfs_set_file_extent_type(leaf, ei, 9839 BTRFS_FILE_EXTENT_INLINE); 9840 btrfs_set_file_extent_encryption(leaf, ei, 0); 9841 btrfs_set_file_extent_compression(leaf, ei, 0); 9842 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 9843 btrfs_set_file_extent_ram_bytes(leaf, ei, 
name_len); 9844 9845 ptr = btrfs_file_extent_inline_start(ei); 9846 write_extent_buffer(leaf, symname, ptr, name_len); 9847 btrfs_mark_buffer_dirty(leaf); 9848 btrfs_free_path(path); 9849 9850 d_instantiate_new(dentry, inode); 9851 err = 0; 9852 out: 9853 btrfs_end_transaction(trans); 9854 btrfs_btree_balance_dirty(fs_info); 9855 out_new_inode_args: 9856 btrfs_new_inode_args_destroy(&new_inode_args); 9857 out_inode: 9858 if (err) 9859 iput(inode); 9860 return err; 9861 } 9862 9863 static struct btrfs_trans_handle *insert_prealloc_file_extent( 9864 struct btrfs_trans_handle *trans_in, 9865 struct btrfs_inode *inode, 9866 struct btrfs_key *ins, 9867 u64 file_offset) 9868 { 9869 struct btrfs_file_extent_item stack_fi; 9870 struct btrfs_replace_extent_info extent_info; 9871 struct btrfs_trans_handle *trans = trans_in; 9872 struct btrfs_path *path; 9873 u64 start = ins->objectid; 9874 u64 len = ins->offset; 9875 int qgroup_released; 9876 int ret; 9877 9878 memset(&stack_fi, 0, sizeof(stack_fi)); 9879 9880 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 9881 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 9882 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 9883 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 9884 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 9885 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 9886 /* Encryption and other encoding is reserved and all 0 */ 9887 9888 qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len); 9889 if (qgroup_released < 0) 9890 return ERR_PTR(qgroup_released); 9891 9892 if (trans) { 9893 ret = insert_reserved_file_extent(trans, inode, 9894 file_offset, &stack_fi, 9895 true, qgroup_released); 9896 if (ret) 9897 goto free_qgroup; 9898 return trans; 9899 } 9900 9901 extent_info.disk_offset = start; 9902 extent_info.disk_len = len; 9903 extent_info.data_offset = 0; 9904 extent_info.data_len = len; 9905 extent_info.file_offset = file_offset; 9906 extent_info.extent_buf = (char *)&stack_fi; 9907 extent_info.is_new_extent = true; 9908 extent_info.update_times = true; 9909 extent_info.qgroup_reserved = qgroup_released; 9910 extent_info.insertions = 0; 9911 9912 path = btrfs_alloc_path(); 9913 if (!path) { 9914 ret = -ENOMEM; 9915 goto free_qgroup; 9916 } 9917 9918 ret = btrfs_replace_file_extents(inode, path, file_offset, 9919 file_offset + len - 1, &extent_info, 9920 &trans); 9921 btrfs_free_path(path); 9922 if (ret) 9923 goto free_qgroup; 9924 return trans; 9925 9926 free_qgroup: 9927 /* 9928 * We have released qgroup data range at the beginning of the function, 9929 * and normally qgroup_released bytes will be freed when committing 9930 * transaction. 9931 * But if we error out early, we have to free what we have released 9932 * or we leak qgroup data reservation. 
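 * The free below must match the earlier btrfs_qgroup_release_data()
 * both in byte count and in reservation type (BTRFS_QGROUP_RSV_DATA),
 * otherwise this root's qgroup data accounting stays inflated forever.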
9933 */ 9934 btrfs_qgroup_free_refroot(inode->root->fs_info, 9935 inode->root->root_key.objectid, qgroup_released, 9936 BTRFS_QGROUP_RSV_DATA); 9937 return ERR_PTR(ret); 9938 } 9939 9940 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 9941 u64 start, u64 num_bytes, u64 min_size, 9942 loff_t actual_len, u64 *alloc_hint, 9943 struct btrfs_trans_handle *trans) 9944 { 9945 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 9946 struct extent_map *em; 9947 struct btrfs_root *root = BTRFS_I(inode)->root; 9948 struct btrfs_key ins; 9949 u64 cur_offset = start; 9950 u64 clear_offset = start; 9951 u64 i_size; 9952 u64 cur_bytes; 9953 u64 last_alloc = (u64)-1; 9954 int ret = 0; 9955 bool own_trans = true; 9956 u64 end = start + num_bytes - 1; 9957 9958 if (trans) 9959 own_trans = false; 9960 while (num_bytes > 0) { 9961 cur_bytes = min_t(u64, num_bytes, SZ_256M); 9962 cur_bytes = max(cur_bytes, min_size); 9963 /* 9964 * If we are severely fragmented we could end up with really 9965 * small allocations, so if the allocator is returning small 9966 * chunks lets make its job easier by only searching for those 9967 * sized chunks. 9968 */ 9969 cur_bytes = min(cur_bytes, last_alloc); 9970 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, 9971 min_size, 0, *alloc_hint, &ins, 1, 0); 9972 if (ret) 9973 break; 9974 9975 /* 9976 * We've reserved this space, and thus converted it from 9977 * ->bytes_may_use to ->bytes_reserved. Any error that happens 9978 * from here on out we will only need to clear our reservation 9979 * for the remaining unreserved area, so advance our 9980 * clear_offset by our extent size. 9981 */ 9982 clear_offset += ins.offset; 9983 9984 last_alloc = ins.offset; 9985 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode), 9986 &ins, cur_offset); 9987 /* 9988 * Now that we inserted the prealloc extent we can finally 9989 * decrement the number of reservations in the block group. 9990 * If we did it before, we could race with relocation and have 9991 * relocation miss the reserved extent, making it fail later. 
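 * btrfs_dec_block_group_reservations() drops the per-block-group count
 * that relocation waits to drain before processing the group, so it
 * may only be decremented once the new file extent item is visible in
 * the tree (or once the extent has been freed again on error, below).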
9992 */ 9993 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9994 if (IS_ERR(trans)) { 9995 ret = PTR_ERR(trans); 9996 btrfs_free_reserved_extent(fs_info, ins.objectid, 9997 ins.offset, 0); 9998 break; 9999 } 10000 10001 em = alloc_extent_map(); 10002 if (!em) { 10003 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset, 10004 cur_offset + ins.offset - 1, false); 10005 btrfs_set_inode_full_sync(BTRFS_I(inode)); 10006 goto next; 10007 } 10008 10009 em->start = cur_offset; 10010 em->orig_start = cur_offset; 10011 em->len = ins.offset; 10012 em->block_start = ins.objectid; 10013 em->block_len = ins.offset; 10014 em->orig_block_len = ins.offset; 10015 em->ram_bytes = ins.offset; 10016 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 10017 em->generation = trans->transid; 10018 10019 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true); 10020 free_extent_map(em); 10021 next: 10022 num_bytes -= ins.offset; 10023 cur_offset += ins.offset; 10024 *alloc_hint = ins.objectid + ins.offset; 10025 10026 inode_inc_iversion(inode); 10027 inode->i_ctime = current_time(inode); 10028 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 10029 if (!(mode & FALLOC_FL_KEEP_SIZE) && 10030 (actual_len > inode->i_size) && 10031 (cur_offset > inode->i_size)) { 10032 if (cur_offset > actual_len) 10033 i_size = actual_len; 10034 else 10035 i_size = cur_offset; 10036 i_size_write(inode, i_size); 10037 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 10038 } 10039 10040 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 10041 10042 if (ret) { 10043 btrfs_abort_transaction(trans, ret); 10044 if (own_trans) 10045 btrfs_end_transaction(trans); 10046 break; 10047 } 10048 10049 if (own_trans) { 10050 btrfs_end_transaction(trans); 10051 trans = NULL; 10052 } 10053 } 10054 if (clear_offset < end) 10055 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 10056 end - clear_offset + 1); 10057 return ret; 10058 } 10059 10060 int btrfs_prealloc_file_range(struct inode *inode, int mode, 10061 u64 start, u64 num_bytes, u64 min_size, 10062 loff_t actual_len, u64 *alloc_hint) 10063 { 10064 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 10065 min_size, actual_len, alloc_hint, 10066 NULL); 10067 } 10068 10069 int btrfs_prealloc_file_range_trans(struct inode *inode, 10070 struct btrfs_trans_handle *trans, int mode, 10071 u64 start, u64 num_bytes, u64 min_size, 10072 loff_t actual_len, u64 *alloc_hint) 10073 { 10074 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 10075 min_size, actual_len, alloc_hint, trans); 10076 } 10077 10078 static int btrfs_permission(struct user_namespace *mnt_userns, 10079 struct inode *inode, int mask) 10080 { 10081 struct btrfs_root *root = BTRFS_I(inode)->root; 10082 umode_t mode = inode->i_mode; 10083 10084 if (mask & MAY_WRITE && 10085 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 10086 if (btrfs_root_readonly(root)) 10087 return -EROFS; 10088 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 10089 return -EACCES; 10090 } 10091 return generic_permission(mnt_userns, inode, mask); 10092 } 10093 10094 static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, 10095 struct file *file, umode_t mode) 10096 { 10097 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 10098 struct btrfs_trans_handle *trans; 10099 struct btrfs_root *root = BTRFS_I(dir)->root; 10100 struct inode *inode; 10101 struct btrfs_new_inode_args new_inode_args = { 10102 .dir = dir, 10103 .dentry = file->f_path.dentry, 10104 .orphan = true, 10105 
}; 10106 unsigned int trans_num_items; 10107 int ret; 10108 10109 inode = new_inode(dir->i_sb); 10110 if (!inode) 10111 return -ENOMEM; 10112 inode_init_owner(mnt_userns, inode, dir, mode); 10113 inode->i_fop = &btrfs_file_operations; 10114 inode->i_op = &btrfs_file_inode_operations; 10115 inode->i_mapping->a_ops = &btrfs_aops; 10116 10117 new_inode_args.inode = inode; 10118 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 10119 if (ret) 10120 goto out_inode; 10121 10122 trans = btrfs_start_transaction(root, trans_num_items); 10123 if (IS_ERR(trans)) { 10124 ret = PTR_ERR(trans); 10125 goto out_new_inode_args; 10126 } 10127 10128 ret = btrfs_create_new_inode(trans, &new_inode_args); 10129 10130 /* 10131 * We set number of links to 0 in btrfs_create_new_inode(), and here we 10132 * set it to 1 because d_tmpfile() will issue a warning if the count is 10133 * 0, through: 10134 * 10135 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 10136 */ 10137 set_nlink(inode, 1); 10138 10139 if (!ret) { 10140 d_tmpfile(file, inode); 10141 unlock_new_inode(inode); 10142 mark_inode_dirty(inode); 10143 } 10144 10145 btrfs_end_transaction(trans); 10146 btrfs_btree_balance_dirty(fs_info); 10147 out_new_inode_args: 10148 btrfs_new_inode_args_destroy(&new_inode_args); 10149 out_inode: 10150 if (ret) 10151 iput(inode); 10152 return finish_open_simple(file, ret); 10153 } 10154 10155 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end) 10156 { 10157 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10158 unsigned long index = start >> PAGE_SHIFT; 10159 unsigned long end_index = end >> PAGE_SHIFT; 10160 struct page *page; 10161 u32 len; 10162 10163 ASSERT(end + 1 - start <= U32_MAX); 10164 len = end + 1 - start; 10165 while (index <= end_index) { 10166 page = find_get_page(inode->vfs_inode.i_mapping, index); 10167 ASSERT(page); /* Pages should be in the extent_io_tree */ 10168 10169 btrfs_page_set_writeback(fs_info, page, start, len); 10170 put_page(page); 10171 index++; 10172 } 10173 } 10174 10175 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info, 10176 int compress_type) 10177 { 10178 switch (compress_type) { 10179 case BTRFS_COMPRESS_NONE: 10180 return BTRFS_ENCODED_IO_COMPRESSION_NONE; 10181 case BTRFS_COMPRESS_ZLIB: 10182 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB; 10183 case BTRFS_COMPRESS_LZO: 10184 /* 10185 * The LZO format depends on the sector size. 64K is the maximum 10186 * sector size that we support. 
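 * For example: sectorsize 4K has sectorsize_bits 12 and maps to
 * BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 0, while sectorsize 64K has
 * sectorsize_bits 16 and maps to ..._LZO_4K + 4 == ..._LZO_64K.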
10187 */ 10188 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) 10189 return -EINVAL; 10190 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 10191 (fs_info->sectorsize_bits - 12); 10192 case BTRFS_COMPRESS_ZSTD: 10193 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD; 10194 default: 10195 return -EUCLEAN; 10196 } 10197 } 10198 10199 static ssize_t btrfs_encoded_read_inline( 10200 struct kiocb *iocb, 10201 struct iov_iter *iter, u64 start, 10202 u64 lockend, 10203 struct extent_state **cached_state, 10204 u64 extent_start, size_t count, 10205 struct btrfs_ioctl_encoded_io_args *encoded, 10206 bool *unlocked) 10207 { 10208 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10209 struct btrfs_root *root = inode->root; 10210 struct btrfs_fs_info *fs_info = root->fs_info; 10211 struct extent_io_tree *io_tree = &inode->io_tree; 10212 struct btrfs_path *path; 10213 struct extent_buffer *leaf; 10214 struct btrfs_file_extent_item *item; 10215 u64 ram_bytes; 10216 unsigned long ptr; 10217 void *tmp; 10218 ssize_t ret; 10219 10220 path = btrfs_alloc_path(); 10221 if (!path) { 10222 ret = -ENOMEM; 10223 goto out; 10224 } 10225 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 10226 extent_start, 0); 10227 if (ret) { 10228 if (ret > 0) { 10229 /* The extent item disappeared? */ 10230 ret = -EIO; 10231 } 10232 goto out; 10233 } 10234 leaf = path->nodes[0]; 10235 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 10236 10237 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); 10238 ptr = btrfs_file_extent_inline_start(item); 10239 10240 encoded->len = min_t(u64, extent_start + ram_bytes, 10241 inode->vfs_inode.i_size) - iocb->ki_pos; 10242 ret = btrfs_encoded_io_compression_from_extent(fs_info, 10243 btrfs_file_extent_compression(leaf, item)); 10244 if (ret < 0) 10245 goto out; 10246 encoded->compression = ret; 10247 if (encoded->compression) { 10248 size_t inline_size; 10249 10250 inline_size = btrfs_file_extent_inline_item_len(leaf, 10251 path->slots[0]); 10252 if (inline_size > count) { 10253 ret = -ENOBUFS; 10254 goto out; 10255 } 10256 count = inline_size; 10257 encoded->unencoded_len = ram_bytes; 10258 encoded->unencoded_offset = iocb->ki_pos - extent_start; 10259 } else { 10260 count = min_t(u64, count, encoded->len); 10261 encoded->len = count; 10262 encoded->unencoded_len = count; 10263 ptr += iocb->ki_pos - extent_start; 10264 } 10265 10266 tmp = kmalloc(count, GFP_NOFS); 10267 if (!tmp) { 10268 ret = -ENOMEM; 10269 goto out; 10270 } 10271 read_extent_buffer(leaf, tmp, ptr, count); 10272 btrfs_release_path(path); 10273 unlock_extent(io_tree, start, lockend, cached_state); 10274 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10275 *unlocked = true; 10276 10277 ret = copy_to_iter(tmp, count, iter); 10278 if (ret != count) 10279 ret = -EFAULT; 10280 kfree(tmp); 10281 out: 10282 btrfs_free_path(path); 10283 return ret; 10284 } 10285 10286 struct btrfs_encoded_read_private { 10287 struct btrfs_inode *inode; 10288 u64 file_offset; 10289 wait_queue_head_t wait; 10290 atomic_t pending; 10291 blk_status_t status; 10292 bool skip_csum; 10293 }; 10294 10295 static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode, 10296 struct bio *bio, int mirror_num) 10297 { 10298 struct btrfs_encoded_read_private *priv = btrfs_bio(bio)->private; 10299 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10300 blk_status_t ret; 10301 10302 if (!priv->skip_csum) { 10303 ret = btrfs_lookup_bio_sums(&inode->vfs_inode, bio, NULL); 10304 if (ret) 10305 
return ret; 10306 } 10307 10308 atomic_inc(&priv->pending); 10309 btrfs_submit_bio(fs_info, bio, mirror_num); 10310 return BLK_STS_OK; 10311 } 10312 10313 static blk_status_t btrfs_encoded_read_verify_csum(struct btrfs_bio *bbio) 10314 { 10315 const bool uptodate = (bbio->bio.bi_status == BLK_STS_OK); 10316 struct btrfs_encoded_read_private *priv = bbio->private; 10317 struct btrfs_inode *inode = priv->inode; 10318 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10319 u32 sectorsize = fs_info->sectorsize; 10320 struct bio_vec *bvec; 10321 struct bvec_iter_all iter_all; 10322 u32 bio_offset = 0; 10323 10324 if (priv->skip_csum || !uptodate) 10325 return bbio->bio.bi_status; 10326 10327 bio_for_each_segment_all(bvec, &bbio->bio, iter_all) { 10328 unsigned int i, nr_sectors, pgoff; 10329 10330 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len); 10331 pgoff = bvec->bv_offset; 10332 for (i = 0; i < nr_sectors; i++) { 10333 ASSERT(pgoff < PAGE_SIZE); 10334 if (btrfs_check_data_csum(inode, bbio, bio_offset, 10335 bvec->bv_page, pgoff)) 10336 return BLK_STS_IOERR; 10337 bio_offset += sectorsize; 10338 pgoff += sectorsize; 10339 } 10340 } 10341 return BLK_STS_OK; 10342 } 10343 10344 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio) 10345 { 10346 struct btrfs_encoded_read_private *priv = bbio->private; 10347 blk_status_t status; 10348 10349 status = btrfs_encoded_read_verify_csum(bbio); 10350 if (status) { 10351 /* 10352 * The memory barrier implied by the atomic_dec_return() here 10353 * pairs with the memory barrier implied by the 10354 * atomic_dec_return() or io_wait_event() in 10355 * btrfs_encoded_read_regular_fill_pages() to ensure that this 10356 * write is observed before the load of status in 10357 * btrfs_encoded_read_regular_fill_pages(). 10358 */ 10359 WRITE_ONCE(priv->status, status); 10360 } 10361 if (!atomic_dec_return(&priv->pending)) 10362 wake_up(&priv->wait); 10363 btrfs_bio_free_csum(bbio); 10364 bio_put(&bbio->bio); 10365 } 10366 10367 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, 10368 u64 file_offset, u64 disk_bytenr, 10369 u64 disk_io_size, struct page **pages) 10370 { 10371 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10372 struct btrfs_encoded_read_private priv = { 10373 .inode = inode, 10374 .file_offset = file_offset, 10375 .pending = ATOMIC_INIT(1), 10376 .skip_csum = (inode->flags & BTRFS_INODE_NODATASUM), 10377 }; 10378 unsigned long i = 0; 10379 u64 cur = 0; 10380 int ret; 10381 10382 init_waitqueue_head(&priv.wait); 10383 /* 10384 * Submit bios for the extent, splitting due to bio or stripe limits as 10385 * necessary. 
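 * priv.pending starts at 1 so that completions cannot reach zero and
 * wake the waiter before all bios have been submitted; each submitted
 * bio takes another reference, and any I/O or checksum error is
 * recorded in priv.status via WRITE_ONCE().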
10386 */ 10387 while (cur < disk_io_size) { 10388 struct extent_map *em; 10389 struct btrfs_io_geometry geom; 10390 struct bio *bio = NULL; 10391 u64 remaining; 10392 10393 em = btrfs_get_chunk_map(fs_info, disk_bytenr + cur, 10394 disk_io_size - cur); 10395 if (IS_ERR(em)) { 10396 ret = PTR_ERR(em); 10397 } else { 10398 ret = btrfs_get_io_geometry(fs_info, em, BTRFS_MAP_READ, 10399 disk_bytenr + cur, &geom); 10400 free_extent_map(em); 10401 } 10402 if (ret) { 10403 WRITE_ONCE(priv.status, errno_to_blk_status(ret)); 10404 break; 10405 } 10406 remaining = min(geom.len, disk_io_size - cur); 10407 while (bio || remaining) { 10408 size_t bytes = min_t(u64, remaining, PAGE_SIZE); 10409 10410 if (!bio) { 10411 bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, 10412 btrfs_encoded_read_endio, 10413 &priv); 10414 bio->bi_iter.bi_sector = 10415 (disk_bytenr + cur) >> SECTOR_SHIFT; 10416 } 10417 10418 if (!bytes || 10419 bio_add_page(bio, pages[i], bytes, 0) < bytes) { 10420 blk_status_t status; 10421 10422 status = submit_encoded_read_bio(inode, bio, 0); 10423 if (status) { 10424 WRITE_ONCE(priv.status, status); 10425 bio_put(bio); 10426 goto out; 10427 } 10428 bio = NULL; 10429 continue; 10430 } 10431 10432 i++; 10433 cur += bytes; 10434 remaining -= bytes; 10435 } 10436 } 10437 10438 out: 10439 if (atomic_dec_return(&priv.pending)) 10440 io_wait_event(priv.wait, !atomic_read(&priv.pending)); 10441 /* See btrfs_encoded_read_endio() for ordering. */ 10442 return blk_status_to_errno(READ_ONCE(priv.status)); 10443 } 10444 10445 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, 10446 struct iov_iter *iter, 10447 u64 start, u64 lockend, 10448 struct extent_state **cached_state, 10449 u64 disk_bytenr, u64 disk_io_size, 10450 size_t count, bool compressed, 10451 bool *unlocked) 10452 { 10453 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10454 struct extent_io_tree *io_tree = &inode->io_tree; 10455 struct page **pages; 10456 unsigned long nr_pages, i; 10457 u64 cur; 10458 size_t page_offset; 10459 ssize_t ret; 10460 10461 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); 10462 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 10463 if (!pages) 10464 return -ENOMEM; 10465 ret = btrfs_alloc_page_array(nr_pages, pages); 10466 if (ret) { 10467 ret = -ENOMEM; 10468 goto out; 10469 } 10470 10471 ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr, 10472 disk_io_size, pages); 10473 if (ret) 10474 goto out; 10475 10476 unlock_extent(io_tree, start, lockend, cached_state); 10477 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10478 *unlocked = true; 10479 10480 if (compressed) { 10481 i = 0; 10482 page_offset = 0; 10483 } else { 10484 i = (iocb->ki_pos - start) >> PAGE_SHIFT; 10485 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); 10486 } 10487 cur = 0; 10488 while (cur < count) { 10489 size_t bytes = min_t(size_t, count - cur, 10490 PAGE_SIZE - page_offset); 10491 10492 if (copy_page_to_iter(pages[i], page_offset, bytes, 10493 iter) != bytes) { 10494 ret = -EFAULT; 10495 goto out; 10496 } 10497 i++; 10498 cur += bytes; 10499 page_offset = 0; 10500 } 10501 ret = count; 10502 out: 10503 for (i = 0; i < nr_pages; i++) { 10504 if (pages[i]) 10505 __free_page(pages[i]); 10506 } 10507 kfree(pages); 10508 return ret; 10509 } 10510 10511 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, 10512 struct btrfs_ioctl_encoded_io_args *encoded) 10513 { 10514 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10515 struct btrfs_fs_info 
*fs_info = inode->root->fs_info; 10516 struct extent_io_tree *io_tree = &inode->io_tree; 10517 ssize_t ret; 10518 size_t count = iov_iter_count(iter); 10519 u64 start, lockend, disk_bytenr, disk_io_size; 10520 struct extent_state *cached_state = NULL; 10521 struct extent_map *em; 10522 bool unlocked = false; 10523 10524 file_accessed(iocb->ki_filp); 10525 10526 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED); 10527 10528 if (iocb->ki_pos >= inode->vfs_inode.i_size) { 10529 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10530 return 0; 10531 } 10532 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); 10533 /* 10534 * We don't know how long the extent containing iocb->ki_pos is, but if 10535 * it's compressed we know that it won't be longer than this. 10536 */ 10537 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; 10538 10539 for (;;) { 10540 struct btrfs_ordered_extent *ordered; 10541 10542 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, 10543 lockend - start + 1); 10544 if (ret) 10545 goto out_unlock_inode; 10546 lock_extent(io_tree, start, lockend, &cached_state); 10547 ordered = btrfs_lookup_ordered_range(inode, start, 10548 lockend - start + 1); 10549 if (!ordered) 10550 break; 10551 btrfs_put_ordered_extent(ordered); 10552 unlock_extent(io_tree, start, lockend, &cached_state); 10553 cond_resched(); 10554 } 10555 10556 em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1); 10557 if (IS_ERR(em)) { 10558 ret = PTR_ERR(em); 10559 goto out_unlock_extent; 10560 } 10561 10562 if (em->block_start == EXTENT_MAP_INLINE) { 10563 u64 extent_start = em->start; 10564 10565 /* 10566 * For inline extents we get everything we need out of the 10567 * extent item. 10568 */ 10569 free_extent_map(em); 10570 em = NULL; 10571 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, 10572 &cached_state, extent_start, 10573 count, encoded, &unlocked); 10574 goto out; 10575 } 10576 10577 /* 10578 * We only want to return up to EOF even if the extent extends beyond 10579 * that. 10580 */ 10581 encoded->len = min_t(u64, extent_map_end(em), 10582 inode->vfs_inode.i_size) - iocb->ki_pos; 10583 if (em->block_start == EXTENT_MAP_HOLE || 10584 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 10585 disk_bytenr = EXTENT_MAP_HOLE; 10586 count = min_t(u64, count, encoded->len); 10587 encoded->len = count; 10588 encoded->unencoded_len = count; 10589 } else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10590 disk_bytenr = em->block_start; 10591 /* 10592 * Bail if the buffer isn't large enough to return the whole 10593 * compressed extent. 10594 */ 10595 if (em->block_len > count) { 10596 ret = -ENOBUFS; 10597 goto out_em; 10598 } 10599 disk_io_size = em->block_len; 10600 count = em->block_len; 10601 encoded->unencoded_len = em->ram_bytes; 10602 encoded->unencoded_offset = iocb->ki_pos - em->orig_start; 10603 ret = btrfs_encoded_io_compression_from_extent(fs_info, 10604 em->compress_type); 10605 if (ret < 0) 10606 goto out_em; 10607 encoded->compression = ret; 10608 } else { 10609 disk_bytenr = em->block_start + (start - em->start); 10610 if (encoded->len > count) 10611 encoded->len = count; 10612 /* 10613 * Don't read beyond what we locked. This also limits the page 10614 * allocations that we'll do. 
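 * For example, with 4K pages and an uncompressed extent much larger
 * than BTRFS_MAX_UNCOMPRESSED (SZ_128K): lockend caps disk_io_size at
 * 128K, so btrfs_encoded_read_regular() allocates at most 32 pages for
 * a single call.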
10615 */ 10616 disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; 10617 count = start + disk_io_size - iocb->ki_pos; 10618 encoded->len = count; 10619 encoded->unencoded_len = count; 10620 disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize); 10621 } 10622 free_extent_map(em); 10623 em = NULL; 10624 10625 if (disk_bytenr == EXTENT_MAP_HOLE) { 10626 unlock_extent(io_tree, start, lockend, &cached_state); 10627 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10628 unlocked = true; 10629 ret = iov_iter_zero(count, iter); 10630 if (ret != count) 10631 ret = -EFAULT; 10632 } else { 10633 ret = btrfs_encoded_read_regular(iocb, iter, start, lockend, 10634 &cached_state, disk_bytenr, 10635 disk_io_size, count, 10636 encoded->compression, 10637 &unlocked); 10638 } 10639 10640 out: 10641 if (ret >= 0) 10642 iocb->ki_pos += encoded->len; 10643 out_em: 10644 free_extent_map(em); 10645 out_unlock_extent: 10646 if (!unlocked) 10647 unlock_extent(io_tree, start, lockend, &cached_state); 10648 out_unlock_inode: 10649 if (!unlocked) 10650 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10651 return ret; 10652 } 10653 10654 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 10655 const struct btrfs_ioctl_encoded_io_args *encoded) 10656 { 10657 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10658 struct btrfs_root *root = inode->root; 10659 struct btrfs_fs_info *fs_info = root->fs_info; 10660 struct extent_io_tree *io_tree = &inode->io_tree; 10661 struct extent_changeset *data_reserved = NULL; 10662 struct extent_state *cached_state = NULL; 10663 int compression; 10664 size_t orig_count; 10665 u64 start, end; 10666 u64 num_bytes, ram_bytes, disk_num_bytes; 10667 unsigned long nr_pages, i; 10668 struct page **pages; 10669 struct btrfs_key ins; 10670 bool extent_reserved = false; 10671 struct extent_map *em; 10672 ssize_t ret; 10673 10674 switch (encoded->compression) { 10675 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB: 10676 compression = BTRFS_COMPRESS_ZLIB; 10677 break; 10678 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD: 10679 compression = BTRFS_COMPRESS_ZSTD; 10680 break; 10681 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K: 10682 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K: 10683 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K: 10684 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K: 10685 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K: 10686 /* The sector size must match for LZO. */ 10687 if (encoded->compression - 10688 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 != 10689 fs_info->sectorsize_bits) 10690 return -EINVAL; 10691 compression = BTRFS_COMPRESS_LZO; 10692 break; 10693 default: 10694 return -EINVAL; 10695 } 10696 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 10697 return -EINVAL; 10698 10699 orig_count = iov_iter_count(from); 10700 10701 /* The extent size must be sane. */ 10702 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || 10703 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0) 10704 return -EINVAL; 10705 10706 /* 10707 * The compressed data must be smaller than the decompressed data. 10708 * 10709 * It's of course possible for data to compress to larger or the same 10710 * size, but the buffered I/O path falls back to no compression for such 10711 * data, and we don't want to break any assumptions by creating these 10712 * extents. 10713 * 10714 * Note that this is less strict than the current check we have that the 10715 * compressed data must be at least one sector smaller than the 10716 * decompressed data. 
We only want to enforce the weaker requirement 10717 * from old kernels that it is at least one byte smaller. 10718 */ 10719 if (orig_count >= encoded->unencoded_len) 10720 return -EINVAL; 10721 10722 /* The extent must start on a sector boundary. */ 10723 start = iocb->ki_pos; 10724 if (!IS_ALIGNED(start, fs_info->sectorsize)) 10725 return -EINVAL; 10726 10727 /* 10728 * The extent must end on a sector boundary. However, we allow a write 10729 * which ends at or extends i_size to have an unaligned length; we round 10730 * up the extent size and set i_size to the unaligned end. 10731 */ 10732 if (start + encoded->len < inode->vfs_inode.i_size && 10733 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) 10734 return -EINVAL; 10735 10736 /* Finally, the offset in the unencoded data must be sector-aligned. */ 10737 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) 10738 return -EINVAL; 10739 10740 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); 10741 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); 10742 end = start + num_bytes - 1; 10743 10744 /* 10745 * If the extent cannot be inline, the compressed data on disk must be 10746 * sector-aligned. For convenience, we extend it with zeroes if it 10747 * isn't. 10748 */ 10749 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); 10750 nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE); 10751 pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT); 10752 if (!pages) 10753 return -ENOMEM; 10754 for (i = 0; i < nr_pages; i++) { 10755 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from)); 10756 char *kaddr; 10757 10758 pages[i] = alloc_page(GFP_KERNEL_ACCOUNT); 10759 if (!pages[i]) { 10760 ret = -ENOMEM; 10761 goto out_pages; 10762 } 10763 kaddr = kmap_local_page(pages[i]); 10764 if (copy_from_iter(kaddr, bytes, from) != bytes) { 10765 kunmap_local(kaddr); 10766 ret = -EFAULT; 10767 goto out_pages; 10768 } 10769 if (bytes < PAGE_SIZE) 10770 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); 10771 kunmap_local(kaddr); 10772 } 10773 10774 for (;;) { 10775 struct btrfs_ordered_extent *ordered; 10776 10777 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes); 10778 if (ret) 10779 goto out_pages; 10780 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, 10781 start >> PAGE_SHIFT, 10782 end >> PAGE_SHIFT); 10783 if (ret) 10784 goto out_pages; 10785 lock_extent(io_tree, start, end, &cached_state); 10786 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); 10787 if (!ordered && 10788 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) 10789 break; 10790 if (ordered) 10791 btrfs_put_ordered_extent(ordered); 10792 unlock_extent(io_tree, start, end, &cached_state); 10793 cond_resched(); 10794 } 10795 10796 /* 10797 * We don't use the higher-level delalloc space functions because our 10798 * num_bytes and disk_num_bytes are different. 10799 */ 10800 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes); 10801 if (ret) 10802 goto out_unlock; 10803 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); 10804 if (ret) 10805 goto out_free_data_space; 10806 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, 10807 false); 10808 if (ret) 10809 goto out_qgroup_free_data; 10810 10811 /* Try an inline extent first. 
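 * cow_file_range_inline() returning 0 means the data was stored inline
 * and the write is complete (ret becomes orig_count), a positive value
 * means the data cannot be inlined and we fall through to writing a
 * regular compressed extent, and a negative value is an error.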
*/ 10812 if (start == 0 && encoded->unencoded_len == encoded->len && 10813 encoded->unencoded_offset == 0) { 10814 ret = cow_file_range_inline(inode, encoded->len, orig_count, 10815 compression, pages, true); 10816 if (ret <= 0) { 10817 if (ret == 0) 10818 ret = orig_count; 10819 goto out_delalloc_release; 10820 } 10821 } 10822 10823 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes, 10824 disk_num_bytes, 0, 0, &ins, 1, 1); 10825 if (ret) 10826 goto out_delalloc_release; 10827 extent_reserved = true; 10828 10829 em = create_io_em(inode, start, num_bytes, 10830 start - encoded->unencoded_offset, ins.objectid, 10831 ins.offset, ins.offset, ram_bytes, compression, 10832 BTRFS_ORDERED_COMPRESSED); 10833 if (IS_ERR(em)) { 10834 ret = PTR_ERR(em); 10835 goto out_free_reserved; 10836 } 10837 free_extent_map(em); 10838 10839 ret = btrfs_add_ordered_extent(inode, start, num_bytes, ram_bytes, 10840 ins.objectid, ins.offset, 10841 encoded->unencoded_offset, 10842 (1 << BTRFS_ORDERED_ENCODED) | 10843 (1 << BTRFS_ORDERED_COMPRESSED), 10844 compression); 10845 if (ret) { 10846 btrfs_drop_extent_map_range(inode, start, end, false); 10847 goto out_free_reserved; 10848 } 10849 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10850 10851 if (start + encoded->len > inode->vfs_inode.i_size) 10852 i_size_write(&inode->vfs_inode, start + encoded->len); 10853 10854 unlock_extent(io_tree, start, end, &cached_state); 10855 10856 btrfs_delalloc_release_extents(inode, num_bytes); 10857 10858 if (btrfs_submit_compressed_write(inode, start, num_bytes, ins.objectid, 10859 ins.offset, pages, nr_pages, 0, NULL, 10860 false)) { 10861 btrfs_writepage_endio_finish_ordered(inode, pages[0], start, end, 0); 10862 ret = -EIO; 10863 goto out_pages; 10864 } 10865 ret = orig_count; 10866 goto out; 10867 10868 out_free_reserved: 10869 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10870 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 10871 out_delalloc_release: 10872 btrfs_delalloc_release_extents(inode, num_bytes); 10873 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); 10874 out_qgroup_free_data: 10875 if (ret < 0) 10876 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes); 10877 out_free_data_space: 10878 /* 10879 * If btrfs_reserve_extent() succeeded, then we already decremented 10880 * bytes_may_use. 10881 */ 10882 if (!extent_reserved) 10883 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes); 10884 out_unlock: 10885 unlock_extent(io_tree, start, end, &cached_state); 10886 out_pages: 10887 for (i = 0; i < nr_pages; i++) { 10888 if (pages[i]) 10889 __free_page(pages[i]); 10890 } 10891 kvfree(pages); 10892 out: 10893 if (ret >= 0) 10894 iocb->ki_pos += encoded->len; 10895 return ret; 10896 } 10897 10898 #ifdef CONFIG_SWAP 10899 /* 10900 * Add an entry indicating a block group or device which is pinned by a 10901 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 10902 * negative errno on failure. 
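 * Entries are kept in an rbtree ordered by the (ptr, inode) pair. A
 * duplicate insertion for a block group only bumps bg_extent_count,
 * which remembers how many swap extents pin that block group.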
10903 */ 10904 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 10905 bool is_block_group) 10906 { 10907 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10908 struct btrfs_swapfile_pin *sp, *entry; 10909 struct rb_node **p; 10910 struct rb_node *parent = NULL; 10911 10912 sp = kmalloc(sizeof(*sp), GFP_NOFS); 10913 if (!sp) 10914 return -ENOMEM; 10915 sp->ptr = ptr; 10916 sp->inode = inode; 10917 sp->is_block_group = is_block_group; 10918 sp->bg_extent_count = 1; 10919 10920 spin_lock(&fs_info->swapfile_pins_lock); 10921 p = &fs_info->swapfile_pins.rb_node; 10922 while (*p) { 10923 parent = *p; 10924 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 10925 if (sp->ptr < entry->ptr || 10926 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 10927 p = &(*p)->rb_left; 10928 } else if (sp->ptr > entry->ptr || 10929 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 10930 p = &(*p)->rb_right; 10931 } else { 10932 if (is_block_group) 10933 entry->bg_extent_count++; 10934 spin_unlock(&fs_info->swapfile_pins_lock); 10935 kfree(sp); 10936 return 1; 10937 } 10938 } 10939 rb_link_node(&sp->node, parent, p); 10940 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 10941 spin_unlock(&fs_info->swapfile_pins_lock); 10942 return 0; 10943 } 10944 10945 /* Free all of the entries pinned by this swapfile. */ 10946 static void btrfs_free_swapfile_pins(struct inode *inode) 10947 { 10948 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10949 struct btrfs_swapfile_pin *sp; 10950 struct rb_node *node, *next; 10951 10952 spin_lock(&fs_info->swapfile_pins_lock); 10953 node = rb_first(&fs_info->swapfile_pins); 10954 while (node) { 10955 next = rb_next(node); 10956 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 10957 if (sp->inode == inode) { 10958 rb_erase(&sp->node, &fs_info->swapfile_pins); 10959 if (sp->is_block_group) { 10960 btrfs_dec_block_group_swap_extents(sp->ptr, 10961 sp->bg_extent_count); 10962 btrfs_put_block_group(sp->ptr); 10963 } 10964 kfree(sp); 10965 } 10966 node = next; 10967 } 10968 spin_unlock(&fs_info->swapfile_pins_lock); 10969 } 10970 10971 struct btrfs_swap_info { 10972 u64 start; 10973 u64 block_start; 10974 u64 block_len; 10975 u64 lowest_ppage; 10976 u64 highest_ppage; 10977 unsigned long nr_pages; 10978 int nr_extents; 10979 }; 10980 10981 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 10982 struct btrfs_swap_info *bsi) 10983 { 10984 unsigned long nr_pages; 10985 unsigned long max_pages; 10986 u64 first_ppage, first_ppage_reported, next_ppage; 10987 int ret; 10988 10989 /* 10990 * Our swapfile may have had its size extended after the swap header was 10991 * written. In that case activating the swapfile should not go beyond 10992 * the max size set in the swap header. 
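 * Note the rounding below: first_ppage rounds the physical start up
 * and next_ppage rounds the physical end down, so partial pages at the
 * edges of an extent are never used for swap. For the extent at file
 * offset 0 the first page holds the swap header, which is why it is
 * excluded from the lowest_ppage bookkeeping.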
static int btrfs_add_swap_extent(struct swap_info_struct *sis,
				 struct btrfs_swap_info *bsi)
{
	unsigned long nr_pages;
	unsigned long max_pages;
	u64 first_ppage, first_ppage_reported, next_ppage;
	int ret;

	/*
	 * Our swapfile may have had its size extended after the swap header
	 * was written. In that case activating the swapfile should not go
	 * beyond the max size set in the swap header.
	 */
	if (bsi->nr_pages >= sis->max)
		return 0;

	max_pages = sis->max - bsi->nr_pages;
	first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
				PAGE_SIZE) >> PAGE_SHIFT;

	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;
	nr_pages = min(nr_pages, max_pages);

	first_ppage_reported = first_ppage;
	if (bsi->start == 0)
		first_ppage_reported++;
	if (bsi->lowest_ppage > first_ppage_reported)
		bsi->lowest_ppage = first_ppage_reported;
	if (bsi->highest_ppage < (next_ppage - 1))
		bsi->highest_ppage = next_ppage - 1;

	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
	if (ret < 0)
		return ret;
	bsi->nr_extents += ret;
	bsi->nr_pages += nr_pages;
	return 0;
}

static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}
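/*
 * Usage sketch (added, illustrative only): the checks at the start of
 * btrfs_swap_activate() below mean a btrfs swapfile has to be NOCOW,
 * uncompressed and fully allocated, which is typically arranged along
 * these lines before swapon:
 *
 *	truncate -s 0 swapfile
 *	chattr +C swapfile
 *	fallocate -l 1G swapfile
 *	chmod 600 swapfile
 *	mkswap swapfile
 */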
static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct extent_map *em = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	int ret = 0;
	u64 isize;
	u64 start;

	/*
	 * If the swap file was just created, make sure delalloc is done. If
	 * the file changes again after this, the user is doing something
	 * stupid and we don't really care.
	 */
	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		return ret;

	/*
	 * The inode is locked, so these flags won't change after we check
	 * them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		return -EINVAL;
	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
	   "cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * If snapshot creation already started before we bumped nr_swapfiles
	 * from 0 to 1 and completes before the first write into the swap file
	 * after it is activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	   "cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that the subvolume is marked for deletion but not
	 * yet removed. To prevent this race, we check the root status before
	 * activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	   "cannot activate swapfile because subvolume %llu is being deleted",
			   root->root_key.objectid);
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but
			 * in case something changes in the future, let's
			 * catch it here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = em->block_start + (start - em->start);
		len = min(len, em->len - (start - em->start));
		free_extent_map(em);
		em = NULL;
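		/*
		 * Descriptive note (added): can_nocow_extent() returns > 0
		 * when the range can be written without COW, 0 when COW would
		 * be required, and a negative errno on failure; only the
		 * first case is acceptable for a swapfile.
		 */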
		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL,
				       false, true);
		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
		} else {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}

		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = em->map_lookup->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != em->map_lookup->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (em->map_lookup->stripes[0].physical +
					(logical_block_start - em->start));
		len = min(len, em->len - (logical_block_start - em->start));
		free_extent_map(em);
		em = NULL;

		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
			   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
			   "block group for swapfile at %llu is read-only%s",
				   bg->start,
				   atomic_read(&fs_info->scrubs_running) ?
					       " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}
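		/*
		 * Descriptive note (added): physically contiguous runs are
		 * coalesced in bsi and only flushed to btrfs_add_swap_extent()
		 * when a discontiguity is found, keeping the number of swap
		 * extents handed to the swap core as small as possible.
		 */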
11224 " (scrub running)" : ""); 11225 btrfs_put_block_group(bg); 11226 ret = -EINVAL; 11227 goto out; 11228 } 11229 11230 ret = btrfs_add_swapfile_pin(inode, bg, true); 11231 if (ret) { 11232 btrfs_put_block_group(bg); 11233 if (ret == 1) 11234 ret = 0; 11235 else 11236 goto out; 11237 } 11238 11239 if (bsi.block_len && 11240 bsi.block_start + bsi.block_len == physical_block_start) { 11241 bsi.block_len += len; 11242 } else { 11243 if (bsi.block_len) { 11244 ret = btrfs_add_swap_extent(sis, &bsi); 11245 if (ret) 11246 goto out; 11247 } 11248 bsi.start = start; 11249 bsi.block_start = physical_block_start; 11250 bsi.block_len = len; 11251 } 11252 11253 start += len; 11254 } 11255 11256 if (bsi.block_len) 11257 ret = btrfs_add_swap_extent(sis, &bsi); 11258 11259 out: 11260 if (!IS_ERR_OR_NULL(em)) 11261 free_extent_map(em); 11262 11263 unlock_extent(io_tree, 0, isize - 1, &cached_state); 11264 11265 if (ret) 11266 btrfs_swap_deactivate(file); 11267 11268 btrfs_drew_write_unlock(&root->snapshot_lock); 11269 11270 btrfs_exclop_finish(fs_info); 11271 11272 if (ret) 11273 return ret; 11274 11275 if (device) 11276 sis->bdev = device->bdev; 11277 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; 11278 sis->max = bsi.nr_pages; 11279 sis->pages = bsi.nr_pages - 1; 11280 sis->highest_bit = bsi.nr_pages - 1; 11281 return bsi.nr_extents; 11282 } 11283 #else 11284 static void btrfs_swap_deactivate(struct file *file) 11285 { 11286 } 11287 11288 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 11289 sector_t *span) 11290 { 11291 return -EOPNOTSUPP; 11292 } 11293 #endif 11294 11295 /* 11296 * Update the number of bytes used in the VFS' inode. When we replace extents in 11297 * a range (clone, dedupe, fallocate's zero range), we must update the number of 11298 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls 11299 * always get a correct value. 11300 */ 11301 void btrfs_update_inode_bytes(struct btrfs_inode *inode, 11302 const u64 add_bytes, 11303 const u64 del_bytes) 11304 { 11305 if (add_bytes == del_bytes) 11306 return; 11307 11308 spin_lock(&inode->lock); 11309 if (del_bytes > 0) 11310 inode_sub_bytes(&inode->vfs_inode, del_bytes); 11311 if (add_bytes > 0) 11312 inode_add_bytes(&inode->vfs_inode, add_bytes); 11313 spin_unlock(&inode->lock); 11314 } 11315 11316 /* 11317 * Verify that there are no ordered extents for a given file range. 11318 * 11319 * @inode: The target inode. 11320 * @start: Start offset of the file range, should be sector size aligned. 11321 * @end: End offset (inclusive) of the file range, its value +1 should be 11322 * sector size aligned. 11323 * 11324 * This should typically be used for cases where we locked an inode's VFS lock in 11325 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode, 11326 * we have flushed all delalloc in the range, we have waited for all ordered 11327 * extents in the range to complete and finally we have locked the file range in 11328 * the inode's io_tree. 
/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode: The target inode.
 * @start: Start offset of the file range, should be sector size aligned.
 * @end:   End offset (inclusive) of the file range, its value plus 1 should
 *         be sector size aligned.
 *
 * This should typically be used for cases where we locked an inode's VFS lock
 * in exclusive mode, we have also locked the inode's i_mmap_lock in exclusive
 * mode, we have flushed all delalloc in the range, we have waited for all
 * ordered extents in the range to complete and finally we have locked the
 * file range in the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), root->root_key.objectid,
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles use bmap to make
 * a mapping of extents in the file. They assume these extents won't change
 * over the life of the file and they use the bmap result to do IO directly to
 * the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't suitable for
 * IO and they also will change frequently as COW operations happen. So,
 * swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.direct_IO	= noop_direct_IO,
	.invalidate_folio = btrfs_invalidate_folio,
	.release_folio	= btrfs_release_folio,
	.migrate_folio	= btrfs_migrate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_page = generic_error_remove_page,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};