// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	bool data_space_reserved;
	bool nocow_done;
};

struct btrfs_dio_private {
	/* Range of I/O */
	u64 file_offset;
	u32 bytes;

	/* This must be last */
	struct btrfs_bio bbio;
};

static struct bio_set btrfs_dio_bioset;

struct btrfs_rename_ctx {
	/*
	 * Output field. Stores the index number of the old directory entry.
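	 * (Illustrative note: the rename code later feeds this index to
	 * btrfs_log_new_name() when logging the rename.)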
	 */
	u64 index;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock,
				   u64 *done_offset);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* Output without objectid, which is more meaningful */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			      root->root_key.objectid, btrfs_ino(inode),
			      logical_start,
			      CSUM_FMT_VALUE(csum_size, csum),
			      CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			      root->root_key.objectid, btrfs_ino(inode),
			      logical_start,
			      CSUM_FMT_VALUE(csum_size, csum),
			      CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
	}
}

/*
 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
 *
 * ilock_flags can have the following bits set:
 *
 *  BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 *  BTRFS_ILOCK_TRY - try to acquire the lock, if it fails on first attempt
 *		      return -EAGAIN
 *  BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}

/*
 * btrfs_inode_unlock - unlock inode i_rwsem
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
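 *
 * A typical pairing (illustrative only) uses matching flags on both sides:
 *
 *	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
 *	... read-only work ...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);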
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}

/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = 0, page_end = 0;
	struct page *page;

	if (locked_page) {
		page_start = page_offset(locked_page);
		page_end = page_start + PAGE_SIZE - 1;
	}

	while (index <= end_index) {
		/*
		 * For locked page, we will call end_extent_writepage() on it
		 * in run_delalloc_range() for the error handling. That
		 * end_extent_writepage() function will call
		 * btrfs_mark_ordered_io_finished() to clear page Ordered and
		 * run the ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
					       offset, bytes);
		put_page(page);
	}

	if (locked_page) {
		/* The locked page covers the full range, nothing needs to be done */
		if (bytes + offset <= page_start + PAGE_SIZE)
			return;
		/*
		 * In case this page belongs to the delalloc range being
		 * instantiated then skip it, since the first page of a range is
		 * going to be properly cleaned up by the caller of
		 * run_delalloc_range
		 */
		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
			offset = page_offset(locked_page) + PAGE_SIZE;
		}
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}

static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree. The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_local_page(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_local(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_page(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity's
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}

/*
 * conditionally insert an inline extent into the file. This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
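	 *
	 * For example (illustrative numbers): with a 4K sector size and the
	 * default max_inline of 2048 bytes, a 1000 byte file can be inlined,
	 * while a 3000 byte one cannot since it exceeds max_inline.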
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space, as an inline extent
	 * doesn't count as a data extent; free it directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
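 *
 * The checks below run in order: the subpage alignment limit first, then
 * forced compression (compress-force) and defrag requests always win, the
 * per-inode NOCOMPRESS flag vetoes, and otherwise the compression heuristic
 * decides.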
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In above case, both range A and range B will try to unlock the full
	 * page [0, 64K), causing the one that finishes later to find the page
	 * already unlocked, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * be enabled if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the tailing partial page will be locked until the full compression
	 * finishes, delaying the write of other ranges.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock all delalloc ranges
	 * first to prevent any submitted async extent from unlocking the full
	 * page. By this, we can ensure that for the subpage case only the
	 * last async_cow will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}

/*
 * we create compressed extents in two phases. The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus. The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes. This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
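 *
 * (Phase one is this function; phase two is submit_compressed_extents()
 * further below.)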
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int compressed_extents = 0;
	int redirty = 0;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it. This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages,
			 BTRFS_MAX_COMPRESSED / PAGE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time. So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
				 BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress. This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(inode, start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			nr_pages = 0;
			goto cont;
		}

		if (inode->defrag_compress)
			compress_type = inode->defrag_compress;
		else if (inode->prop_compress)
			compress_type = inode->prop_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range. Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
			redirty = 1;
		}

		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
			inode->vfs_inode.i_mapping, start,
			pages,
			&nr_pages,
			&total_in,
			&total_compressed);

		if (!ret) {
			unsigned long offset = offset_in_page(total_compressed);
			struct page *page = pages[nr_pages - 1];

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset)
				memzero_page(page, offset, PAGE_SIZE - offset);
			will_compress = 1;
		}
	}
cont:
	/*
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		/* let's try to make an inline extent */
		if (ret || total_in < actual_end) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(inode, actual_end,
						    0, BTRFS_COMPRESS_NONE,
						    NULL, false);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(inode, actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;
			unsigned long page_error_op;

			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);

			/*
			 * Ensure we only free the compressed pages if we have
			 * them allocated, as we can still reach here with
			 * inode_need_compress() == false.
			 */
			if (pages) {
				for (i = 0; i < nr_pages; i++) {
					WARN_ON(pages[i]->mapping);
					put_page(pages[i]);
				}
				kfree(pages);
			}
			return 0;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk,
		 * compression must free at least one sector size
		 */
		total_in = round_up(total_in, fs_info->sectorsize);
		if (total_compressed + blocksize <= total_in) {
			compressed_extents++;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
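			 *
			 * Each async extent covers at most
			 * BTRFS_MAX_UNCOMPRESSED bytes of input, so for a
			 * larger range we loop back to 'again' below and
			 * queue the remainder as another extent.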
			 */
			add_async_extent(async_chunk, start, total_in,
					 total_compressed, pages, nr_pages,
					 compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return compressed_extents;
		}
	}
	if (pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(inode->prop_compress)) {
			inode->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far. Redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (async_chunk->locked_page &&
	    page_offset(async_chunk->locked_page) >= start &&
	    page_offset(async_chunk->locked_page) <= end) {
		__set_page_dirty_nobuffers(async_chunk->locked_page);
		/* unlocked later on in the async handlers */
	}

	if (redirty)
		extent_range_redirty_for_io(&inode->vfs_inode, start, end);
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	compressed_extents++;

	return compressed_extents;
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

static int submit_uncompressed_range(struct btrfs_inode *inode,
				     struct async_extent *async_extent,
				     struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	unsigned long nr_written = 0;
	int page_started = 0;
	int ret;

	/*
	 * Call cow_file_range() to run the delalloc range directly, since we
	 * won't go to NOCOW or async path again.
	 *
	 * Also we call cow_file_range() with @unlock_page == 0, so that we
	 * can directly submit them without interruption.
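	 *
	 * If cow_file_range() fails, the error path below cleans up the
	 * ordered extents it created and finishes @locked_page itself.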
	 */
	ret = cow_file_range(inode, locked_page, start, end, &page_started,
			     &nr_written, 0, NULL);
	/* Inline extent inserted, page gets unlocked and everything is done */
	if (page_started) {
		ret = 0;
		goto out;
	}
	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);
			const u64 page_end = page_start + PAGE_SIZE - 1;

			btrfs_page_set_error(inode->root->fs_info, locked_page,
					     page_start, PAGE_SIZE);
			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			end_extent_writepage(locked_page, ret, page_start, page_end);
			unlock_page(locked_page);
		}
		goto out;
	}

	ret = extent_write_locked_range(&inode->vfs_inode, start, end);
	/* All pages will be unlocked, including @locked_page */
out:
	kfree(async_extent);
	return ret;
}

static int submit_one_async_extent(struct btrfs_inode *inode,
				   struct async_chunk *async_chunk,
				   struct async_extent *async_extent,
				   u64 *alloc_hint)
{
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end, NULL);

	/* We have fallen back to uncompressed write */
	if (!async_extent->pages)
		return submit_uncompressed_range(inode, async_extent, locked_page);

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		free_async_extent_pages(async_extent);
		/*
		 * Here we used to try again by going back to non-compressed
		 * path for ENOSPC. But we can't reserve space even for
		 * compressed size, how could it work for uncompressed size
		 * which requires larger size? So here we directly go error
		 * path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ret = btrfs_add_ordered_extent(inode, start,		/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (ret) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK);
	if (btrfs_submit_compressed_write(inode, start,	/* file_offset */
				    async_extent->ram_size,	/* num_bytes */
				    ins.objectid,		/* disk_bytenr */
				    ins.offset,			/* compressed_len */
				    async_extent->pages,	/* compressed_pages */
				    async_extent->nr_pages,
				    async_chunk->write_flags,
				    async_chunk->blkcg_css, true)) {
		const u64 start = async_extent->start;
		const u64 end = start + async_extent->ram_size - 1;

		btrfs_writepage_endio_finish_ordered(inode, NULL, start, end, 0);

		extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
					     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
		free_async_extent_pages(async_extent);
	}
	*alloc_hint = ins.objectid + ins.offset;
	kfree(async_extent);
	return ret;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	return ret;
}

/*
 * Phase two of compressed writeback. This is the ordered portion of the code,
 * which only gets called in the order the work was queued. We walk all the
 * async extents created by compress_file_range and send them down to the disk.
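 *
 * An individual extent failure does not stop the loop; each failed extent is
 * cleaned up inside submit_one_async_extent() and reported via btrfs_debug().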
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	int ret = 0;

	while (!list_empty(&async_chunk->extents)) {
		u64 extent_start;
		u64 ram_size;

		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		extent_start = async_extent->start;
		ram_size = async_extent->ram_size;

		ret = submit_one_async_extent(inode, async_chunk, async_extent,
					      &alloc_hint);
		btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
			    inode->root->root_key.objectid,
			    btrfs_ino(inode), extent_start, ram_size, ret);
	}
}

static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint. If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code. The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already. We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it. It may be clean and already done with
 * IO when we return.
 *
 * When unlock == 1, we unlock the pages in successfully allocated regions.
 * When unlock == 0, we leave them locked for writing them out.
 *
 * However, we unlock all the pages except @locked_page in case of failure.
 *
 * In summary, page locking state will be as follows:
 *
 * - page_started == 1 (return value)
 *     - All the pages are unlocked. IO is started.
 *     - Note that this can happen only on success
 * - unlock == 1
 *     - All the pages except @locked_page are unlocked in any case
 * - unlock == 0
 *     - On success, all the pages are locked for writing them out
 *     - On failure, all the pages except @locked_page are unlocked
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact. So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for
 * example.
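 *
 * For example (illustrative): a large delalloc range may be satisfied by
 * several smaller allocations; each iteration of the while-loop below
 * reserves one extent, creates its extent map and ordered extent, and
 * unlocks the pages it covers (when @unlock == 1) before advancing @start.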
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock,
				   u64 *done_offset)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of page, that means data writeback
	 * is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* let's try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
				(end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() unlock it,
			 * as it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't use page_started to determine if it's an
			 * inline extent or a compressed extent.
			 */
			unlock_page(locked_page);
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents.
	 * However, due to an operation such as scrub turning a block group to
	 * RO mode, it may fall back to COW mode, so we must make sure an
	 * extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ret = btrfs_add_ordered_extent(inode, start, ram_size, ram_size,
					       ins.objectid, cur_alloc_size, 0,
					       1 << BTRFS_ORDERED_REGULAR,
					       BTRFS_COMPRESS_NONE);
		if (ret)
			goto out_drop_extent_cache;

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + ram_size - 1,
							    false);
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly setup for writepage.
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * On a btrfs_reloc_clone_csums() error: since @start has been
		 * increased, extent_clear_unlock_delalloc() at the out_unlock
		 * label won't free the metadata of the current ordered extent,
		 * so we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * If done_offset is non-NULL and ret == -EAGAIN, we expect the
	 * caller to write out the successfully allocated region and retry.
	 */
	if (done_offset && ret == -EAGAIN) {
		if (orig_start < start)
			*done_offset = start - 1;
		else
			*done_offset = start;
		return ret;
	} else if (ret == -EAGAIN) {
		/* Convert to -ENOSPC since the caller cannot retry. */
		ret = -ENOSPC;
	}

	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g.
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of unlock == 0, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (!unlock && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, 0, page_ops);
	}

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
		if (start >= end)
			return ret;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
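	 *
	 * (Ranges (1) and (2) already had their data space reservation
	 * consumed when their extents were allocated, so only range (3)
	 * still carries a bytes_may_use reservation to release here.)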
	 */
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     clear_bits | EXTENT_CLEAR_DATA_RESV,
				     page_ops);
	return ret;
}

/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	int compressed_extents;

	async_chunk = container_of(work, struct async_chunk, work);

	compressed_extents = compress_file_range(async_chunk);
	if (compressed_extents == 0) {
		btrfs_add_delayed_iput(async_chunk->inode);
		async_chunk->inode = NULL;
	}
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	unsigned long nr_pages;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * ->inode could be NULL if async_chunk_start has failed to compress,
	 * in which case we don't have anything to submit, yet we need to
	 * always adjust ->async_delalloc_pages as it's paired with the init
	 * happening in cow_file_range_async
	 */
	if (async_chunk->inode)
		submit_compressed_extents(async_chunk);

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	if (async_chunk->inode)
		btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}

static int cow_file_range_async(struct btrfs_inode *inode,
				struct writeback_control *wbc,
				struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 cur_end;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	bool should_compress;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	unlock_extent(&inode->io_tree, start, end, NULL);

	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
		num_chunks = 1;
		should_compress = false;
	} else {
		should_compress = true;
	}

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!ctx) {
		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
			EXTENT_DO_ACCOUNTING;
		unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
					 PAGE_END_WRITEBACK | PAGE_SET_ERROR;

		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
		return -ENOMEM;
	}

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		if (should_compress)
			cur_end = min(end, start + SZ_512K - 1);
		else
			cur_end = end;

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given. As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to locked_page
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async. We want all of them to
			 * be accounted against wbc once. Let's do it here
			 * before the paths diverge. wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy. Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, async_cow_start,
				async_cow_submit, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
				       struct page *locked_page, u64 start,
				       u64 end, int *page_started,
				       unsigned long *nr_written)
{
	u64 done_offset = end;
	int ret;
	bool locked_page_done = false;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end, page_started,
				     nr_written, 0, &done_offset);
		if (ret && ret != -EAGAIN)
			return ret;

		if (*page_started) {
			ASSERT(ret == 0);
			return 0;
		}

		if (ret == 0)
			done_offset = end;

		if (done_offset == start) {
			wait_on_bit_io(&inode->root->fs_info->flags,
				       BTRFS_FS_NEED_ZONE_FINISH,
				       TASK_UNINTERRUPTIBLE);
			continue;
		}

		if (!locked_page_done) {
			__set_page_dirty_nobuffers(locked_page);
			account_page_redirty(locked_page);
		}
		locked_page_done = true;
		extent_write_locked_range(&inode->vfs_inode, start, done_offset);

		start = done_offset + 1;
	}

	*page_started = 1;

	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes, bool nowait)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
				      &list, 0, nowait);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}

static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end,
			   int *page_started, unsigned long *nr_written)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we did not have enough available data space and therefore we did
	 * not reserve data space for it, since we thought we could do NOCOW for
	 * the respective file range (either there is a prealloc extent or the
	 * inode has the NOCOW bit set).
	 *
	 * However when we need to fall back to COW mode (because for example the
	 * block group for the corresponding extent was turned to RO mode by a
	 * scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fall back to cow and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0, NULL);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 NULL);
	}

	return cow_file_range(inode, locked_page, start, end, page_started,
			      nr_written, 1, NULL);
}

struct can_nocow_file_extent_args {
	/* Input fields. */
*/ 1780 1781 /* Start file offset of the range we want to NOCOW. */ 1782 u64 start; 1783 /* End file offset (inclusive) of the range we want to NOCOW. */ 1784 u64 end; 1785 bool writeback_path; 1786 bool strict; 1787 /* 1788 * Free the path passed to can_nocow_file_extent() once it's not needed 1789 * anymore. 1790 */ 1791 bool free_path; 1792 1793 /* Output fields. Only set when can_nocow_file_extent() returns 1. */ 1794 1795 u64 disk_bytenr; 1796 u64 disk_num_bytes; 1797 u64 extent_offset; 1798 /* Number of bytes that can be written to in NOCOW mode. */ 1799 u64 num_bytes; 1800 }; 1801 1802 /* 1803 * Check if we can NOCOW the file extent that the path points to. 1804 * This function may return with the path released, so the caller should check 1805 * if path->nodes[0] is NULL or not if it needs to use the path afterwards. 1806 * 1807 * Returns: < 0 on error 1808 * 0 if we can not NOCOW 1809 * 1 if we can NOCOW 1810 */ 1811 static int can_nocow_file_extent(struct btrfs_path *path, 1812 struct btrfs_key *key, 1813 struct btrfs_inode *inode, 1814 struct can_nocow_file_extent_args *args) 1815 { 1816 const bool is_freespace_inode = btrfs_is_free_space_inode(inode); 1817 struct extent_buffer *leaf = path->nodes[0]; 1818 struct btrfs_root *root = inode->root; 1819 struct btrfs_file_extent_item *fi; 1820 u64 extent_end; 1821 u8 extent_type; 1822 int can_nocow = 0; 1823 int ret = 0; 1824 bool nowait = path->nowait; 1825 1826 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 1827 extent_type = btrfs_file_extent_type(leaf, fi); 1828 1829 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 1830 goto out; 1831 1832 /* Can't access these fields unless we know it's not an inline extent. */ 1833 args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1834 args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1835 args->extent_offset = btrfs_file_extent_offset(leaf, fi); 1836 1837 if (!(inode->flags & BTRFS_INODE_NODATACOW) && 1838 extent_type == BTRFS_FILE_EXTENT_REG) 1839 goto out; 1840 1841 /* 1842 * If the extent was created before the generation where the last snapshot 1843 * for its subvolume was created, then this implies the extent is shared, 1844 * hence we must COW. 1845 */ 1846 if (!args->strict && 1847 btrfs_file_extent_generation(leaf, fi) <= 1848 btrfs_root_last_snapshot(&root->root_item)) 1849 goto out; 1850 1851 /* An explicit hole, must COW. */ 1852 if (args->disk_bytenr == 0) 1853 goto out; 1854 1855 /* Compressed/encrypted/encoded extents must be COWed. */ 1856 if (btrfs_file_extent_compression(leaf, fi) || 1857 btrfs_file_extent_encryption(leaf, fi) || 1858 btrfs_file_extent_other_encoding(leaf, fi)) 1859 goto out; 1860 1861 extent_end = btrfs_file_extent_end(path); 1862 1863 /* 1864 * The following checks can be expensive, as they need to take other 1865 * locks and do btree or rbtree searches, so release the path to avoid 1866 * blocking other tasks for too long. 1867 */ 1868 btrfs_release_path(path); 1869 1870 ret = btrfs_cross_ref_exist(root, btrfs_ino(inode), 1871 key->offset - args->extent_offset, 1872 args->disk_bytenr, false, path); 1873 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1874 if (ret != 0) 1875 goto out; 1876 1877 if (args->free_path) { 1878 /* 1879 * We don't need the path anymore, plus through the 1880 * csum_exist_in_range() call below we will end up allocating 1881 * another path. So free the path to avoid unnecessary extra 1882 * memory usage. 
1883 */ 1884 btrfs_free_path(path); 1885 path = NULL; 1886 } 1887 1888 /* If there are pending snapshots for this root, we must COW. */ 1889 if (args->writeback_path && !is_freespace_inode && 1890 atomic_read(&root->snapshot_force_cow)) 1891 goto out; 1892 1893 args->disk_bytenr += args->extent_offset; 1894 args->disk_bytenr += args->start - key->offset; 1895 args->num_bytes = min(args->end + 1, extent_end) - args->start; 1896 1897 /* 1898 * Force COW if csums exist in the range. This ensures that csums for a 1899 * given extent are either valid or do not exist. 1900 */ 1901 ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes, 1902 nowait); 1903 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1904 if (ret != 0) 1905 goto out; 1906 1907 can_nocow = 1; 1908 out: 1909 if (args->free_path && path) 1910 btrfs_free_path(path); 1911 1912 return ret < 0 ? ret : can_nocow; 1913 } 1914 1915 /* 1916 * Run the NOCOW writeback path. This checks for snapshots or COW copies 1917 * of the extents that exist in the file, and COWs the file as required. 1918 * 1919 * If no COW copies or snapshots exist, we write directly to the existing 1920 * blocks on disk. 1921 */ 1922 static noinline int run_delalloc_nocow(struct btrfs_inode *inode, 1923 struct page *locked_page, 1924 const u64 start, const u64 end, 1925 int *page_started, 1926 unsigned long *nr_written) 1927 { 1928 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1929 struct btrfs_root *root = inode->root; 1930 struct btrfs_path *path; 1931 u64 cow_start = (u64)-1; 1932 u64 cur_offset = start; 1933 int ret; 1934 bool check_prev = true; 1935 u64 ino = btrfs_ino(inode); 1936 struct btrfs_block_group *bg; 1937 bool nocow = false; 1938 struct can_nocow_file_extent_args nocow_args = { 0 }; 1939 1940 path = btrfs_alloc_path(); 1941 if (!path) { 1942 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1943 EXTENT_LOCKED | EXTENT_DELALLOC | 1944 EXTENT_DO_ACCOUNTING | 1945 EXTENT_DEFRAG, PAGE_UNLOCK | 1946 PAGE_START_WRITEBACK | 1947 PAGE_END_WRITEBACK); 1948 return -ENOMEM; 1949 } 1950 1951 nocow_args.end = end; 1952 nocow_args.writeback_path = true; 1953 1954 while (1) { 1955 struct btrfs_key found_key; 1956 struct btrfs_file_extent_item *fi; 1957 struct extent_buffer *leaf; 1958 u64 extent_end; 1959 u64 ram_bytes; 1960 u64 nocow_end; 1961 int extent_type; 1962 1963 nocow = false; 1964 1965 ret = btrfs_lookup_file_extent(NULL, root, path, ino, 1966 cur_offset, 0); 1967 if (ret < 0) 1968 goto error; 1969 1970 /* 1971 * If there is no extent for our range when doing the initial 1972 * search, then go back to the previous slot as it will be the 1973 * one containing the search offset 1974 */ 1975 if (ret > 0 && path->slots[0] > 0 && check_prev) { 1976 leaf = path->nodes[0]; 1977 btrfs_item_key_to_cpu(leaf, &found_key, 1978 path->slots[0] - 1); 1979 if (found_key.objectid == ino && 1980 found_key.type == BTRFS_EXTENT_DATA_KEY) 1981 path->slots[0]--; 1982 } 1983 check_prev = false; 1984 next_slot: 1985 /* Go to next leaf if we have exhausted the current one */ 1986 leaf = path->nodes[0]; 1987 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1988 ret = btrfs_next_leaf(root, path); 1989 if (ret < 0) { 1990 if (cow_start != (u64)-1) 1991 cur_offset = cow_start; 1992 goto error; 1993 } 1994 if (ret > 0) 1995 break; 1996 leaf = path->nodes[0]; 1997 } 1998 1999 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2000 2001 /* Didn't find anything for our INO */ 2002 if (found_key.objectid > ino) 2003 break; 2004 /* 2005 * Keep
searching until we find an EXTENT_ITEM or there are no 2006 * more extents for this inode 2007 */ 2008 if (WARN_ON_ONCE(found_key.objectid < ino) || 2009 found_key.type < BTRFS_EXTENT_DATA_KEY) { 2010 path->slots[0]++; 2011 goto next_slot; 2012 } 2013 2014 /* Found key is not EXTENT_DATA_KEY or starts after the requested range */ 2015 if (found_key.type > BTRFS_EXTENT_DATA_KEY || 2016 found_key.offset > end) 2017 break; 2018 2019 /* 2020 * If the found extent starts after the requested offset, then 2021 * adjust extent_end to be right before this extent begins 2022 */ 2023 if (found_key.offset > cur_offset) { 2024 extent_end = found_key.offset; 2025 extent_type = 0; 2026 goto out_check; 2027 } 2028 2029 /* 2030 * Found an extent which begins before our range and potentially 2031 * intersects it 2032 */ 2033 fi = btrfs_item_ptr(leaf, path->slots[0], 2034 struct btrfs_file_extent_item); 2035 extent_type = btrfs_file_extent_type(leaf, fi); 2036 /* If this is triggered then we have a memory corruption. */ 2037 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES); 2038 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) { 2039 ret = -EUCLEAN; 2040 goto error; 2041 } 2042 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 2043 extent_end = btrfs_file_extent_end(path); 2044 2045 /* 2046 * If the extent we got ends before our current offset, skip to 2047 * the next extent. 2048 */ 2049 if (extent_end <= cur_offset) { 2050 path->slots[0]++; 2051 goto next_slot; 2052 } 2053 2054 nocow_args.start = cur_offset; 2055 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args); 2056 if (ret < 0) { 2057 if (cow_start != (u64)-1) 2058 cur_offset = cow_start; 2059 goto error; 2060 } else if (ret == 0) { 2061 goto out_check; 2062 } 2063 2064 ret = 0; 2065 bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr); 2066 if (bg) 2067 nocow = true; 2068 out_check: 2069 /* 2070 * If nocow is false then record the beginning of the range 2071 * that needs to be COWed 2072 */ 2073 if (!nocow) { 2074 if (cow_start == (u64)-1) 2075 cow_start = cur_offset; 2076 cur_offset = extent_end; 2077 if (cur_offset > end) 2078 break; 2079 if (!path->nodes[0]) 2080 continue; 2081 path->slots[0]++; 2082 goto next_slot; 2083 } 2084 2085 /* 2086 * COW range from cow_start to found_key.offset - 1.
As the key 2087 * contains the beginning of the first extent that can be 2088 * NOCOWed, which follows one that needs to be COWed. 2089 */ 2090 if (cow_start != (u64)-1) { 2091 ret = fallback_to_cow(inode, locked_page, 2092 cow_start, found_key.offset - 1, 2093 page_started, nr_written); 2094 if (ret) 2095 goto error; 2096 cow_start = (u64)-1; 2097 } 2098 2099 nocow_end = cur_offset + nocow_args.num_bytes - 1; 2100 2101 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 2102 u64 orig_start = found_key.offset - nocow_args.extent_offset; 2103 struct extent_map *em; 2104 2105 em = create_io_em(inode, cur_offset, nocow_args.num_bytes, 2106 orig_start, 2107 nocow_args.disk_bytenr, /* block_start */ 2108 nocow_args.num_bytes, /* block_len */ 2109 nocow_args.disk_num_bytes, /* orig_block_len */ 2110 ram_bytes, BTRFS_COMPRESS_NONE, 2111 BTRFS_ORDERED_PREALLOC); 2112 if (IS_ERR(em)) { 2113 ret = PTR_ERR(em); 2114 goto error; 2115 } 2116 free_extent_map(em); 2117 ret = btrfs_add_ordered_extent(inode, 2118 cur_offset, nocow_args.num_bytes, 2119 nocow_args.num_bytes, 2120 nocow_args.disk_bytenr, 2121 nocow_args.num_bytes, 0, 2122 1 << BTRFS_ORDERED_PREALLOC, 2123 BTRFS_COMPRESS_NONE); 2124 if (ret) { 2125 btrfs_drop_extent_map_range(inode, cur_offset, 2126 nocow_end, false); 2127 goto error; 2128 } 2129 } else { 2130 ret = btrfs_add_ordered_extent(inode, cur_offset, 2131 nocow_args.num_bytes, 2132 nocow_args.num_bytes, 2133 nocow_args.disk_bytenr, 2134 nocow_args.num_bytes, 2135 0, 2136 1 << BTRFS_ORDERED_NOCOW, 2137 BTRFS_COMPRESS_NONE); 2138 if (ret) 2139 goto error; 2140 } 2141 2142 if (nocow) { 2143 btrfs_dec_nocow_writers(bg); 2144 nocow = false; 2145 } 2146 2147 if (btrfs_is_data_reloc_root(root)) 2148 /* 2149 * Errors are handled later, as we must prevent 2150 * extent_clear_unlock_delalloc() in the error handler 2151 * from freeing metadata of the created ordered extent. 2152 */ 2153 ret = btrfs_reloc_clone_csums(inode, cur_offset, 2154 nocow_args.num_bytes); 2155 2156 extent_clear_unlock_delalloc(inode, cur_offset, nocow_end, 2157 locked_page, EXTENT_LOCKED | 2158 EXTENT_DELALLOC | 2159 EXTENT_CLEAR_DATA_RESV, 2160 PAGE_UNLOCK | PAGE_SET_ORDERED); 2161 2162 cur_offset = extent_end; 2163 2164 /* 2165 * On btrfs_reloc_clone_csums() error it is now OK to call the error 2166 * handler, as metadata for the created ordered extent will only 2167 * be freed by btrfs_finish_ordered_io.
2168 */ 2169 if (ret) 2170 goto error; 2171 if (cur_offset > end) 2172 break; 2173 } 2174 btrfs_release_path(path); 2175 2176 if (cur_offset <= end && cow_start == (u64)-1) 2177 cow_start = cur_offset; 2178 2179 if (cow_start != (u64)-1) { 2180 cur_offset = end; 2181 ret = fallback_to_cow(inode, locked_page, cow_start, end, 2182 page_started, nr_written); 2183 if (ret) 2184 goto error; 2185 } 2186 2187 error: 2188 if (nocow) 2189 btrfs_dec_nocow_writers(bg); 2190 2191 if (ret && cur_offset < end) 2192 extent_clear_unlock_delalloc(inode, cur_offset, end, 2193 locked_page, EXTENT_LOCKED | 2194 EXTENT_DELALLOC | EXTENT_DEFRAG | 2195 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 2196 PAGE_START_WRITEBACK | 2197 PAGE_END_WRITEBACK); 2198 btrfs_free_path(path); 2199 return ret; 2200 } 2201 2202 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end) 2203 { 2204 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { 2205 if (inode->defrag_bytes && 2206 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, 2207 0, NULL)) 2208 return false; 2209 return true; 2210 } 2211 return false; 2212 } 2213 2214 /* 2215 * Function to process delayed allocation (create CoW) for ranges which are 2216 * being touched for the first time. 2217 */ 2218 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page, 2219 u64 start, u64 end, int *page_started, unsigned long *nr_written, 2220 struct writeback_control *wbc) 2221 { 2222 int ret; 2223 const bool zoned = btrfs_is_zoned(inode->root->fs_info); 2224 2225 /* 2226 * The range must cover part of the @locked_page, or the returned 2227 * @page_started can confuse the caller. 2228 */ 2229 ASSERT(!(end <= page_offset(locked_page) || 2230 start >= page_offset(locked_page) + PAGE_SIZE)); 2231 2232 if (should_nocow(inode, start, end)) { 2233 /* 2234 * Normally on a zoned device we're only doing COW writes, but 2235 * in the case of relocation on a zoned filesystem we have taken 2236 * the precaution of only writing sequentially, so it's safe 2237 * to use run_delalloc_nocow() here, as for regular 2238 * preallocated inodes. 2239 */ 2240 ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root)); 2241 ret = run_delalloc_nocow(inode, locked_page, start, end, 2242 page_started, nr_written); 2243 } else if (!btrfs_inode_can_compress(inode) || 2244 !inode_need_compress(inode, start, end)) { 2245 if (zoned) 2246 ret = run_delalloc_zoned(inode, locked_page, start, end, 2247 page_started, nr_written); 2248 else 2249 ret = cow_file_range(inode, locked_page, start, end, 2250 page_started, nr_written, 1, NULL); 2251 } else { 2252 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); 2253 ret = cow_file_range_async(inode, wbc, locked_page, start, end, 2254 page_started, nr_written); 2255 } 2256 ASSERT(ret <= 0); 2257 if (ret) 2258 btrfs_cleanup_ordered_extents(inode, locked_page, start, 2259 end - start + 1); 2260 return ret; 2261 } 2262 2263 void btrfs_split_delalloc_extent(struct btrfs_inode *inode, 2264 struct extent_state *orig, u64 split) 2265 { 2266 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2267 u64 size; 2268 2269 /* not delalloc, ignore it */ 2270 if (!(orig->state & EXTENT_DELALLOC)) 2271 return; 2272 2273 size = orig->end - orig->start + 1; 2274 if (size > fs_info->max_extent_size) { 2275 u32 num_extents; 2276 u64 new_size; 2277 2278 /* 2279 * See the explanation in btrfs_merge_delalloc_extent(); the same 2280 * applies here, just in reverse.
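* For example (illustrative numbers, with a max extent size of 128M): splitting a 128M+4K delalloc extent exactly at the 128M boundary leaves two pieces that need one outstanding extent each, the same two the original already accounted for, so nothing changes and we return early. Splitting any extent that is at most 128M always turns one accounted extent into two, hence the +1 below.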
2281 */ 2282 new_size = orig->end - split + 1; 2283 num_extents = count_max_extents(fs_info, new_size); 2284 new_size = split - orig->start; 2285 num_extents += count_max_extents(fs_info, new_size); 2286 if (count_max_extents(fs_info, size) >= num_extents) 2287 return; 2288 } 2289 2290 spin_lock(&inode->lock); 2291 btrfs_mod_outstanding_extents(inode, 1); 2292 spin_unlock(&inode->lock); 2293 } 2294 2295 /* 2296 * Handle merged delayed allocation extents so we can keep track of new extents 2297 * that are just merged onto old extents, such as when we are doing sequential 2298 * writes, so we can properly account for the metadata space we'll need. 2299 */ 2300 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new, 2301 struct extent_state *other) 2302 { 2303 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2304 u64 new_size, old_size; 2305 u32 num_extents; 2306 2307 /* not delalloc, ignore it */ 2308 if (!(other->state & EXTENT_DELALLOC)) 2309 return; 2310 2311 if (new->start > other->start) 2312 new_size = new->end - other->start + 1; 2313 else 2314 new_size = other->end - new->start + 1; 2315 2316 /* we're not bigger than the max, unreserve the space and go */ 2317 if (new_size <= fs_info->max_extent_size) { 2318 spin_lock(&inode->lock); 2319 btrfs_mod_outstanding_extents(inode, -1); 2320 spin_unlock(&inode->lock); 2321 return; 2322 } 2323 2324 /* 2325 * We have to add up either side to figure out how many extents were 2326 * accounted for before we merged into one big extent. If the number of 2327 * extents we accounted for is <= the amount we need for the new range 2328 * then we can return, otherwise drop. Think of it like this 2329 * 2330 * [ 4k][MAX_SIZE] 2331 * 2332 * So we've grown the extent by a MAX_SIZE extent, this would mean we 2333 * need 2 outstanding extents, on one side we have 1 and the other side 2334 * we have 1 so they are == and we can return. But in this case 2335 * 2336 * [MAX_SIZE+4k][MAX_SIZE+4k] 2337 * 2338 * Each range on their own accounts for 2 extents, but merged together 2339 * they are only 3 extents worth of accounting, so we need to drop in 2340 * this case. 
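* In the second case the arithmetic is: 2 + 2 = 4 extents were accounted for the two ranges, while count_max_extents() of the merged range (roughly DIV_ROUND_UP(size, max_extent_size)) is only 3, so one outstanding extent is released below. A single merge can release at most one extent, which is why one -1 is enough.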
2341 */ 2342 old_size = other->end - other->start + 1; 2343 num_extents = count_max_extents(fs_info, old_size); 2344 old_size = new->end - new->start + 1; 2345 num_extents += count_max_extents(fs_info, old_size); 2346 if (count_max_extents(fs_info, new_size) >= num_extents) 2347 return; 2348 2349 spin_lock(&inode->lock); 2350 btrfs_mod_outstanding_extents(inode, -1); 2351 spin_unlock(&inode->lock); 2352 } 2353 2354 static void btrfs_add_delalloc_inodes(struct btrfs_root *root, 2355 struct btrfs_inode *inode) 2356 { 2357 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2358 2359 spin_lock(&root->delalloc_lock); 2360 if (list_empty(&inode->delalloc_inodes)) { 2361 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes); 2362 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags); 2363 root->nr_delalloc_inodes++; 2364 if (root->nr_delalloc_inodes == 1) { 2365 spin_lock(&fs_info->delalloc_root_lock); 2366 BUG_ON(!list_empty(&root->delalloc_root)); 2367 list_add_tail(&root->delalloc_root, 2368 &fs_info->delalloc_roots); 2369 spin_unlock(&fs_info->delalloc_root_lock); 2370 } 2371 } 2372 spin_unlock(&root->delalloc_lock); 2373 } 2374 2375 void __btrfs_del_delalloc_inode(struct btrfs_root *root, 2376 struct btrfs_inode *inode) 2377 { 2378 struct btrfs_fs_info *fs_info = root->fs_info; 2379 2380 if (!list_empty(&inode->delalloc_inodes)) { 2381 list_del_init(&inode->delalloc_inodes); 2382 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2383 &inode->runtime_flags); 2384 root->nr_delalloc_inodes--; 2385 if (!root->nr_delalloc_inodes) { 2386 ASSERT(list_empty(&root->delalloc_inodes)); 2387 spin_lock(&fs_info->delalloc_root_lock); 2388 BUG_ON(list_empty(&root->delalloc_root)); 2389 list_del_init(&root->delalloc_root); 2390 spin_unlock(&fs_info->delalloc_root_lock); 2391 } 2392 } 2393 } 2394 2395 static void btrfs_del_delalloc_inode(struct btrfs_root *root, 2396 struct btrfs_inode *inode) 2397 { 2398 spin_lock(&root->delalloc_lock); 2399 __btrfs_del_delalloc_inode(root, inode); 2400 spin_unlock(&root->delalloc_lock); 2401 } 2402 2403 /* 2404 * Properly track delayed allocation bytes in the inode and maintain the 2405 * list of inodes that have pending delalloc work to be done.
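* When the first delalloc byte is added, btrfs_add_delalloc_inodes() above links the inode into root->delalloc_inodes and, for the root's first such inode, links the root into fs_info->delalloc_roots; the clearing side undoes this once delalloc_bytes drops back to zero.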
2406 */ 2407 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state, 2408 u32 bits) 2409 { 2410 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2411 2412 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC)) 2413 WARN_ON(1); 2414 /* 2415 * set_bit and clear bit hooks normally require _irqsave/restore 2416 * but in this case, we are only testing for the DELALLOC 2417 * bit, which is only set or cleared with irqs on 2418 */ 2419 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2420 struct btrfs_root *root = inode->root; 2421 u64 len = state->end + 1 - state->start; 2422 u32 num_extents = count_max_extents(fs_info, len); 2423 bool do_list = !btrfs_is_free_space_inode(inode); 2424 2425 spin_lock(&inode->lock); 2426 btrfs_mod_outstanding_extents(inode, num_extents); 2427 spin_unlock(&inode->lock); 2428 2429 /* For sanity tests */ 2430 if (btrfs_is_testing(fs_info)) 2431 return; 2432 2433 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 2434 fs_info->delalloc_batch); 2435 spin_lock(&inode->lock); 2436 inode->delalloc_bytes += len; 2437 if (bits & EXTENT_DEFRAG) 2438 inode->defrag_bytes += len; 2439 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2440 &inode->runtime_flags)) 2441 btrfs_add_delalloc_inodes(root, inode); 2442 spin_unlock(&inode->lock); 2443 } 2444 2445 if (!(state->state & EXTENT_DELALLOC_NEW) && 2446 (bits & EXTENT_DELALLOC_NEW)) { 2447 spin_lock(&inode->lock); 2448 inode->new_delalloc_bytes += state->end + 1 - state->start; 2449 spin_unlock(&inode->lock); 2450 } 2451 } 2452 2453 /* 2454 * Once a range is no longer delalloc this function ensures that proper 2455 * accounting happens. 2456 */ 2457 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode, 2458 struct extent_state *state, u32 bits) 2459 { 2460 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2461 u64 len = state->end + 1 - state->start; 2462 u32 num_extents = count_max_extents(fs_info, len); 2463 2464 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) { 2465 spin_lock(&inode->lock); 2466 inode->defrag_bytes -= len; 2467 spin_unlock(&inode->lock); 2468 } 2469 2470 /* 2471 * set_bit and clear bit hooks normally require _irqsave/restore 2472 * but in this case, we are only testing for the DELALLOC 2473 * bit, which is only set or cleared with irqs on 2474 */ 2475 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2476 struct btrfs_root *root = inode->root; 2477 bool do_list = !btrfs_is_free_space_inode(inode); 2478 2479 spin_lock(&inode->lock); 2480 btrfs_mod_outstanding_extents(inode, -num_extents); 2481 spin_unlock(&inode->lock); 2482 2483 /* 2484 * We don't reserve metadata space for space cache inodes so we 2485 * don't need to call delalloc_release_metadata if there is an 2486 * error. 2487 */ 2488 if (bits & EXTENT_CLEAR_META_RESV && 2489 root != fs_info->tree_root) 2490 btrfs_delalloc_release_metadata(inode, len, false); 2491 2492 /* For sanity tests. 
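* (btrfs_is_testing() is only true for the built-in self-tests, which run on a dummy fs_info, so the counter updates below are skipped there)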
*/ 2493 if (btrfs_is_testing(fs_info)) 2494 return; 2495 2496 if (!btrfs_is_data_reloc_root(root) && 2497 do_list && !(state->state & EXTENT_NORESERVE) && 2498 (bits & EXTENT_CLEAR_DATA_RESV)) 2499 btrfs_free_reserved_data_space_noquota(fs_info, len); 2500 2501 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 2502 fs_info->delalloc_batch); 2503 spin_lock(&inode->lock); 2504 inode->delalloc_bytes -= len; 2505 if (do_list && inode->delalloc_bytes == 0 && 2506 test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2507 &inode->runtime_flags)) 2508 btrfs_del_delalloc_inode(root, inode); 2509 spin_unlock(&inode->lock); 2510 } 2511 2512 if ((state->state & EXTENT_DELALLOC_NEW) && 2513 (bits & EXTENT_DELALLOC_NEW)) { 2514 spin_lock(&inode->lock); 2515 ASSERT(inode->new_delalloc_bytes >= len); 2516 inode->new_delalloc_bytes -= len; 2517 if (bits & EXTENT_ADD_INODE_BYTES) 2518 inode_add_bytes(&inode->vfs_inode, len); 2519 spin_unlock(&inode->lock); 2520 } 2521 } 2522 2523 /* 2524 * Split an extent_map at [start, start + len] 2525 * 2526 * This function is intended to be used only for extract_ordered_extent(). 2527 */ 2528 static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len, 2529 u64 pre, u64 post) 2530 { 2531 struct extent_map_tree *em_tree = &inode->extent_tree; 2532 struct extent_map *em; 2533 struct extent_map *split_pre = NULL; 2534 struct extent_map *split_mid = NULL; 2535 struct extent_map *split_post = NULL; 2536 int ret = 0; 2537 unsigned long flags; 2538 2539 /* Sanity check */ 2540 if (pre == 0 && post == 0) 2541 return 0; 2542 2543 split_pre = alloc_extent_map(); 2544 if (pre) 2545 split_mid = alloc_extent_map(); 2546 if (post) 2547 split_post = alloc_extent_map(); 2548 if (!split_pre || (pre && !split_mid) || (post && !split_post)) { 2549 ret = -ENOMEM; 2550 goto out; 2551 } 2552 2553 ASSERT(pre + post < len); 2554 2555 lock_extent(&inode->io_tree, start, start + len - 1, NULL); 2556 write_lock(&em_tree->lock); 2557 em = lookup_extent_mapping(em_tree, start, len); 2558 if (!em) { 2559 ret = -EIO; 2560 goto out_unlock; 2561 } 2562 2563 ASSERT(em->len == len); 2564 ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)); 2565 ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE); 2566 ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags)); 2567 ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags)); 2568 ASSERT(!list_empty(&em->list)); 2569 2570 flags = em->flags; 2571 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 2572 2573 /* First, replace the em with a new extent_map starting from em->start */ 2574 split_pre->start = em->start; 2575 split_pre->len = (pre ?
pre : em->len - post); 2576 split_pre->orig_start = split_pre->start; 2577 split_pre->block_start = em->block_start; 2578 split_pre->block_len = split_pre->len; 2579 split_pre->orig_block_len = split_pre->block_len; 2580 split_pre->ram_bytes = split_pre->len; 2581 split_pre->flags = flags; 2582 split_pre->compress_type = em->compress_type; 2583 split_pre->generation = em->generation; 2584 2585 replace_extent_mapping(em_tree, em, split_pre, 1); 2586 2587 /* 2588 * Now we only have an extent_map at: 2589 * [em->start, em->start + pre] if pre != 0 2590 * [em->start, em->start + em->len - post] if pre == 0 2591 */ 2592 2593 if (pre) { 2594 /* Insert the middle extent_map */ 2595 split_mid->start = em->start + pre; 2596 split_mid->len = em->len - pre - post; 2597 split_mid->orig_start = split_mid->start; 2598 split_mid->block_start = em->block_start + pre; 2599 split_mid->block_len = split_mid->len; 2600 split_mid->orig_block_len = split_mid->block_len; 2601 split_mid->ram_bytes = split_mid->len; 2602 split_mid->flags = flags; 2603 split_mid->compress_type = em->compress_type; 2604 split_mid->generation = em->generation; 2605 add_extent_mapping(em_tree, split_mid, 1); 2606 } 2607 2608 if (post) { 2609 split_post->start = em->start + em->len - post; 2610 split_post->len = post; 2611 split_post->orig_start = split_post->start; 2612 split_post->block_start = em->block_start + em->len - post; 2613 split_post->block_len = split_post->len; 2614 split_post->orig_block_len = split_post->block_len; 2615 split_post->ram_bytes = split_post->len; 2616 split_post->flags = flags; 2617 split_post->compress_type = em->compress_type; 2618 split_post->generation = em->generation; 2619 add_extent_mapping(em_tree, split_post, 1); 2620 } 2621 2622 /* Once for us */ 2623 free_extent_map(em); 2624 /* Once for the tree */ 2625 free_extent_map(em); 2626 2627 out_unlock: 2628 write_unlock(&em_tree->lock); 2629 unlock_extent(&inode->io_tree, start, start + len - 1, NULL); 2630 out: 2631 free_extent_map(split_pre); 2632 free_extent_map(split_mid); 2633 free_extent_map(split_post); 2634 2635 return ret; 2636 } 2637 2638 blk_status_t btrfs_extract_ordered_extent(struct btrfs_bio *bbio) 2639 { 2640 u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; 2641 u64 len = bbio->bio.bi_iter.bi_size; 2642 struct btrfs_inode *inode = bbio->inode; 2643 struct btrfs_ordered_extent *ordered; 2644 u64 file_len; 2645 u64 end = start + len; 2646 u64 ordered_end; 2647 u64 pre, post; 2648 int ret = 0; 2649 2650 ordered = btrfs_lookup_ordered_extent(inode, bbio->file_offset); 2651 if (WARN_ON_ONCE(!ordered)) 2652 return BLK_STS_IOERR; 2653 2654 /* No need to split */ 2655 if (ordered->disk_num_bytes == len) 2656 goto out; 2657 2658 /* We cannot split an ordered extent once it has been end_bio'd */ 2659 if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) { 2660 ret = -EINVAL; 2661 goto out; 2662 } 2663 2664 /* We cannot split a compressed ordered extent */ 2665 if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) { 2666 ret = -EINVAL; 2667 goto out; 2668 } 2669 2670 ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes; 2671 /* The bio must be within one ordered extent */ 2672 if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) { 2673 ret = -EINVAL; 2674 goto out; 2675 } 2676 2677 /* Checksum list should be empty */ 2678 if (WARN_ON_ONCE(!list_empty(&ordered->list))) { 2679 ret = -EINVAL; 2680 goto out; 2681 } 2682 2683 file_len = ordered->num_bytes; 2684 pre = start - ordered->disk_bytenr; 2685 post =
ordered_end - end; 2686 2687 ret = btrfs_split_ordered_extent(ordered, pre, post); 2688 if (ret) 2689 goto out; 2690 ret = split_zoned_em(inode, bbio->file_offset, file_len, pre, post); 2691 2692 out: 2693 btrfs_put_ordered_extent(ordered); 2694 2695 return errno_to_blk_status(ret); 2696 } 2697 2698 /* 2699 * Given a list of ordered sums, record them in the inode. This happens 2700 * at I/O completion time, based on sums calculated at bio submission time. 2701 */ 2702 static int add_pending_csums(struct btrfs_trans_handle *trans, 2703 struct list_head *list) 2704 { 2705 struct btrfs_ordered_sum *sum; 2706 struct btrfs_root *csum_root = NULL; 2707 int ret; 2708 2709 list_for_each_entry(sum, list, list) { 2710 trans->adding_csums = true; 2711 if (!csum_root) 2712 csum_root = btrfs_csum_root(trans->fs_info, 2713 sum->bytenr); 2714 ret = btrfs_csum_file_blocks(trans, csum_root, sum); 2715 trans->adding_csums = false; 2716 if (ret) 2717 return ret; 2718 } 2719 return 0; 2720 } 2721 2722 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, 2723 const u64 start, 2724 const u64 len, 2725 struct extent_state **cached_state) 2726 { 2727 u64 search_start = start; 2728 const u64 end = start + len - 1; 2729 2730 while (search_start < end) { 2731 const u64 search_len = end - search_start + 1; 2732 struct extent_map *em; 2733 u64 em_len; 2734 int ret = 0; 2735 2736 em = btrfs_get_extent(inode, NULL, 0, search_start, search_len); 2737 if (IS_ERR(em)) 2738 return PTR_ERR(em); 2739 2740 if (em->block_start != EXTENT_MAP_HOLE) 2741 goto next; 2742 2743 em_len = em->len; 2744 if (em->start < search_start) 2745 em_len -= search_start - em->start; 2746 if (em_len > search_len) 2747 em_len = search_len; 2748 2749 ret = set_extent_bit(&inode->io_tree, search_start, 2750 search_start + em_len - 1, 2751 EXTENT_DELALLOC_NEW, cached_state, 2752 GFP_NOFS); 2753 next: 2754 search_start = extent_map_end(em); 2755 free_extent_map(em); 2756 if (ret) 2757 return ret; 2758 } 2759 return 0; 2760 } 2761 2762 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 2763 unsigned int extra_bits, 2764 struct extent_state **cached_state) 2765 { 2766 WARN_ON(PAGE_ALIGNED(end)); 2767 2768 if (start >= i_size_read(&inode->vfs_inode) && 2769 !(inode->flags & BTRFS_INODE_PREALLOC)) { 2770 /* 2771 * There can't be any extents following EOF in this case, so just 2772 * set the delalloc new bit for the range directly.
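* Otherwise, btrfs_find_new_delalloc_bytes() above walks the range with btrfs_get_extent() and sets EXTENT_DELALLOC_NEW only on the holes, since parts of the range already backed by extents are not new.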
2773 */ 2774 extra_bits |= EXTENT_DELALLOC_NEW; 2775 } else { 2776 int ret; 2777 2778 ret = btrfs_find_new_delalloc_bytes(inode, start, 2779 end + 1 - start, 2780 cached_state); 2781 if (ret) 2782 return ret; 2783 } 2784 2785 return set_extent_delalloc(&inode->io_tree, start, end, extra_bits, 2786 cached_state); 2787 } 2788 2789 /* see btrfs_writepage_start_hook for details on why this is required */ 2790 struct btrfs_writepage_fixup { 2791 struct page *page; 2792 struct btrfs_inode *inode; 2793 struct btrfs_work work; 2794 }; 2795 2796 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2797 { 2798 struct btrfs_writepage_fixup *fixup; 2799 struct btrfs_ordered_extent *ordered; 2800 struct extent_state *cached_state = NULL; 2801 struct extent_changeset *data_reserved = NULL; 2802 struct page *page; 2803 struct btrfs_inode *inode; 2804 u64 page_start; 2805 u64 page_end; 2806 int ret = 0; 2807 bool free_delalloc_space = true; 2808 2809 fixup = container_of(work, struct btrfs_writepage_fixup, work); 2810 page = fixup->page; 2811 inode = fixup->inode; 2812 page_start = page_offset(page); 2813 page_end = page_offset(page) + PAGE_SIZE - 1; 2814 2815 /* 2816 * This is similar to page_mkwrite, we need to reserve the space before 2817 * we take the page lock. 2818 */ 2819 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2820 PAGE_SIZE); 2821 again: 2822 lock_page(page); 2823 2824 /* 2825 * Before we queued this fixup, we took a reference on the page. 2826 * page->mapping may go NULL, but it shouldn't be moved to a different 2827 * address space. 2828 */ 2829 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 2830 /* 2831 * Unfortunately this is a little tricky, either 2832 * 2833 * 1) We got here and our page had already been dealt with and 2834 * we reserved our space, thus ret == 0, so we need to just 2835 * drop our space reservation and bail. This can happen the 2836 * first time we come into the fixup worker, or could happen 2837 * while waiting for the ordered extent. 2838 * 2) Our page was already dealt with, but we happened to get an 2839 * ENOSPC above from the btrfs_delalloc_reserve_space. In 2840 * this case we obviously don't have anything to release, but 2841 * because the page was already dealt with we don't want to 2842 * mark the page with an error, so make sure we're resetting 2843 * ret to 0. This is why we have this check _before_ the ret 2844 * check, because we do not want to have a surprise ENOSPC 2845 * when the page was already properly dealt with. 2846 */ 2847 if (!ret) { 2848 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2849 btrfs_delalloc_release_space(inode, data_reserved, 2850 page_start, PAGE_SIZE, 2851 true); 2852 } 2853 ret = 0; 2854 goto out_page; 2855 } 2856 2857 /* 2858 * We can't mess with the page state unless it is locked, so now that 2859 * it is locked bail if we failed to make our space reservation. 2860 */ 2861 if (ret) 2862 goto out_page; 2863 2864 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2865 2866 /* already ordered? 
We're done */ 2867 if (PageOrdered(page)) 2868 goto out_reserved; 2869 2870 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); 2871 if (ordered) { 2872 unlock_extent(&inode->io_tree, page_start, page_end, 2873 &cached_state); 2874 unlock_page(page); 2875 btrfs_start_ordered_extent(ordered); 2876 btrfs_put_ordered_extent(ordered); 2877 goto again; 2878 } 2879 2880 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 2881 &cached_state); 2882 if (ret) 2883 goto out_reserved; 2884 2885 /* 2886 * Everything went as planned, we're now the owner of a dirty page with 2887 * delayed allocation bits set and space reserved for our COW 2888 * destination. 2889 * 2890 * The page was dirty when we started, nothing should have cleaned it. 2891 */ 2892 BUG_ON(!PageDirty(page)); 2893 free_delalloc_space = false; 2894 out_reserved: 2895 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2896 if (free_delalloc_space) 2897 btrfs_delalloc_release_space(inode, data_reserved, page_start, 2898 PAGE_SIZE, true); 2899 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2900 out_page: 2901 if (ret) { 2902 /* 2903 * We hit ENOSPC or other errors. Update the mapping and page 2904 * to reflect the errors and clean the page. 2905 */ 2906 mapping_set_error(page->mapping, ret); 2907 end_extent_writepage(page, ret, page_start, page_end); 2908 clear_page_dirty_for_io(page); 2909 SetPageError(page); 2910 } 2911 btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE); 2912 unlock_page(page); 2913 put_page(page); 2914 kfree(fixup); 2915 extent_changeset_free(data_reserved); 2916 /* 2917 * As a precaution, do a delayed iput in case it would be the last iput 2918 * that could need flushing space. Recursing back to fixup worker would 2919 * deadlock. 2920 */ 2921 btrfs_add_delayed_iput(inode); 2922 } 2923 2924 /* 2925 * There are a few paths in the higher layers of the kernel that directly 2926 * set the page dirty bit without asking the filesystem if it is a 2927 * good idea. This causes problems because we want to make sure COW 2928 * properly happens and the data=ordered rules are followed. 2929 * 2930 * In our case any range that doesn't have the ORDERED bit set 2931 * hasn't been properly setup for IO. We kick off an async process 2932 * to fix it up. The async helper will wait for ordered extents, set 2933 * the delalloc bit and make it safe to write the page. 2934 */ 2935 int btrfs_writepage_cow_fixup(struct page *page) 2936 { 2937 struct inode *inode = page->mapping->host; 2938 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2939 struct btrfs_writepage_fixup *fixup; 2940 2941 /* This page has ordered extent covering it already */ 2942 if (PageOrdered(page)) 2943 return 0; 2944 2945 /* 2946 * PageChecked is set below when we create a fixup worker for this page, 2947 * don't try to create another one if we're already PageChecked() 2948 * 2949 * The extent_io writepage code will redirty the page if we send back 2950 * EAGAIN. 2951 */ 2952 if (PageChecked(page)) 2953 return -EAGAIN; 2954 2955 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2956 if (!fixup) 2957 return -EAGAIN; 2958 2959 /* 2960 * We are already holding a reference to this inode from 2961 * write_cache_pages. We need to hold it because the space reservation 2962 * takes place outside of the page lock, and we can't trust 2963 * page->mapping outside of the page lock. 
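* The reference taken here is dropped by the btrfs_add_delayed_iput() call at the end of btrfs_writepage_fixup_worker() above.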
2964 */ 2965 ihold(inode); 2966 btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE); 2967 get_page(page); 2968 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); 2969 fixup->page = page; 2970 fixup->inode = BTRFS_I(inode); 2971 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); 2972 2973 return -EAGAIN; 2974 } 2975 2976 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2977 struct btrfs_inode *inode, u64 file_pos, 2978 struct btrfs_file_extent_item *stack_fi, 2979 const bool update_inode_bytes, 2980 u64 qgroup_reserved) 2981 { 2982 struct btrfs_root *root = inode->root; 2983 const u64 sectorsize = root->fs_info->sectorsize; 2984 struct btrfs_path *path; 2985 struct extent_buffer *leaf; 2986 struct btrfs_key ins; 2987 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); 2988 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi); 2989 u64 offset = btrfs_stack_file_extent_offset(stack_fi); 2990 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi); 2991 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi); 2992 struct btrfs_drop_extents_args drop_args = { 0 }; 2993 int ret; 2994 2995 path = btrfs_alloc_path(); 2996 if (!path) 2997 return -ENOMEM; 2998 2999 /* 3000 * We may be replacing one extent in the tree with another. 3001 * The new extent is pinned in the extent map, and we don't want 3002 * to drop it from the cache until it is completely in the btree. 3003 * 3004 * So, tell btrfs_drop_extents() to leave this extent in the cache. 3005 * The caller is expected to unpin it and allow it to be merged 3006 * with the others. 3007 */ 3008 drop_args.path = path; 3009 drop_args.start = file_pos; 3010 drop_args.end = file_pos + num_bytes; 3011 drop_args.replace_extent = true; 3012 drop_args.extent_item_size = sizeof(*stack_fi); 3013 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 3014 if (ret) 3015 goto out; 3016 3017 if (!drop_args.extent_inserted) { 3018 ins.objectid = btrfs_ino(inode); 3019 ins.offset = file_pos; 3020 ins.type = BTRFS_EXTENT_DATA_KEY; 3021 3022 ret = btrfs_insert_empty_item(trans, root, path, &ins, 3023 sizeof(*stack_fi)); 3024 if (ret) 3025 goto out; 3026 } 3027 leaf = path->nodes[0]; 3028 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); 3029 write_extent_buffer(leaf, stack_fi, 3030 btrfs_item_ptr_offset(leaf, path->slots[0]), 3031 sizeof(struct btrfs_file_extent_item)); 3032 3033 btrfs_mark_buffer_dirty(leaf); 3034 btrfs_release_path(path); 3035 3036 /* 3037 * If we dropped an inline extent here, we know the range where it was 3038 * located was not marked with the EXTENT_DELALLOC_NEW bit, so we update 3039 * the number of bytes only for the range containing the inline extent. 3040 * The remainder of the range will be processed when clearing the 3041 * EXTENT_DELALLOC bit through ordered extent completion.
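* For example (illustrative numbers): with a 4K sector size, if we dropped a 500 byte inline extent at file offset 0, the check below settles the first sector right away, adding 4096 bytes for the new extent and subtracting the 500 bytes of dropped inline data, and excludes that sector from the accounting done later for the rest of the range.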
3042 */ 3043 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) { 3044 u64 inline_size = round_down(drop_args.bytes_found, sectorsize); 3045 3046 inline_size = drop_args.bytes_found - inline_size; 3047 btrfs_update_inode_bytes(inode, sectorsize, inline_size); 3048 drop_args.bytes_found -= inline_size; 3049 num_bytes -= sectorsize; 3050 } 3051 3052 if (update_inode_bytes) 3053 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found); 3054 3055 ins.objectid = disk_bytenr; 3056 ins.offset = disk_num_bytes; 3057 ins.type = BTRFS_EXTENT_ITEM_KEY; 3058 3059 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes); 3060 if (ret) 3061 goto out; 3062 3063 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode), 3064 file_pos - offset, 3065 qgroup_reserved, &ins); 3066 out: 3067 btrfs_free_path(path); 3068 3069 return ret; 3070 } 3071 3072 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, 3073 u64 start, u64 len) 3074 { 3075 struct btrfs_block_group *cache; 3076 3077 cache = btrfs_lookup_block_group(fs_info, start); 3078 ASSERT(cache); 3079 3080 spin_lock(&cache->lock); 3081 cache->delalloc_bytes -= len; 3082 spin_unlock(&cache->lock); 3083 3084 btrfs_put_block_group(cache); 3085 } 3086 3087 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans, 3088 struct btrfs_ordered_extent *oe) 3089 { 3090 struct btrfs_file_extent_item stack_fi; 3091 bool update_inode_bytes; 3092 u64 num_bytes = oe->num_bytes; 3093 u64 ram_bytes = oe->ram_bytes; 3094 3095 memset(&stack_fi, 0, sizeof(stack_fi)); 3096 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG); 3097 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); 3098 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, 3099 oe->disk_num_bytes); 3100 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset); 3101 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) { 3102 num_bytes = oe->truncated_len; 3103 ram_bytes = num_bytes; 3104 } 3105 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes); 3106 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes); 3107 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); 3108 /* Encryption and other encoding is reserved and all 0 */ 3109 3110 /* 3111 * For delalloc, when completing an ordered extent we update the inode's 3112 * bytes when clearing the range in the inode's io tree, so pass false 3113 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(), 3114 * except if the ordered extent was truncated. 3115 */ 3116 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) || 3117 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) || 3118 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags); 3119 3120 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), 3121 oe->file_offset, &stack_fi, 3122 update_inode_bytes, oe->qgroup_rsv); 3123 } 3124 3125 /* 3126 * As ordered data IO finishes, this gets called so we can finish 3127 * an ordered extent if the range of bytes in the file it covers are 3128 * fully written. 
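* Roughly, this: rewrites zoned logical addresses if needed, inserts the file extent item (or marks a prealloc extent as written), records the pending csums, clears the delalloc accounting bits, updates the inode item, and finally removes the ordered extent, returning reserved space on error.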
3129 */ 3130 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) 3131 { 3132 struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode); 3133 struct btrfs_root *root = inode->root; 3134 struct btrfs_fs_info *fs_info = root->fs_info; 3135 struct btrfs_trans_handle *trans = NULL; 3136 struct extent_io_tree *io_tree = &inode->io_tree; 3137 struct extent_state *cached_state = NULL; 3138 u64 start, end; 3139 int compress_type = 0; 3140 int ret = 0; 3141 u64 logical_len = ordered_extent->num_bytes; 3142 bool freespace_inode; 3143 bool truncated = false; 3144 bool clear_reserved_extent = true; 3145 unsigned int clear_bits = EXTENT_DEFRAG; 3146 3147 start = ordered_extent->file_offset; 3148 end = start + ordered_extent->num_bytes - 1; 3149 3150 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3151 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 3152 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && 3153 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) 3154 clear_bits |= EXTENT_DELALLOC_NEW; 3155 3156 freespace_inode = btrfs_is_free_space_inode(inode); 3157 if (!freespace_inode) 3158 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent); 3159 3160 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 3161 ret = -EIO; 3162 goto out; 3163 } 3164 3165 /* A valid ->physical implies a write on a sequential zone. */ 3166 if (ordered_extent->physical != (u64)-1) { 3167 btrfs_rewrite_logical_zoned(ordered_extent); 3168 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, 3169 ordered_extent->disk_num_bytes); 3170 } 3171 3172 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 3173 truncated = true; 3174 logical_len = ordered_extent->truncated_len; 3175 /* Truncated the entire extent, don't bother adding */ 3176 if (!logical_len) 3177 goto out; 3178 } 3179 3180 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3181 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 3182 3183 btrfs_inode_safe_disk_i_size_write(inode, 0); 3184 if (freespace_inode) 3185 trans = btrfs_join_transaction_spacecache(root); 3186 else 3187 trans = btrfs_join_transaction(root); 3188 if (IS_ERR(trans)) { 3189 ret = PTR_ERR(trans); 3190 trans = NULL; 3191 goto out; 3192 } 3193 trans->block_rsv = &inode->block_rsv; 3194 ret = btrfs_update_inode_fallback(trans, root, inode); 3195 if (ret) /* -ENOMEM or corruption */ 3196 btrfs_abort_transaction(trans, ret); 3197 goto out; 3198 } 3199 3200 clear_bits |= EXTENT_LOCKED; 3201 lock_extent(io_tree, start, end, &cached_state); 3202 3203 if (freespace_inode) 3204 trans = btrfs_join_transaction_spacecache(root); 3205 else 3206 trans = btrfs_join_transaction(root); 3207 if (IS_ERR(trans)) { 3208 ret = PTR_ERR(trans); 3209 trans = NULL; 3210 goto out; 3211 } 3212 3213 trans->block_rsv = &inode->block_rsv; 3214 3215 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 3216 compress_type = ordered_extent->compress_type; 3217 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3218 BUG_ON(compress_type); 3219 ret = btrfs_mark_extent_written(trans, inode, 3220 ordered_extent->file_offset, 3221 ordered_extent->file_offset + 3222 logical_len); 3223 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, 3224 ordered_extent->disk_num_bytes); 3225 } else { 3226 BUG_ON(root == fs_info->tree_root); 3227 ret = insert_ordered_extent_file_extent(trans, ordered_extent); 3228 if (!ret) { 3229 clear_reserved_extent = false; 3230 
btrfs_release_delalloc_bytes(fs_info, 3231 ordered_extent->disk_bytenr, 3232 ordered_extent->disk_num_bytes); 3233 } 3234 } 3235 unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset, 3236 ordered_extent->num_bytes, trans->transid); 3237 if (ret < 0) { 3238 btrfs_abort_transaction(trans, ret); 3239 goto out; 3240 } 3241 3242 ret = add_pending_csums(trans, &ordered_extent->list); 3243 if (ret) { 3244 btrfs_abort_transaction(trans, ret); 3245 goto out; 3246 } 3247 3248 /* 3249 * If this is a new delalloc range, clear its new delalloc flag to 3250 * update the inode's number of bytes. This needs to be done first 3251 * before updating the inode item. 3252 */ 3253 if ((clear_bits & EXTENT_DELALLOC_NEW) && 3254 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) 3255 clear_extent_bit(&inode->io_tree, start, end, 3256 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, 3257 &cached_state); 3258 3259 btrfs_inode_safe_disk_i_size_write(inode, 0); 3260 ret = btrfs_update_inode_fallback(trans, root, inode); 3261 if (ret) { /* -ENOMEM or corruption */ 3262 btrfs_abort_transaction(trans, ret); 3263 goto out; 3264 } 3265 ret = 0; 3266 out: 3267 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 3268 &cached_state); 3269 3270 if (trans) 3271 btrfs_end_transaction(trans); 3272 3273 if (ret || truncated) { 3274 u64 unwritten_start = start; 3275 3276 /* 3277 * If we failed to finish this ordered extent for any reason we 3278 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered 3279 * extent, and mark the inode with the error if it wasn't 3280 * already set. Any error during writeback would have already 3281 * set the mapping error, so we need to set it if we're the ones 3282 * marking this ordered extent as failed. 3283 */ 3284 if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR, 3285 &ordered_extent->flags)) 3286 mapping_set_error(ordered_extent->inode->i_mapping, -EIO); 3287 3288 if (truncated) 3289 unwritten_start += logical_len; 3290 clear_extent_uptodate(io_tree, unwritten_start, end, NULL); 3291 3292 /* Drop extent maps for the part of the extent we didn't write. */ 3293 btrfs_drop_extent_map_range(inode, unwritten_start, end, false); 3294 3295 /* 3296 * If the ordered extent had an IOERR or something else went 3297 * wrong we need to return the space for this ordered extent 3298 * back to the allocator. We only free the extent in the 3299 * truncated case if we didn't write out the extent at all. 3300 * 3301 * If we made it past insert_reserved_file_extent before we 3302 * errored out then we don't need to do this as the accounting 3303 * has already been done. 3304 */ 3305 if ((ret || !logical_len) && 3306 clear_reserved_extent && 3307 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3308 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3309 /* 3310 * Discard the range before returning it back to the 3311 * free space pool 3312 */ 3313 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC)) 3314 btrfs_discard_extent(fs_info, 3315 ordered_extent->disk_bytenr, 3316 ordered_extent->disk_num_bytes, 3317 NULL); 3318 btrfs_free_reserved_extent(fs_info, 3319 ordered_extent->disk_bytenr, 3320 ordered_extent->disk_num_bytes, 1); 3321 } 3322 } 3323 3324 /* 3325 * This needs to be done to make sure anybody waiting knows we are done 3326 * updating everything for this ordered extent. 
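* (for example a task blocked in btrfs_start_ordered_extent(), like the writepage fixup worker above)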
3327 */ 3328 btrfs_remove_ordered_extent(inode, ordered_extent); 3329 3330 /* once for us */ 3331 btrfs_put_ordered_extent(ordered_extent); 3332 /* once for the tree */ 3333 btrfs_put_ordered_extent(ordered_extent); 3334 3335 return ret; 3336 } 3337 3338 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode, 3339 struct page *page, u64 start, 3340 u64 end, bool uptodate) 3341 { 3342 trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate); 3343 3344 btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start, uptodate); 3345 } 3346 3347 /* 3348 * Verify the checksum for a single sector without any extra actions that depend 3349 * on the type of I/O. 3350 */ 3351 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page, 3352 u32 pgoff, u8 *csum, const u8 * const csum_expected) 3353 { 3354 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 3355 char *kaddr; 3356 3357 ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE); 3358 3359 shash->tfm = fs_info->csum_shash; 3360 3361 kaddr = kmap_local_page(page) + pgoff; 3362 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); 3363 kunmap_local(kaddr); 3364 3365 if (memcmp(csum, csum_expected, fs_info->csum_size)) 3366 return -EIO; 3367 return 0; 3368 } 3369 3370 static u8 *btrfs_csum_ptr(const struct btrfs_fs_info *fs_info, u8 *csums, u64 offset) 3371 { 3372 u64 offset_in_sectors = offset >> fs_info->sectorsize_bits; 3373 3374 return csums + offset_in_sectors * fs_info->csum_size; 3375 } 3376 3377 /* 3378 * Verify the checksum of a single data sector. 3379 * 3380 * @bbio: btrfs_bio which contains the csum 3381 * @dev: device the sector is on 3382 * @bio_offset: offset to the beginning of the bio (in bytes) 3383 * @bv: bio_vec to check 3384 * 3385 * Check if the checksum on a data block is valid. When a checksum mismatch is 3386 * detected, report the error and fill the corrupted range with zero. 3387 * 3388 * Return %true if the sector is ok or had no checksum to start with, else %false.
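* Note the data relocation special case below: ranges explicitly marked EXTENT_NODATASUM on a data reloc inode are accepted without verification.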
3389 */ 3390 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, 3391 u32 bio_offset, struct bio_vec *bv) 3392 { 3393 struct btrfs_inode *inode = bbio->inode; 3394 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3395 u64 file_offset = bbio->file_offset + bio_offset; 3396 u64 end = file_offset + bv->bv_len - 1; 3397 u8 *csum_expected; 3398 u8 csum[BTRFS_CSUM_SIZE]; 3399 3400 ASSERT(bv->bv_len == fs_info->sectorsize); 3401 3402 if (!bbio->csum) 3403 return true; 3404 3405 if (btrfs_is_data_reloc_root(inode->root) && 3406 test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, 3407 1, NULL)) { 3408 /* Skip the range without csum for data reloc inode */ 3409 clear_extent_bits(&inode->io_tree, file_offset, end, 3410 EXTENT_NODATASUM); 3411 return true; 3412 } 3413 3414 csum_expected = btrfs_csum_ptr(fs_info, bbio->csum, bio_offset); 3415 if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum, 3416 csum_expected)) 3417 goto zeroit; 3418 return true; 3419 3420 zeroit: 3421 btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected, 3422 bbio->mirror_num); 3423 if (dev) 3424 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); 3425 memzero_bvec(bv); 3426 return false; 3427 } 3428 3429 /* 3430 * btrfs_add_delayed_iput - perform a delayed iput on @inode 3431 * 3432 * @inode: The inode we want to perform iput on 3433 * 3434 * This function uses the generic vfs_inode::i_count to track whether we should 3435 * just decrement it (in case it's > 1) or if this is the last iput then link 3436 * the inode to the delayed iput machinery. Delayed iputs are processed at 3437 * transaction commit time/superblock commit/cleaner kthread. 3438 */ 3439 void btrfs_add_delayed_iput(struct btrfs_inode *inode) 3440 { 3441 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3442 3443 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1)) 3444 return; 3445 3446 atomic_inc(&fs_info->nr_delayed_iputs); 3447 spin_lock(&fs_info->delayed_iput_lock); 3448 ASSERT(list_empty(&inode->delayed_iput)); 3449 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); 3450 spin_unlock(&fs_info->delayed_iput_lock); 3451 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3452 wake_up_process(fs_info->cleaner_kthread); 3453 } 3454 3455 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3456 struct btrfs_inode *inode) 3457 { 3458 list_del_init(&inode->delayed_iput); 3459 spin_unlock(&fs_info->delayed_iput_lock); 3460 iput(&inode->vfs_inode); 3461 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3462 wake_up(&fs_info->delayed_iputs_wait); 3463 spin_lock(&fs_info->delayed_iput_lock); 3464 } 3465 3466 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3467 struct btrfs_inode *inode) 3468 { 3469 if (!list_empty(&inode->delayed_iput)) { 3470 spin_lock(&fs_info->delayed_iput_lock); 3471 if (!list_empty(&inode->delayed_iput)) 3472 run_delayed_iput_locked(fs_info, inode); 3473 spin_unlock(&fs_info->delayed_iput_lock); 3474 } 3475 } 3476 3477 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3478 { 3479 3480 spin_lock(&fs_info->delayed_iput_lock); 3481 while (!list_empty(&fs_info->delayed_iputs)) { 3482 struct btrfs_inode *inode; 3483 3484 inode = list_first_entry(&fs_info->delayed_iputs, 3485 struct btrfs_inode, delayed_iput); 3486 run_delayed_iput_locked(fs_info, inode); 3487 cond_resched_lock(&fs_info->delayed_iput_lock); 3488 } 3489 spin_unlock(&fs_info->delayed_iput_lock); 3490 } 3491 3492 /* 3493 * Wait for 
all delayed iputs to finish 3494 * 3495 * @fs_info: the filesystem 3496 * 3497 * This waits, in a killable sleep, on any delayed iputs that are currently 3498 * running. Once they are all done running we will return, unless we are 3499 * killed, in which case we return -EINTR. This helps user operations like 3500 * fallocate that might get blocked on the iputs. 3501 * 3502 * Return -EINTR if we were killed, 0 if nothing's pending 3503 */ 3504 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3505 { 3506 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3507 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3508 if (ret) 3509 return -EINTR; 3510 return 0; 3511 } 3512 3513 /* 3514 * This creates an orphan entry for the given inode in case something goes wrong 3515 * in the middle of an unlink. 3516 */ 3517 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3518 struct btrfs_inode *inode) 3519 { 3520 int ret; 3521 3522 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3523 if (ret && ret != -EEXIST) { 3524 btrfs_abort_transaction(trans, ret); 3525 return ret; 3526 } 3527 3528 return 0; 3529 } 3530 3531 /* 3532 * We have done the delete so we can go ahead and remove the orphan item for 3533 * this particular inode. 3534 */ 3535 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3536 struct btrfs_inode *inode) 3537 { 3538 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3539 } 3540 3541 /* 3542 * This cleans up any orphans that may be left on the list from the last use 3543 * of this root. 3544 */ 3545 int btrfs_orphan_cleanup(struct btrfs_root *root) 3546 { 3547 struct btrfs_fs_info *fs_info = root->fs_info; 3548 struct btrfs_path *path; 3549 struct extent_buffer *leaf; 3550 struct btrfs_key key, found_key; 3551 struct btrfs_trans_handle *trans; 3552 struct inode *inode; 3553 u64 last_objectid = 0; 3554 int ret = 0, nr_unlink = 0; 3555 3556 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) 3557 return 0; 3558 3559 path = btrfs_alloc_path(); 3560 if (!path) { 3561 ret = -ENOMEM; 3562 goto out; 3563 } 3564 path->reada = READA_BACK; 3565 3566 key.objectid = BTRFS_ORPHAN_OBJECTID; 3567 key.type = BTRFS_ORPHAN_ITEM_KEY; 3568 key.offset = (u64)-1; 3569 3570 while (1) { 3571 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3572 if (ret < 0) 3573 goto out; 3574 3575 /* 3576 * ret == 0 means we found what we were searching for, which 3577 * is weird, but possible, so only screw with the path if we 3578 * didn't find the key and see if we have stuff that matches 3579 */ 3580 if (ret > 0) { 3581 ret = 0; 3582 if (path->slots[0] == 0) 3583 break; 3584 path->slots[0]--; 3585 } 3586 3587 /* pull out the item */ 3588 leaf = path->nodes[0]; 3589 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3590 3591 /* make sure the item matches what we want */ 3592 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3593 break; 3594 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3595 break; 3596 3597 /* release the path since we're done with it */ 3598 btrfs_release_path(path); 3599 3600 /* 3601 * This is where we basically do a btrfs_lookup(), without the 3602 * root-crossing part. We store the inode number in the 3603 * offset of the orphan item.
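* An orphan item key is (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, inode number), which is why the search above starts at offset (u64)-1 and walks the matching items back to front.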
 */

		if (found_key.offset == last_objectid) {
			btrfs_err(fs_info,
	"Error removing orphan entry, stopping orphan cleanup");
			ret = -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, last_objectid, root);
		ret = PTR_ERR_OR_ZERO(inode);
		if (ret && ret != -ENOENT)
			goto out;

		if (ret == -ENOENT && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			int is_dead_root = 0;

			/*
			 * This is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a root (snapshot/subvolume) deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the orphan item
			 * for a root must not get deleted before the deletion
			 * of the snapshot/subvolume's tree completes.
			 *
			 * btrfs_find_orphan_roots() ran before us, which has
			 * found all deleted roots and loaded them into
			 * fs_info->fs_roots_radix. So here we can find if an
			 * orphan item corresponds to a deleted root by looking
			 * up the root from that radix tree.
			 */

			spin_lock(&fs_info->fs_roots_radix_lock);
			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
						      (unsigned long)found_key.objectid);
			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
				is_dead_root = 1;
			spin_unlock(&fs_info->fs_roots_radix_lock);

			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}
		}

		/*
		 * If we have an inode with links, there are a couple of
		 * possibilities:
		 *
		 * 1. We were halfway through creating fsverity metadata for the
		 *    file. In that case, the orphan item represents incomplete
		 *    fsverity metadata which must be cleaned up with
		 *    btrfs_drop_verity_items and deleting the orphan item.
		 *
		 * 2. Old kernels (before v3.12) used to create an
		 *    orphan item for truncate indicating that there were possibly
		 *    extent items past i_size that needed to be deleted. In v3.12,
		 *    truncate was changed to update i_size in sync with the extent
		 *    items, but the (useless) orphan item was still created. Since
		 *    v4.18, we don't create the orphan item for truncate at all.
		 *
		 * So, this item could mean that we need to do a truncate, but
		 * only if this filesystem was last used on a pre-v3.12 kernel
		 * and was not cleanly unmounted. The odds of that are quite
		 * slim, and it's a pain to do the truncate now, so just delete
		 * the orphan item.
		 *
		 * It's also possible that this orphan item was supposed to be
		 * deleted but wasn't. The inode number may have been reused,
		 * but either way, we can delete the orphan item.
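		 *
		 * Either way, the branch below only has to distinguish "inode
		 * is gone or still linked" (drop any verity items if the inode
		 * is present, then delete the orphan item) from "inode has no
		 * links left" (let the final iput further below do the real
		 * deletion).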
3683 */ 3684 if (ret == -ENOENT || inode->i_nlink) { 3685 if (!ret) { 3686 ret = btrfs_drop_verity_items(BTRFS_I(inode)); 3687 iput(inode); 3688 if (ret) 3689 goto out; 3690 } 3691 trans = btrfs_start_transaction(root, 1); 3692 if (IS_ERR(trans)) { 3693 ret = PTR_ERR(trans); 3694 goto out; 3695 } 3696 btrfs_debug(fs_info, "auto deleting %Lu", 3697 found_key.objectid); 3698 ret = btrfs_del_orphan_item(trans, root, 3699 found_key.objectid); 3700 btrfs_end_transaction(trans); 3701 if (ret) 3702 goto out; 3703 continue; 3704 } 3705 3706 nr_unlink++; 3707 3708 /* this will do delete_inode and everything for us */ 3709 iput(inode); 3710 } 3711 /* release the path since we're done with it */ 3712 btrfs_release_path(path); 3713 3714 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3715 trans = btrfs_join_transaction(root); 3716 if (!IS_ERR(trans)) 3717 btrfs_end_transaction(trans); 3718 } 3719 3720 if (nr_unlink) 3721 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3722 3723 out: 3724 if (ret) 3725 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3726 btrfs_free_path(path); 3727 return ret; 3728 } 3729 3730 /* 3731 * very simple check to peek ahead in the leaf looking for xattrs. If we 3732 * don't find any xattrs, we know there can't be any acls. 3733 * 3734 * slot is the slot the inode is in, objectid is the objectid of the inode 3735 */ 3736 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3737 int slot, u64 objectid, 3738 int *first_xattr_slot) 3739 { 3740 u32 nritems = btrfs_header_nritems(leaf); 3741 struct btrfs_key found_key; 3742 static u64 xattr_access = 0; 3743 static u64 xattr_default = 0; 3744 int scanned = 0; 3745 3746 if (!xattr_access) { 3747 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3748 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3749 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3750 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3751 } 3752 3753 slot++; 3754 *first_xattr_slot = -1; 3755 while (slot < nritems) { 3756 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3757 3758 /* we found a different objectid, there must not be acls */ 3759 if (found_key.objectid != objectid) 3760 return 0; 3761 3762 /* we found an xattr, assume we've got an acl */ 3763 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3764 if (*first_xattr_slot == -1) 3765 *first_xattr_slot = slot; 3766 if (found_key.offset == xattr_access || 3767 found_key.offset == xattr_default) 3768 return 1; 3769 } 3770 3771 /* 3772 * we found a key greater than an xattr key, there can't 3773 * be any acls later on 3774 */ 3775 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3776 return 0; 3777 3778 slot++; 3779 scanned++; 3780 3781 /* 3782 * it goes inode, inode backrefs, xattrs, extents, 3783 * so if there are a ton of hard links to an inode there can 3784 * be a lot of backrefs. Don't waste time searching too hard, 3785 * this is just an optimization 3786 */ 3787 if (scanned >= 8) 3788 break; 3789 } 3790 /* we hit the end of the leaf before we found an xattr or 3791 * something larger than an xattr. 
We have to assume the inode 3792 * has acls 3793 */ 3794 if (*first_xattr_slot == -1) 3795 *first_xattr_slot = slot; 3796 return 1; 3797 } 3798 3799 /* 3800 * read an inode from the btree into the in-memory inode 3801 */ 3802 static int btrfs_read_locked_inode(struct inode *inode, 3803 struct btrfs_path *in_path) 3804 { 3805 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3806 struct btrfs_path *path = in_path; 3807 struct extent_buffer *leaf; 3808 struct btrfs_inode_item *inode_item; 3809 struct btrfs_root *root = BTRFS_I(inode)->root; 3810 struct btrfs_key location; 3811 unsigned long ptr; 3812 int maybe_acls; 3813 u32 rdev; 3814 int ret; 3815 bool filled = false; 3816 int first_xattr_slot; 3817 3818 ret = btrfs_fill_inode(inode, &rdev); 3819 if (!ret) 3820 filled = true; 3821 3822 if (!path) { 3823 path = btrfs_alloc_path(); 3824 if (!path) 3825 return -ENOMEM; 3826 } 3827 3828 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3829 3830 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3831 if (ret) { 3832 if (path != in_path) 3833 btrfs_free_path(path); 3834 return ret; 3835 } 3836 3837 leaf = path->nodes[0]; 3838 3839 if (filled) 3840 goto cache_index; 3841 3842 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3843 struct btrfs_inode_item); 3844 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3845 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3846 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3847 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3848 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 3849 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0, 3850 round_up(i_size_read(inode), fs_info->sectorsize)); 3851 3852 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 3853 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 3854 3855 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 3856 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 3857 3858 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); 3859 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); 3860 3861 BTRFS_I(inode)->i_otime.tv_sec = 3862 btrfs_timespec_sec(leaf, &inode_item->otime); 3863 BTRFS_I(inode)->i_otime.tv_nsec = 3864 btrfs_timespec_nsec(leaf, &inode_item->otime); 3865 3866 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3867 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3868 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3869 3870 inode_set_iversion_queried(inode, 3871 btrfs_inode_sequence(leaf, inode_item)); 3872 inode->i_generation = BTRFS_I(inode)->generation; 3873 inode->i_rdev = 0; 3874 rdev = btrfs_inode_rdev(leaf, inode_item); 3875 3876 BTRFS_I(inode)->index_cnt = (u64)-1; 3877 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), 3878 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); 3879 3880 cache_index: 3881 /* 3882 * If we were modified in the current generation and evicted from memory 3883 * and then re-read we need to do a full sync since we don't have any 3884 * idea about which extents were modified before we were evicted from 3885 * cache. 3886 * 3887 * This is required for both inode re-read from disk and delayed inode 3888 * in delayed_nodes_tree. 
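	 *
	 * Setting BTRFS_INODE_NEEDS_FULL_SYNC below makes the next fsync of
	 * this inode fall back to logging it in full, instead of relying on
	 * the fine-grained modification tracking that eviction threw away.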
3889 */ 3890 if (BTRFS_I(inode)->last_trans == fs_info->generation) 3891 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3892 &BTRFS_I(inode)->runtime_flags); 3893 3894 /* 3895 * We don't persist the id of the transaction where an unlink operation 3896 * against the inode was last made. So here we assume the inode might 3897 * have been evicted, and therefore the exact value of last_unlink_trans 3898 * lost, and set it to last_trans to avoid metadata inconsistencies 3899 * between the inode and its parent if the inode is fsync'ed and the log 3900 * replayed. For example, in the scenario: 3901 * 3902 * touch mydir/foo 3903 * ln mydir/foo mydir/bar 3904 * sync 3905 * unlink mydir/bar 3906 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3907 * xfs_io -c fsync mydir/foo 3908 * <power failure> 3909 * mount fs, triggers fsync log replay 3910 * 3911 * We must make sure that when we fsync our inode foo we also log its 3912 * parent inode, otherwise after log replay the parent still has the 3913 * dentry with the "bar" name but our inode foo has a link count of 1 3914 * and doesn't have an inode ref with the name "bar" anymore. 3915 * 3916 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3917 * but it guarantees correctness at the expense of occasional full 3918 * transaction commits on fsync if our inode is a directory, or if our 3919 * inode is not a directory, logging its parent unnecessarily. 3920 */ 3921 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3922 3923 /* 3924 * Same logic as for last_unlink_trans. We don't persist the generation 3925 * of the last transaction where this inode was used for a reflink 3926 * operation, so after eviction and reloading the inode we must be 3927 * pessimistic and assume the last transaction that modified the inode. 
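	 * As with last_unlink_trans above, this pessimism at worst causes
	 * some unnecessary work on the next fsync, not an incorrect log.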
3928 */ 3929 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; 3930 3931 path->slots[0]++; 3932 if (inode->i_nlink != 1 || 3933 path->slots[0] >= btrfs_header_nritems(leaf)) 3934 goto cache_acl; 3935 3936 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3937 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 3938 goto cache_acl; 3939 3940 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3941 if (location.type == BTRFS_INODE_REF_KEY) { 3942 struct btrfs_inode_ref *ref; 3943 3944 ref = (struct btrfs_inode_ref *)ptr; 3945 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3946 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3947 struct btrfs_inode_extref *extref; 3948 3949 extref = (struct btrfs_inode_extref *)ptr; 3950 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3951 extref); 3952 } 3953 cache_acl: 3954 /* 3955 * try to precache a NULL acl entry for files that don't have 3956 * any xattrs or acls 3957 */ 3958 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3959 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 3960 if (first_xattr_slot != -1) { 3961 path->slots[0] = first_xattr_slot; 3962 ret = btrfs_load_inode_props(inode, path); 3963 if (ret) 3964 btrfs_err(fs_info, 3965 "error loading props for ino %llu (root %llu): %d", 3966 btrfs_ino(BTRFS_I(inode)), 3967 root->root_key.objectid, ret); 3968 } 3969 if (path != in_path) 3970 btrfs_free_path(path); 3971 3972 if (!maybe_acls) 3973 cache_no_acl(inode); 3974 3975 switch (inode->i_mode & S_IFMT) { 3976 case S_IFREG: 3977 inode->i_mapping->a_ops = &btrfs_aops; 3978 inode->i_fop = &btrfs_file_operations; 3979 inode->i_op = &btrfs_file_inode_operations; 3980 break; 3981 case S_IFDIR: 3982 inode->i_fop = &btrfs_dir_file_operations; 3983 inode->i_op = &btrfs_dir_inode_operations; 3984 break; 3985 case S_IFLNK: 3986 inode->i_op = &btrfs_symlink_inode_operations; 3987 inode_nohighmem(inode); 3988 inode->i_mapping->a_ops = &btrfs_aops; 3989 break; 3990 default: 3991 inode->i_op = &btrfs_special_inode_operations; 3992 init_special_inode(inode, inode->i_mode, rdev); 3993 break; 3994 } 3995 3996 btrfs_sync_inode_flags_to_i_flags(inode); 3997 return 0; 3998 } 3999 4000 /* 4001 * given a leaf and an inode, copy the inode fields into the leaf 4002 */ 4003 static void fill_inode_item(struct btrfs_trans_handle *trans, 4004 struct extent_buffer *leaf, 4005 struct btrfs_inode_item *item, 4006 struct inode *inode) 4007 { 4008 struct btrfs_map_token token; 4009 u64 flags; 4010 4011 btrfs_init_map_token(&token, leaf); 4012 4013 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 4014 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 4015 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 4016 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 4017 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 4018 4019 btrfs_set_token_timespec_sec(&token, &item->atime, 4020 inode->i_atime.tv_sec); 4021 btrfs_set_token_timespec_nsec(&token, &item->atime, 4022 inode->i_atime.tv_nsec); 4023 4024 btrfs_set_token_timespec_sec(&token, &item->mtime, 4025 inode->i_mtime.tv_sec); 4026 btrfs_set_token_timespec_nsec(&token, &item->mtime, 4027 inode->i_mtime.tv_nsec); 4028 4029 btrfs_set_token_timespec_sec(&token, &item->ctime, 4030 inode->i_ctime.tv_sec); 4031 btrfs_set_token_timespec_nsec(&token, &item->ctime, 4032 inode->i_ctime.tv_nsec); 4033 4034 btrfs_set_token_timespec_sec(&token, &item->otime, 4035 BTRFS_I(inode)->i_otime.tv_sec); 4036 
btrfs_set_token_timespec_nsec(&token, &item->otime, 4037 BTRFS_I(inode)->i_otime.tv_nsec); 4038 4039 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 4040 btrfs_set_token_inode_generation(&token, item, 4041 BTRFS_I(inode)->generation); 4042 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 4043 btrfs_set_token_inode_transid(&token, item, trans->transid); 4044 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 4045 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 4046 BTRFS_I(inode)->ro_flags); 4047 btrfs_set_token_inode_flags(&token, item, flags); 4048 btrfs_set_token_inode_block_group(&token, item, 0); 4049 } 4050 4051 /* 4052 * copy everything in the in-memory inode into the btree. 4053 */ 4054 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 4055 struct btrfs_root *root, 4056 struct btrfs_inode *inode) 4057 { 4058 struct btrfs_inode_item *inode_item; 4059 struct btrfs_path *path; 4060 struct extent_buffer *leaf; 4061 int ret; 4062 4063 path = btrfs_alloc_path(); 4064 if (!path) 4065 return -ENOMEM; 4066 4067 ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1); 4068 if (ret) { 4069 if (ret > 0) 4070 ret = -ENOENT; 4071 goto failed; 4072 } 4073 4074 leaf = path->nodes[0]; 4075 inode_item = btrfs_item_ptr(leaf, path->slots[0], 4076 struct btrfs_inode_item); 4077 4078 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); 4079 btrfs_mark_buffer_dirty(leaf); 4080 btrfs_set_inode_last_trans(trans, inode); 4081 ret = 0; 4082 failed: 4083 btrfs_free_path(path); 4084 return ret; 4085 } 4086 4087 /* 4088 * copy everything in the in-memory inode into the btree. 4089 */ 4090 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 4091 struct btrfs_root *root, 4092 struct btrfs_inode *inode) 4093 { 4094 struct btrfs_fs_info *fs_info = root->fs_info; 4095 int ret; 4096 4097 /* 4098 * If the inode is a free space inode, we can deadlock during commit 4099 * if we put it into the delayed code. 4100 * 4101 * The data relocation inode should also be directly updated 4102 * without delay 4103 */ 4104 if (!btrfs_is_free_space_inode(inode) 4105 && !btrfs_is_data_reloc_root(root) 4106 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 4107 btrfs_update_root_times(trans, root); 4108 4109 ret = btrfs_delayed_update_inode(trans, root, inode); 4110 if (!ret) 4111 btrfs_set_inode_last_trans(trans, inode); 4112 return ret; 4113 } 4114 4115 return btrfs_update_inode_item(trans, root, inode); 4116 } 4117 4118 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4119 struct btrfs_root *root, struct btrfs_inode *inode) 4120 { 4121 int ret; 4122 4123 ret = btrfs_update_inode(trans, root, inode); 4124 if (ret == -ENOSPC) 4125 return btrfs_update_inode_item(trans, root, inode); 4126 return ret; 4127 } 4128 4129 /* 4130 * unlink helper that gets used here in inode.c and in the tree logging 4131 * recovery code. 
It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const struct fscrypt_str *name,
				struct btrfs_rename_ctx *rename_ctx)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have a cached dir index, we have to look up the inode
	 * ref to get it, and since we have the ref at hand anyway we remove
	 * it directly instead of deferring the deletion.
	 *
	 * But if we do have the dir index cached, there is no need to search
	 * for the inode ref. Since the inode ref is close to the inode item,
	 * it is better to delay its deletion and do it together with the
	 * inode item update.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name->len, name->name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	if (rename_ctx)
		rename_ctx->index = index;

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	/*
	 * If we are in a rename context, we don't need to update anything in the
	 * log. That will be done later during the rename by btrfs_log_new_name().
	 * Besides that, doing it here would only cause extra unnecessary btree
	 * operations on the log tree, increasing latency for applications.
	 */
	if (!rename_ctx) {
		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
	}

	/*
	 * If we have a pending delayed iput we could end up with the final iput
	 * being run in btrfs-cleaner context. If we have enough of these built
	 * up we can end up burning a lot of time in btrfs-cleaner without any
	 * way to throttle the unlinks. Since we're currently holding a ref on
	 * the inode we can run the delayed iput here without any issues as the
	 * final iput won't be done until after we drop the ref we're currently
	 * holding.
4220 */ 4221 btrfs_run_delayed_iput(fs_info, inode); 4222 err: 4223 btrfs_free_path(path); 4224 if (ret) 4225 goto out; 4226 4227 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); 4228 inode_inc_iversion(&inode->vfs_inode); 4229 inode_inc_iversion(&dir->vfs_inode); 4230 inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode); 4231 dir->vfs_inode.i_mtime = inode->vfs_inode.i_ctime; 4232 dir->vfs_inode.i_ctime = inode->vfs_inode.i_ctime; 4233 ret = btrfs_update_inode(trans, root, dir); 4234 out: 4235 return ret; 4236 } 4237 4238 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4239 struct btrfs_inode *dir, struct btrfs_inode *inode, 4240 const struct fscrypt_str *name) 4241 { 4242 int ret; 4243 4244 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL); 4245 if (!ret) { 4246 drop_nlink(&inode->vfs_inode); 4247 ret = btrfs_update_inode(trans, inode->root, inode); 4248 } 4249 return ret; 4250 } 4251 4252 /* 4253 * helper to start transaction for unlink and rmdir. 4254 * 4255 * unlink and rmdir are special in btrfs, they do not always free space, so 4256 * if we cannot make our reservations the normal way try and see if there is 4257 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4258 * allow the unlink to occur. 4259 */ 4260 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) 4261 { 4262 struct btrfs_root *root = dir->root; 4263 4264 /* 4265 * 1 for the possible orphan item 4266 * 1 for the dir item 4267 * 1 for the dir index 4268 * 1 for the inode ref 4269 * 1 for the inode 4270 * 1 for the parent inode 4271 */ 4272 return btrfs_start_transaction_fallback_global_rsv(root, 6); 4273 } 4274 4275 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4276 { 4277 struct btrfs_trans_handle *trans; 4278 struct inode *inode = d_inode(dentry); 4279 int ret; 4280 struct fscrypt_name fname; 4281 4282 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4283 if (ret) 4284 return ret; 4285 4286 /* This needs to handle no-key deletions later on */ 4287 4288 trans = __unlink_start_trans(BTRFS_I(dir)); 4289 if (IS_ERR(trans)) { 4290 ret = PTR_ERR(trans); 4291 goto fscrypt_free; 4292 } 4293 4294 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4295 0); 4296 4297 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4298 &fname.disk_name); 4299 if (ret) 4300 goto end_trans; 4301 4302 if (inode->i_nlink == 0) { 4303 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4304 if (ret) 4305 goto end_trans; 4306 } 4307 4308 end_trans: 4309 btrfs_end_transaction(trans); 4310 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); 4311 fscrypt_free: 4312 fscrypt_free_filename(&fname); 4313 return ret; 4314 } 4315 4316 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4317 struct btrfs_inode *dir, struct dentry *dentry) 4318 { 4319 struct btrfs_root *root = dir->root; 4320 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4321 struct btrfs_path *path; 4322 struct extent_buffer *leaf; 4323 struct btrfs_dir_item *di; 4324 struct btrfs_key key; 4325 u64 index; 4326 int ret; 4327 u64 objectid; 4328 u64 dir_ino = btrfs_ino(dir); 4329 struct fscrypt_name fname; 4330 4331 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 4332 if (ret) 4333 return ret; 4334 4335 /* This needs to handle no-key deletions later on */ 4336 4337 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4338 objectid = inode->root->root_key.objectid; 4339 } else if 
(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4340 objectid = inode->location.objectid; 4341 } else { 4342 WARN_ON(1); 4343 fscrypt_free_filename(&fname); 4344 return -EINVAL; 4345 } 4346 4347 path = btrfs_alloc_path(); 4348 if (!path) { 4349 ret = -ENOMEM; 4350 goto out; 4351 } 4352 4353 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4354 &fname.disk_name, -1); 4355 if (IS_ERR_OR_NULL(di)) { 4356 ret = di ? PTR_ERR(di) : -ENOENT; 4357 goto out; 4358 } 4359 4360 leaf = path->nodes[0]; 4361 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4362 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4363 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4364 if (ret) { 4365 btrfs_abort_transaction(trans, ret); 4366 goto out; 4367 } 4368 btrfs_release_path(path); 4369 4370 /* 4371 * This is a placeholder inode for a subvolume we didn't have a 4372 * reference to at the time of the snapshot creation. In the meantime 4373 * we could have renamed the real subvol link into our snapshot, so 4374 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. 4375 * Instead simply lookup the dir_index_item for this entry so we can 4376 * remove it. Otherwise we know we have a ref to the root and we can 4377 * call btrfs_del_root_ref, and it _shouldn't_ fail. 4378 */ 4379 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4380 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name); 4381 if (IS_ERR_OR_NULL(di)) { 4382 if (!di) 4383 ret = -ENOENT; 4384 else 4385 ret = PTR_ERR(di); 4386 btrfs_abort_transaction(trans, ret); 4387 goto out; 4388 } 4389 4390 leaf = path->nodes[0]; 4391 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4392 index = key.offset; 4393 btrfs_release_path(path); 4394 } else { 4395 ret = btrfs_del_root_ref(trans, objectid, 4396 root->root_key.objectid, dir_ino, 4397 &index, &fname.disk_name); 4398 if (ret) { 4399 btrfs_abort_transaction(trans, ret); 4400 goto out; 4401 } 4402 } 4403 4404 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4405 if (ret) { 4406 btrfs_abort_transaction(trans, ret); 4407 goto out; 4408 } 4409 4410 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); 4411 inode_inc_iversion(&dir->vfs_inode); 4412 dir->vfs_inode.i_mtime = current_time(&dir->vfs_inode); 4413 dir->vfs_inode.i_ctime = dir->vfs_inode.i_mtime; 4414 ret = btrfs_update_inode_fallback(trans, root, dir); 4415 if (ret) 4416 btrfs_abort_transaction(trans, ret); 4417 out: 4418 btrfs_free_path(path); 4419 fscrypt_free_filename(&fname); 4420 return ret; 4421 } 4422 4423 /* 4424 * Helper to check if the subvolume references other subvolumes or if it's 4425 * default. 
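 *
 * Returns 0 if the subvolume may be destroyed, -EPERM if it is the default
 * subvolume, -ENOTEMPTY if it still references child subvolumes, or another
 * negative errno on lookup failure.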
4426 */ 4427 static noinline int may_destroy_subvol(struct btrfs_root *root) 4428 { 4429 struct btrfs_fs_info *fs_info = root->fs_info; 4430 struct btrfs_path *path; 4431 struct btrfs_dir_item *di; 4432 struct btrfs_key key; 4433 struct fscrypt_str name = FSTR_INIT("default", 7); 4434 u64 dir_id; 4435 int ret; 4436 4437 path = btrfs_alloc_path(); 4438 if (!path) 4439 return -ENOMEM; 4440 4441 /* Make sure this root isn't set as the default subvol */ 4442 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4443 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4444 dir_id, &name, 0); 4445 if (di && !IS_ERR(di)) { 4446 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4447 if (key.objectid == root->root_key.objectid) { 4448 ret = -EPERM; 4449 btrfs_err(fs_info, 4450 "deleting default subvolume %llu is not allowed", 4451 key.objectid); 4452 goto out; 4453 } 4454 btrfs_release_path(path); 4455 } 4456 4457 key.objectid = root->root_key.objectid; 4458 key.type = BTRFS_ROOT_REF_KEY; 4459 key.offset = (u64)-1; 4460 4461 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4462 if (ret < 0) 4463 goto out; 4464 BUG_ON(ret == 0); 4465 4466 ret = 0; 4467 if (path->slots[0] > 0) { 4468 path->slots[0]--; 4469 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4470 if (key.objectid == root->root_key.objectid && 4471 key.type == BTRFS_ROOT_REF_KEY) 4472 ret = -ENOTEMPTY; 4473 } 4474 out: 4475 btrfs_free_path(path); 4476 return ret; 4477 } 4478 4479 /* Delete all dentries for inodes belonging to the root */ 4480 static void btrfs_prune_dentries(struct btrfs_root *root) 4481 { 4482 struct btrfs_fs_info *fs_info = root->fs_info; 4483 struct rb_node *node; 4484 struct rb_node *prev; 4485 struct btrfs_inode *entry; 4486 struct inode *inode; 4487 u64 objectid = 0; 4488 4489 if (!BTRFS_FS_ERROR(fs_info)) 4490 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4491 4492 spin_lock(&root->inode_lock); 4493 again: 4494 node = root->inode_tree.rb_node; 4495 prev = NULL; 4496 while (node) { 4497 prev = node; 4498 entry = rb_entry(node, struct btrfs_inode, rb_node); 4499 4500 if (objectid < btrfs_ino(entry)) 4501 node = node->rb_left; 4502 else if (objectid > btrfs_ino(entry)) 4503 node = node->rb_right; 4504 else 4505 break; 4506 } 4507 if (!node) { 4508 while (prev) { 4509 entry = rb_entry(prev, struct btrfs_inode, rb_node); 4510 if (objectid <= btrfs_ino(entry)) { 4511 node = prev; 4512 break; 4513 } 4514 prev = rb_next(prev); 4515 } 4516 } 4517 while (node) { 4518 entry = rb_entry(node, struct btrfs_inode, rb_node); 4519 objectid = btrfs_ino(entry) + 1; 4520 inode = igrab(&entry->vfs_inode); 4521 if (inode) { 4522 spin_unlock(&root->inode_lock); 4523 if (atomic_read(&inode->i_count) > 1) 4524 d_prune_aliases(inode); 4525 /* 4526 * btrfs_drop_inode will have it removed from the inode 4527 * cache when its usage count hits zero. 
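			 *
			 * Once we dropped root->inode_lock and called iput()
			 * the rb-tree may have changed, so restart the scan
			 * from the next inode number (objectid was advanced
			 * past this entry above).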
4528 */ 4529 iput(inode); 4530 cond_resched(); 4531 spin_lock(&root->inode_lock); 4532 goto again; 4533 } 4534 4535 if (cond_resched_lock(&root->inode_lock)) 4536 goto again; 4537 4538 node = rb_next(node); 4539 } 4540 spin_unlock(&root->inode_lock); 4541 } 4542 4543 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry) 4544 { 4545 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); 4546 struct btrfs_root *root = dir->root; 4547 struct inode *inode = d_inode(dentry); 4548 struct btrfs_root *dest = BTRFS_I(inode)->root; 4549 struct btrfs_trans_handle *trans; 4550 struct btrfs_block_rsv block_rsv; 4551 u64 root_flags; 4552 int ret; 4553 4554 /* 4555 * Don't allow to delete a subvolume with send in progress. This is 4556 * inside the inode lock so the error handling that has to drop the bit 4557 * again is not run concurrently. 4558 */ 4559 spin_lock(&dest->root_item_lock); 4560 if (dest->send_in_progress) { 4561 spin_unlock(&dest->root_item_lock); 4562 btrfs_warn(fs_info, 4563 "attempt to delete subvolume %llu during send", 4564 dest->root_key.objectid); 4565 return -EPERM; 4566 } 4567 if (atomic_read(&dest->nr_swapfiles)) { 4568 spin_unlock(&dest->root_item_lock); 4569 btrfs_warn(fs_info, 4570 "attempt to delete subvolume %llu with active swapfile", 4571 root->root_key.objectid); 4572 return -EPERM; 4573 } 4574 root_flags = btrfs_root_flags(&dest->root_item); 4575 btrfs_set_root_flags(&dest->root_item, 4576 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4577 spin_unlock(&dest->root_item_lock); 4578 4579 down_write(&fs_info->subvol_sem); 4580 4581 ret = may_destroy_subvol(dest); 4582 if (ret) 4583 goto out_up_write; 4584 4585 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4586 /* 4587 * One for dir inode, 4588 * two for dir entries, 4589 * two for root ref/backref. 
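	 * That is five tree items in total, which is the "5" passed to
	 * btrfs_subvolume_reserve_metadata() below.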
4590 */ 4591 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4592 if (ret) 4593 goto out_up_write; 4594 4595 trans = btrfs_start_transaction(root, 0); 4596 if (IS_ERR(trans)) { 4597 ret = PTR_ERR(trans); 4598 goto out_release; 4599 } 4600 trans->block_rsv = &block_rsv; 4601 trans->bytes_reserved = block_rsv.size; 4602 4603 btrfs_record_snapshot_destroy(trans, dir); 4604 4605 ret = btrfs_unlink_subvol(trans, dir, dentry); 4606 if (ret) { 4607 btrfs_abort_transaction(trans, ret); 4608 goto out_end_trans; 4609 } 4610 4611 ret = btrfs_record_root_in_trans(trans, dest); 4612 if (ret) { 4613 btrfs_abort_transaction(trans, ret); 4614 goto out_end_trans; 4615 } 4616 4617 memset(&dest->root_item.drop_progress, 0, 4618 sizeof(dest->root_item.drop_progress)); 4619 btrfs_set_root_drop_level(&dest->root_item, 0); 4620 btrfs_set_root_refs(&dest->root_item, 0); 4621 4622 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4623 ret = btrfs_insert_orphan_item(trans, 4624 fs_info->tree_root, 4625 dest->root_key.objectid); 4626 if (ret) { 4627 btrfs_abort_transaction(trans, ret); 4628 goto out_end_trans; 4629 } 4630 } 4631 4632 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4633 BTRFS_UUID_KEY_SUBVOL, 4634 dest->root_key.objectid); 4635 if (ret && ret != -ENOENT) { 4636 btrfs_abort_transaction(trans, ret); 4637 goto out_end_trans; 4638 } 4639 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4640 ret = btrfs_uuid_tree_remove(trans, 4641 dest->root_item.received_uuid, 4642 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4643 dest->root_key.objectid); 4644 if (ret && ret != -ENOENT) { 4645 btrfs_abort_transaction(trans, ret); 4646 goto out_end_trans; 4647 } 4648 } 4649 4650 free_anon_bdev(dest->anon_dev); 4651 dest->anon_dev = 0; 4652 out_end_trans: 4653 trans->block_rsv = NULL; 4654 trans->bytes_reserved = 0; 4655 ret = btrfs_end_transaction(trans); 4656 inode->i_flags |= S_DEAD; 4657 out_release: 4658 btrfs_subvolume_release_metadata(root, &block_rsv); 4659 out_up_write: 4660 up_write(&fs_info->subvol_sem); 4661 if (ret) { 4662 spin_lock(&dest->root_item_lock); 4663 root_flags = btrfs_root_flags(&dest->root_item); 4664 btrfs_set_root_flags(&dest->root_item, 4665 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4666 spin_unlock(&dest->root_item_lock); 4667 } else { 4668 d_invalidate(dentry); 4669 btrfs_prune_dentries(dest); 4670 ASSERT(dest->send_in_progress == 0); 4671 } 4672 4673 return ret; 4674 } 4675 4676 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4677 { 4678 struct inode *inode = d_inode(dentry); 4679 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 4680 int err = 0; 4681 struct btrfs_trans_handle *trans; 4682 u64 last_unlink_trans; 4683 struct fscrypt_name fname; 4684 4685 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4686 return -ENOTEMPTY; 4687 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) { 4688 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { 4689 btrfs_err(fs_info, 4690 "extent tree v2 doesn't support snapshot deletion yet"); 4691 return -EOPNOTSUPP; 4692 } 4693 return btrfs_delete_subvolume(BTRFS_I(dir), dentry); 4694 } 4695 4696 err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4697 if (err) 4698 return err; 4699 4700 /* This needs to handle no-key deletions later on */ 4701 4702 trans = __unlink_start_trans(BTRFS_I(dir)); 4703 if (IS_ERR(trans)) { 4704 err = PTR_ERR(trans); 4705 goto out_notrans; 4706 } 4707 4708 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 
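		/*
		 * This is the placeholder directory of a subvolume we did not
		 * have a reference to when the snapshot was taken, so it has
		 * to go through the subvolume unlink path rather than the
		 * regular unlink below.
		 */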
4709 err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); 4710 goto out; 4711 } 4712 4713 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 4714 if (err) 4715 goto out; 4716 4717 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4718 4719 /* now the directory is empty */ 4720 err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4721 &fname.disk_name); 4722 if (!err) { 4723 btrfs_i_size_write(BTRFS_I(inode), 0); 4724 /* 4725 * Propagate the last_unlink_trans value of the deleted dir to 4726 * its parent directory. This is to prevent an unrecoverable 4727 * log tree in the case we do something like this: 4728 * 1) create dir foo 4729 * 2) create snapshot under dir foo 4730 * 3) delete the snapshot 4731 * 4) rmdir foo 4732 * 5) mkdir foo 4733 * 6) fsync foo or some file inside foo 4734 */ 4735 if (last_unlink_trans >= trans->transid) 4736 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4737 } 4738 out: 4739 btrfs_end_transaction(trans); 4740 out_notrans: 4741 btrfs_btree_balance_dirty(fs_info); 4742 fscrypt_free_filename(&fname); 4743 4744 return err; 4745 } 4746 4747 /* 4748 * btrfs_truncate_block - read, zero a chunk and write a block 4749 * @inode - inode that we're zeroing 4750 * @from - the offset to start zeroing 4751 * @len - the length to zero, 0 to zero the entire range respective to the 4752 * offset 4753 * @front - zero up to the offset instead of from the offset on 4754 * 4755 * This will find the block for the "from" offset and cow the block and zero the 4756 * part we want to zero. This is used with truncate and hole punching. 4757 */ 4758 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, 4759 int front) 4760 { 4761 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4762 struct address_space *mapping = inode->vfs_inode.i_mapping; 4763 struct extent_io_tree *io_tree = &inode->io_tree; 4764 struct btrfs_ordered_extent *ordered; 4765 struct extent_state *cached_state = NULL; 4766 struct extent_changeset *data_reserved = NULL; 4767 bool only_release_metadata = false; 4768 u32 blocksize = fs_info->sectorsize; 4769 pgoff_t index = from >> PAGE_SHIFT; 4770 unsigned offset = from & (blocksize - 1); 4771 struct page *page; 4772 gfp_t mask = btrfs_alloc_write_mask(mapping); 4773 size_t write_bytes = blocksize; 4774 int ret = 0; 4775 u64 block_start; 4776 u64 block_end; 4777 4778 if (IS_ALIGNED(offset, blocksize) && 4779 (!len || IS_ALIGNED(len, blocksize))) 4780 goto out; 4781 4782 block_start = round_down(from, blocksize); 4783 block_end = block_start + blocksize - 1; 4784 4785 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, 4786 blocksize, false); 4787 if (ret < 0) { 4788 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) { 4789 /* For nocow case, no need to reserve data space */ 4790 only_release_metadata = true; 4791 } else { 4792 goto out; 4793 } 4794 } 4795 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false); 4796 if (ret < 0) { 4797 if (!only_release_metadata) 4798 btrfs_free_reserved_data_space(inode, data_reserved, 4799 block_start, blocksize); 4800 goto out; 4801 } 4802 again: 4803 page = find_or_create_page(mapping, index, mask); 4804 if (!page) { 4805 btrfs_delalloc_release_space(inode, data_reserved, block_start, 4806 blocksize, true); 4807 btrfs_delalloc_release_extents(inode, blocksize); 4808 ret = -ENOMEM; 4809 goto out; 4810 } 4811 ret = set_page_extent_mapped(page); 4812 if (ret < 0) 4813 goto out_unlock; 4814 4815 if (!PageUptodate(page)) { 4816 
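		/*
		 * The page is not up to date yet: read it in, then re-take
		 * the page lock and make sure the page was not truncated or
		 * reclaimed while we slept, retrying from scratch if it was.
		 */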
ret = btrfs_read_folio(NULL, page_folio(page)); 4817 lock_page(page); 4818 if (page->mapping != mapping) { 4819 unlock_page(page); 4820 put_page(page); 4821 goto again; 4822 } 4823 if (!PageUptodate(page)) { 4824 ret = -EIO; 4825 goto out_unlock; 4826 } 4827 } 4828 wait_on_page_writeback(page); 4829 4830 lock_extent(io_tree, block_start, block_end, &cached_state); 4831 4832 ordered = btrfs_lookup_ordered_extent(inode, block_start); 4833 if (ordered) { 4834 unlock_extent(io_tree, block_start, block_end, &cached_state); 4835 unlock_page(page); 4836 put_page(page); 4837 btrfs_start_ordered_extent(ordered); 4838 btrfs_put_ordered_extent(ordered); 4839 goto again; 4840 } 4841 4842 clear_extent_bit(&inode->io_tree, block_start, block_end, 4843 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4844 &cached_state); 4845 4846 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 4847 &cached_state); 4848 if (ret) { 4849 unlock_extent(io_tree, block_start, block_end, &cached_state); 4850 goto out_unlock; 4851 } 4852 4853 if (offset != blocksize) { 4854 if (!len) 4855 len = blocksize - offset; 4856 if (front) 4857 memzero_page(page, (block_start - page_offset(page)), 4858 offset); 4859 else 4860 memzero_page(page, (block_start - page_offset(page)) + offset, 4861 len); 4862 } 4863 btrfs_page_clear_checked(fs_info, page, block_start, 4864 block_end + 1 - block_start); 4865 btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start); 4866 unlock_extent(io_tree, block_start, block_end, &cached_state); 4867 4868 if (only_release_metadata) 4869 set_extent_bit(&inode->io_tree, block_start, block_end, 4870 EXTENT_NORESERVE, NULL, GFP_NOFS); 4871 4872 out_unlock: 4873 if (ret) { 4874 if (only_release_metadata) 4875 btrfs_delalloc_release_metadata(inode, blocksize, true); 4876 else 4877 btrfs_delalloc_release_space(inode, data_reserved, 4878 block_start, blocksize, true); 4879 } 4880 btrfs_delalloc_release_extents(inode, blocksize); 4881 unlock_page(page); 4882 put_page(page); 4883 out: 4884 if (only_release_metadata) 4885 btrfs_check_nocow_unlock(inode); 4886 extent_changeset_free(data_reserved); 4887 return ret; 4888 } 4889 4890 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode, 4891 u64 offset, u64 len) 4892 { 4893 struct btrfs_fs_info *fs_info = root->fs_info; 4894 struct btrfs_trans_handle *trans; 4895 struct btrfs_drop_extents_args drop_args = { 0 }; 4896 int ret; 4897 4898 /* 4899 * If NO_HOLES is enabled, we don't need to do anything. 4900 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans() 4901 * or btrfs_update_inode() will be called, which guarantee that the next 4902 * fsync will know this inode was changed and needs to be logged. 4903 */ 4904 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 4905 return 0; 4906 4907 /* 4908 * 1 - for the one we're dropping 4909 * 1 - for the one we're adding 4910 * 1 - for updating the inode. 
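	 * That is three item modifications, hence the "3" passed to
	 * btrfs_start_transaction() below.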
4911 */ 4912 trans = btrfs_start_transaction(root, 3); 4913 if (IS_ERR(trans)) 4914 return PTR_ERR(trans); 4915 4916 drop_args.start = offset; 4917 drop_args.end = offset + len; 4918 drop_args.drop_cache = true; 4919 4920 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 4921 if (ret) { 4922 btrfs_abort_transaction(trans, ret); 4923 btrfs_end_transaction(trans); 4924 return ret; 4925 } 4926 4927 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len); 4928 if (ret) { 4929 btrfs_abort_transaction(trans, ret); 4930 } else { 4931 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found); 4932 btrfs_update_inode(trans, root, inode); 4933 } 4934 btrfs_end_transaction(trans); 4935 return ret; 4936 } 4937 4938 /* 4939 * This function puts in dummy file extents for the area we're creating a hole 4940 * for. So if we are truncating this file to a larger size we need to insert 4941 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 4942 * the range between oldsize and size 4943 */ 4944 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) 4945 { 4946 struct btrfs_root *root = inode->root; 4947 struct btrfs_fs_info *fs_info = root->fs_info; 4948 struct extent_io_tree *io_tree = &inode->io_tree; 4949 struct extent_map *em = NULL; 4950 struct extent_state *cached_state = NULL; 4951 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 4952 u64 block_end = ALIGN(size, fs_info->sectorsize); 4953 u64 last_byte; 4954 u64 cur_offset; 4955 u64 hole_size; 4956 int err = 0; 4957 4958 /* 4959 * If our size started in the middle of a block we need to zero out the 4960 * rest of the block before we expand the i_size, otherwise we could 4961 * expose stale data. 4962 */ 4963 err = btrfs_truncate_block(inode, oldsize, 0, 0); 4964 if (err) 4965 return err; 4966 4967 if (size <= hole_start) 4968 return 0; 4969 4970 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, 4971 &cached_state); 4972 cur_offset = hole_start; 4973 while (1) { 4974 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 4975 block_end - cur_offset); 4976 if (IS_ERR(em)) { 4977 err = PTR_ERR(em); 4978 em = NULL; 4979 break; 4980 } 4981 last_byte = min(extent_map_end(em), block_end); 4982 last_byte = ALIGN(last_byte, fs_info->sectorsize); 4983 hole_size = last_byte - cur_offset; 4984 4985 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 4986 struct extent_map *hole_em; 4987 4988 err = maybe_insert_hole(root, inode, cur_offset, 4989 hole_size); 4990 if (err) 4991 break; 4992 4993 err = btrfs_inode_set_file_extent_range(inode, 4994 cur_offset, hole_size); 4995 if (err) 4996 break; 4997 4998 hole_em = alloc_extent_map(); 4999 if (!hole_em) { 5000 btrfs_drop_extent_map_range(inode, cur_offset, 5001 cur_offset + hole_size - 1, 5002 false); 5003 btrfs_set_inode_full_sync(inode); 5004 goto next; 5005 } 5006 hole_em->start = cur_offset; 5007 hole_em->len = hole_size; 5008 hole_em->orig_start = cur_offset; 5009 5010 hole_em->block_start = EXTENT_MAP_HOLE; 5011 hole_em->block_len = 0; 5012 hole_em->orig_block_len = 0; 5013 hole_em->ram_bytes = hole_size; 5014 hole_em->compress_type = BTRFS_COMPRESS_NONE; 5015 hole_em->generation = fs_info->generation; 5016 5017 err = btrfs_replace_extent_map_range(inode, hole_em, true); 5018 free_extent_map(hole_em); 5019 } else { 5020 err = btrfs_inode_set_file_extent_range(inode, 5021 cur_offset, hole_size); 5022 if (err) 5023 break; 5024 } 5025 next: 5026 free_extent_map(em); 5027 em = NULL; 5028 cur_offset = last_byte; 5029 if 
(cur_offset >= block_end) 5030 break; 5031 } 5032 free_extent_map(em); 5033 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); 5034 return err; 5035 } 5036 5037 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 5038 { 5039 struct btrfs_root *root = BTRFS_I(inode)->root; 5040 struct btrfs_trans_handle *trans; 5041 loff_t oldsize = i_size_read(inode); 5042 loff_t newsize = attr->ia_size; 5043 int mask = attr->ia_valid; 5044 int ret; 5045 5046 /* 5047 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 5048 * special case where we need to update the times despite not having 5049 * these flags set. For all other operations the VFS set these flags 5050 * explicitly if it wants a timestamp update. 5051 */ 5052 if (newsize != oldsize) { 5053 inode_inc_iversion(inode); 5054 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) { 5055 inode->i_mtime = current_time(inode); 5056 inode->i_ctime = inode->i_mtime; 5057 } 5058 } 5059 5060 if (newsize > oldsize) { 5061 /* 5062 * Don't do an expanding truncate while snapshotting is ongoing. 5063 * This is to ensure the snapshot captures a fully consistent 5064 * state of this file - if the snapshot captures this expanding 5065 * truncation, it must capture all writes that happened before 5066 * this truncation. 5067 */ 5068 btrfs_drew_write_lock(&root->snapshot_lock); 5069 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize); 5070 if (ret) { 5071 btrfs_drew_write_unlock(&root->snapshot_lock); 5072 return ret; 5073 } 5074 5075 trans = btrfs_start_transaction(root, 1); 5076 if (IS_ERR(trans)) { 5077 btrfs_drew_write_unlock(&root->snapshot_lock); 5078 return PTR_ERR(trans); 5079 } 5080 5081 i_size_write(inode, newsize); 5082 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 5083 pagecache_isize_extended(inode, oldsize, newsize); 5084 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 5085 btrfs_drew_write_unlock(&root->snapshot_lock); 5086 btrfs_end_transaction(trans); 5087 } else { 5088 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5089 5090 if (btrfs_is_zoned(fs_info)) { 5091 ret = btrfs_wait_ordered_range(inode, 5092 ALIGN(newsize, fs_info->sectorsize), 5093 (u64)-1); 5094 if (ret) 5095 return ret; 5096 } 5097 5098 /* 5099 * We're truncating a file that used to have good data down to 5100 * zero. Make sure any new writes to the file get on disk 5101 * on close. 5102 */ 5103 if (newsize == 0) 5104 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 5105 &BTRFS_I(inode)->runtime_flags); 5106 5107 truncate_setsize(inode, newsize); 5108 5109 inode_dio_wait(inode); 5110 5111 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize); 5112 if (ret && inode->i_nlink) { 5113 int err; 5114 5115 /* 5116 * Truncate failed, so fix up the in-memory size. We 5117 * adjusted disk_i_size down as we removed extents, so 5118 * wait for disk_i_size to be stable and then update the 5119 * in-memory size to match. 
 */
			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
			if (err)
				return err;
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
		}
	}

	return ret;
}

static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(idmap, inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(BTRFS_I(inode));

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
	}

	return err;
}

/*
 * While truncating the inode pages during eviction, we get the VFS
 * calling btrfs_invalidate_folio() against each folio of the inode. This
 * is slow because the calls to btrfs_invalidate_folio() result in a
 * huge amount of calls to lock_extent() and clear_extent_bit(),
 * which keep merging and splitting extent_state structures over and over,
 * wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
 * skip all those expensive operations on a per folio basis and do only
 * the ordered io finishing, while we release here the extent_map and
 * extent_state structures, without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readahead whose endio callback
	 * (extent_io.c:end_bio_extent_readpage) is still in progress (it has
	 * unlocked the pages in the bio but has not yet unlocked the ranges
	 * in the io tree), so some ranges can still be locked when eviction
	 * starts: those bios are executed by a separate task (a work queue
	 * kthread), and no inode reference (inode->i_count) was taken before
	 * submitting them (one that would be dropped in the end io callback
	 * of each bio). So here we effectively end up waiting for those bios
	 * and for anyone else holding locked ranges without having bumped the
	 * inode's reference count - if we don't do it, when they access the
	 * inode's io_tree to unlock a range it may be too late, leading to a
	 * use-after-free issue.
5201 */ 5202 spin_lock(&io_tree->lock); 5203 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5204 struct extent_state *state; 5205 struct extent_state *cached_state = NULL; 5206 u64 start; 5207 u64 end; 5208 unsigned state_flags; 5209 5210 node = rb_first(&io_tree->state); 5211 state = rb_entry(node, struct extent_state, rb_node); 5212 start = state->start; 5213 end = state->end; 5214 state_flags = state->state; 5215 spin_unlock(&io_tree->lock); 5216 5217 lock_extent(io_tree, start, end, &cached_state); 5218 5219 /* 5220 * If still has DELALLOC flag, the extent didn't reach disk, 5221 * and its reserved space won't be freed by delayed_ref. 5222 * So we need to free its reserved space here. 5223 * (Refer to comment in btrfs_invalidate_folio, case 2) 5224 * 5225 * Note, end is the bytenr of last byte, so we need + 1 here. 5226 */ 5227 if (state_flags & EXTENT_DELALLOC) 5228 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5229 end - start + 1); 5230 5231 clear_extent_bit(io_tree, start, end, 5232 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, 5233 &cached_state); 5234 5235 cond_resched(); 5236 spin_lock(&io_tree->lock); 5237 } 5238 spin_unlock(&io_tree->lock); 5239 } 5240 5241 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5242 struct btrfs_block_rsv *rsv) 5243 { 5244 struct btrfs_fs_info *fs_info = root->fs_info; 5245 struct btrfs_trans_handle *trans; 5246 u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1); 5247 int ret; 5248 5249 /* 5250 * Eviction should be taking place at some place safe because of our 5251 * delayed iputs. However the normal flushing code will run delayed 5252 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5253 * 5254 * We reserve the delayed_refs_extra here again because we can't use 5255 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5256 * above. We reserve our extra bit here because we generate a ton of 5257 * delayed refs activity by truncating. 5258 * 5259 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can, 5260 * if we fail to make this reservation we can re-try without the 5261 * delayed_refs_extra so we can make some forward progress. 
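	 * That retry order is what the two btrfs_block_rsv_refill() calls
	 * below implement: first with the extra delayed refs reservation,
	 * then without it.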
5262 */ 5263 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, 5264 BTRFS_RESERVE_FLUSH_EVICT); 5265 if (ret) { 5266 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, 5267 BTRFS_RESERVE_FLUSH_EVICT); 5268 if (ret) { 5269 btrfs_warn(fs_info, 5270 "could not allocate space for delete; will truncate on mount"); 5271 return ERR_PTR(-ENOSPC); 5272 } 5273 delayed_refs_extra = 0; 5274 } 5275 5276 trans = btrfs_join_transaction(root); 5277 if (IS_ERR(trans)) 5278 return trans; 5279 5280 if (delayed_refs_extra) { 5281 trans->block_rsv = &fs_info->trans_block_rsv; 5282 trans->bytes_reserved = delayed_refs_extra; 5283 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5284 delayed_refs_extra, 1); 5285 } 5286 return trans; 5287 } 5288 5289 void btrfs_evict_inode(struct inode *inode) 5290 { 5291 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5292 struct btrfs_trans_handle *trans; 5293 struct btrfs_root *root = BTRFS_I(inode)->root; 5294 struct btrfs_block_rsv *rsv; 5295 int ret; 5296 5297 trace_btrfs_inode_evict(inode); 5298 5299 if (!root) { 5300 fsverity_cleanup_inode(inode); 5301 clear_inode(inode); 5302 return; 5303 } 5304 5305 evict_inode_truncate_pages(inode); 5306 5307 if (inode->i_nlink && 5308 ((btrfs_root_refs(&root->root_item) != 0 && 5309 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || 5310 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5311 goto no_delete; 5312 5313 if (is_bad_inode(inode)) 5314 goto no_delete; 5315 5316 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5317 goto no_delete; 5318 5319 if (inode->i_nlink > 0) { 5320 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5321 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); 5322 goto no_delete; 5323 } 5324 5325 /* 5326 * This makes sure the inode item in tree is uptodate and the space for 5327 * the inode update is released. 5328 */ 5329 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5330 if (ret) 5331 goto no_delete; 5332 5333 /* 5334 * This drops any pending insert or delete operations we have for this 5335 * inode. We could have a delayed dir index deletion queued up, but 5336 * we're removing the inode completely so that'll be taken care of in 5337 * the truncate. 5338 */ 5339 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 5340 5341 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5342 if (!rsv) 5343 goto no_delete; 5344 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 5345 rsv->failfast = true; 5346 5347 btrfs_i_size_write(BTRFS_I(inode), 0); 5348 5349 while (1) { 5350 struct btrfs_truncate_control control = { 5351 .inode = BTRFS_I(inode), 5352 .ino = btrfs_ino(BTRFS_I(inode)), 5353 .new_size = 0, 5354 .min_type = 0, 5355 }; 5356 5357 trans = evict_refill_and_join(root, rsv); 5358 if (IS_ERR(trans)) 5359 goto free_rsv; 5360 5361 trans->block_rsv = rsv; 5362 5363 ret = btrfs_truncate_inode_items(trans, root, &control); 5364 trans->block_rsv = &fs_info->trans_block_rsv; 5365 btrfs_end_transaction(trans); 5366 btrfs_btree_balance_dirty(fs_info); 5367 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5368 goto free_rsv; 5369 else if (!ret) 5370 break; 5371 } 5372 5373 /* 5374 * Errors here aren't a big deal, it just means we leave orphan items in 5375 * the tree. They will be cleaned up on the next mount. If the inode 5376 * number gets reused, cleanup deletes the orphan item without doing 5377 * anything, and unlink reuses the existing orphan item. 
5378 * 5379 * If it turns out that we are dropping too many of these, we might want 5380 * to add a mechanism for retrying these after a commit. 5381 */ 5382 trans = evict_refill_and_join(root, rsv); 5383 if (!IS_ERR(trans)) { 5384 trans->block_rsv = rsv; 5385 btrfs_orphan_del(trans, BTRFS_I(inode)); 5386 trans->block_rsv = &fs_info->trans_block_rsv; 5387 btrfs_end_transaction(trans); 5388 } 5389 5390 free_rsv: 5391 btrfs_free_block_rsv(fs_info, rsv); 5392 no_delete: 5393 /* 5394 * If we didn't successfully delete, the orphan item will still be in 5395 * the tree and we'll retry on the next mount. Again, we might also want 5396 * to retry these periodically in the future. 5397 */ 5398 btrfs_remove_delayed_node(BTRFS_I(inode)); 5399 fsverity_cleanup_inode(inode); 5400 clear_inode(inode); 5401 } 5402 5403 /* 5404 * Return the key found in the dir entry in the location pointer, fill @type 5405 * with BTRFS_FT_*, and return 0. 5406 * 5407 * If no dir entries were found, returns -ENOENT. 5408 * If found a corrupted location in dir entry, returns -EUCLEAN. 5409 */ 5410 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, 5411 struct btrfs_key *location, u8 *type) 5412 { 5413 struct btrfs_dir_item *di; 5414 struct btrfs_path *path; 5415 struct btrfs_root *root = dir->root; 5416 int ret = 0; 5417 struct fscrypt_name fname; 5418 5419 path = btrfs_alloc_path(); 5420 if (!path) 5421 return -ENOMEM; 5422 5423 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 5424 if (ret < 0) 5425 goto out; 5426 /* 5427 * fscrypt_setup_filename() should never return a positive value, but 5428 * gcc on sparc/parisc thinks it can, so assert that doesn't happen. 5429 */ 5430 ASSERT(ret == 0); 5431 5432 /* This needs to handle no-key deletions later on */ 5433 5434 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), 5435 &fname.disk_name, 0); 5436 if (IS_ERR_OR_NULL(di)) { 5437 ret = di ? PTR_ERR(di) : -ENOENT; 5438 goto out; 5439 } 5440 5441 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5442 if (location->type != BTRFS_INODE_ITEM_KEY && 5443 location->type != BTRFS_ROOT_ITEM_KEY) { 5444 ret = -EUCLEAN; 5445 btrfs_warn(root->fs_info, 5446 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5447 __func__, fname.disk_name.name, btrfs_ino(dir), 5448 location->objectid, location->type, location->offset); 5449 } 5450 if (!ret) 5451 *type = btrfs_dir_ftype(path->nodes[0], di); 5452 out: 5453 fscrypt_free_filename(&fname); 5454 btrfs_free_path(path); 5455 return ret; 5456 } 5457 5458 /* 5459 * when we hit a tree root in a directory, the btrfs part of the inode 5460 * needs to be changed to reflect the root directory of the tree root. This 5461 * is kind of like crossing a mount point. 
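 *
 * Concretely, the crossing is validated by looking up the root ref
 * item keyed as
 *
 *     (parent subvolume id, BTRFS_ROOT_REF_KEY, child subvolume id)
 *
 * in the tree root and checking that its dirid and name match the
 * directory entry. On success, @location is rewritten to point at the
 * subvolume's root directory inode and @sub_root is set to the new
 * root.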
5462 */ 5463 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5464 struct btrfs_inode *dir, 5465 struct dentry *dentry, 5466 struct btrfs_key *location, 5467 struct btrfs_root **sub_root) 5468 { 5469 struct btrfs_path *path; 5470 struct btrfs_root *new_root; 5471 struct btrfs_root_ref *ref; 5472 struct extent_buffer *leaf; 5473 struct btrfs_key key; 5474 int ret; 5475 int err = 0; 5476 struct fscrypt_name fname; 5477 5478 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); 5479 if (ret) 5480 return ret; 5481 5482 path = btrfs_alloc_path(); 5483 if (!path) { 5484 err = -ENOMEM; 5485 goto out; 5486 } 5487 5488 err = -ENOENT; 5489 key.objectid = dir->root->root_key.objectid; 5490 key.type = BTRFS_ROOT_REF_KEY; 5491 key.offset = location->objectid; 5492 5493 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5494 if (ret) { 5495 if (ret < 0) 5496 err = ret; 5497 goto out; 5498 } 5499 5500 leaf = path->nodes[0]; 5501 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5502 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5503 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len) 5504 goto out; 5505 5506 ret = memcmp_extent_buffer(leaf, fname.disk_name.name, 5507 (unsigned long)(ref + 1), fname.disk_name.len); 5508 if (ret) 5509 goto out; 5510 5511 btrfs_release_path(path); 5512 5513 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5514 if (IS_ERR(new_root)) { 5515 err = PTR_ERR(new_root); 5516 goto out; 5517 } 5518 5519 *sub_root = new_root; 5520 location->objectid = btrfs_root_dirid(&new_root->root_item); 5521 location->type = BTRFS_INODE_ITEM_KEY; 5522 location->offset = 0; 5523 err = 0; 5524 out: 5525 btrfs_free_path(path); 5526 fscrypt_free_filename(&fname); 5527 return err; 5528 } 5529 5530 static void inode_tree_add(struct btrfs_inode *inode) 5531 { 5532 struct btrfs_root *root = inode->root; 5533 struct btrfs_inode *entry; 5534 struct rb_node **p; 5535 struct rb_node *parent; 5536 struct rb_node *new = &inode->rb_node; 5537 u64 ino = btrfs_ino(inode); 5538 5539 if (inode_unhashed(&inode->vfs_inode)) 5540 return; 5541 parent = NULL; 5542 spin_lock(&root->inode_lock); 5543 p = &root->inode_tree.rb_node; 5544 while (*p) { 5545 parent = *p; 5546 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5547 5548 if (ino < btrfs_ino(entry)) 5549 p = &parent->rb_left; 5550 else if (ino > btrfs_ino(entry)) 5551 p = &parent->rb_right; 5552 else { 5553 WARN_ON(!(entry->vfs_inode.i_state & 5554 (I_WILL_FREE | I_FREEING))); 5555 rb_replace_node(parent, new, &root->inode_tree); 5556 RB_CLEAR_NODE(parent); 5557 spin_unlock(&root->inode_lock); 5558 return; 5559 } 5560 } 5561 rb_link_node(new, parent, p); 5562 rb_insert_color(new, &root->inode_tree); 5563 spin_unlock(&root->inode_lock); 5564 } 5565 5566 static void inode_tree_del(struct btrfs_inode *inode) 5567 { 5568 struct btrfs_root *root = inode->root; 5569 int empty = 0; 5570 5571 spin_lock(&root->inode_lock); 5572 if (!RB_EMPTY_NODE(&inode->rb_node)) { 5573 rb_erase(&inode->rb_node, &root->inode_tree); 5574 RB_CLEAR_NODE(&inode->rb_node); 5575 empty = RB_EMPTY_ROOT(&root->inode_tree); 5576 } 5577 spin_unlock(&root->inode_lock); 5578 5579 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5580 spin_lock(&root->inode_lock); 5581 empty = RB_EMPTY_ROOT(&root->inode_tree); 5582 spin_unlock(&root->inode_lock); 5583 if (empty) 5584 btrfs_add_dead_root(root); 5585 } 5586 } 5587 5588 5589 static int btrfs_init_locked_inode(struct inode *inode, void *p) 
5590 { 5591 struct btrfs_iget_args *args = p; 5592 5593 inode->i_ino = args->ino; 5594 BTRFS_I(inode)->location.objectid = args->ino; 5595 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 5596 BTRFS_I(inode)->location.offset = 0; 5597 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5598 BUG_ON(args->root && !BTRFS_I(inode)->root); 5599 5600 if (args->root && args->root == args->root->fs_info->tree_root && 5601 args->ino != BTRFS_BTREE_INODE_OBJECTID) 5602 set_bit(BTRFS_INODE_FREE_SPACE_INODE, 5603 &BTRFS_I(inode)->runtime_flags); 5604 return 0; 5605 } 5606 5607 static int btrfs_find_actor(struct inode *inode, void *opaque) 5608 { 5609 struct btrfs_iget_args *args = opaque; 5610 5611 return args->ino == BTRFS_I(inode)->location.objectid && 5612 args->root == BTRFS_I(inode)->root; 5613 } 5614 5615 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino, 5616 struct btrfs_root *root) 5617 { 5618 struct inode *inode; 5619 struct btrfs_iget_args args; 5620 unsigned long hashval = btrfs_inode_hash(ino, root); 5621 5622 args.ino = ino; 5623 args.root = root; 5624 5625 inode = iget5_locked(s, hashval, btrfs_find_actor, 5626 btrfs_init_locked_inode, 5627 (void *)&args); 5628 return inode; 5629 } 5630 5631 /* 5632 * Get an inode object given its inode number and corresponding root. 5633 * Path can be preallocated to prevent recursing back to iget through 5634 * allocator. NULL is also valid but may require an additional allocation 5635 * later. 5636 */ 5637 struct inode *btrfs_iget_path(struct super_block *s, u64 ino, 5638 struct btrfs_root *root, struct btrfs_path *path) 5639 { 5640 struct inode *inode; 5641 5642 inode = btrfs_iget_locked(s, ino, root); 5643 if (!inode) 5644 return ERR_PTR(-ENOMEM); 5645 5646 if (inode->i_state & I_NEW) { 5647 int ret; 5648 5649 ret = btrfs_read_locked_inode(inode, path); 5650 if (!ret) { 5651 inode_tree_add(BTRFS_I(inode)); 5652 unlock_new_inode(inode); 5653 } else { 5654 iget_failed(inode); 5655 /* 5656 * ret > 0 can come from btrfs_search_slot called by 5657 * btrfs_read_locked_inode, this means the inode item 5658 * was not found. 
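 * We map that to -ENOENT below so callers see a normal lookup
 * failure.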
5659 */ 5660 if (ret > 0) 5661 ret = -ENOENT; 5662 inode = ERR_PTR(ret); 5663 } 5664 } 5665 5666 return inode; 5667 } 5668 5669 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root) 5670 { 5671 return btrfs_iget_path(s, ino, root, NULL); 5672 } 5673 5674 static struct inode *new_simple_dir(struct super_block *s, 5675 struct btrfs_key *key, 5676 struct btrfs_root *root) 5677 { 5678 struct inode *inode = new_inode(s); 5679 5680 if (!inode) 5681 return ERR_PTR(-ENOMEM); 5682 5683 BTRFS_I(inode)->root = btrfs_grab_root(root); 5684 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5685 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5686 5687 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5688 /* 5689 * We only need lookup, the rest is read-only and there's no inode 5690 * associated with the dentry 5691 */ 5692 inode->i_op = &simple_dir_inode_operations; 5693 inode->i_opflags &= ~IOP_XATTR; 5694 inode->i_fop = &simple_dir_operations; 5695 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5696 inode->i_mtime = current_time(inode); 5697 inode->i_atime = inode->i_mtime; 5698 inode->i_ctime = inode->i_mtime; 5699 BTRFS_I(inode)->i_otime = inode->i_mtime; 5700 5701 return inode; 5702 } 5703 5704 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN); 5705 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE); 5706 static_assert(BTRFS_FT_DIR == FT_DIR); 5707 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV); 5708 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV); 5709 static_assert(BTRFS_FT_FIFO == FT_FIFO); 5710 static_assert(BTRFS_FT_SOCK == FT_SOCK); 5711 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK); 5712 5713 static inline u8 btrfs_inode_type(struct inode *inode) 5714 { 5715 return fs_umode_to_ftype(inode->i_mode); 5716 } 5717 5718 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5719 { 5720 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 5721 struct inode *inode; 5722 struct btrfs_root *root = BTRFS_I(dir)->root; 5723 struct btrfs_root *sub_root = root; 5724 struct btrfs_key location; 5725 u8 di_type = 0; 5726 int ret = 0; 5727 5728 if (dentry->d_name.len > BTRFS_NAME_LEN) 5729 return ERR_PTR(-ENAMETOOLONG); 5730 5731 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type); 5732 if (ret < 0) 5733 return ERR_PTR(ret); 5734 5735 if (location.type == BTRFS_INODE_ITEM_KEY) { 5736 inode = btrfs_iget(dir->i_sb, location.objectid, root); 5737 if (IS_ERR(inode)) 5738 return inode; 5739 5740 /* Do extra check against inode mode with di_type */ 5741 if (btrfs_inode_type(inode) != di_type) { 5742 btrfs_crit(fs_info, 5743 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5744 inode->i_mode, btrfs_inode_type(inode), 5745 di_type); 5746 iput(inode); 5747 return ERR_PTR(-EUCLEAN); 5748 } 5749 return inode; 5750 } 5751 5752 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry, 5753 &location, &sub_root); 5754 if (ret < 0) { 5755 if (ret != -ENOENT) 5756 inode = ERR_PTR(ret); 5757 else 5758 inode = new_simple_dir(dir->i_sb, &location, root); 5759 } else { 5760 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); 5761 btrfs_put_root(sub_root); 5762 5763 if (IS_ERR(inode)) 5764 return inode; 5765 5766 down_read(&fs_info->cleanup_work_sem); 5767 if (!sb_rdonly(inode->i_sb)) 5768 ret = btrfs_orphan_cleanup(sub_root); 5769 up_read(&fs_info->cleanup_work_sem); 5770 if (ret) { 5771 iput(inode); 5772 inode = ERR_PTR(ret); 5773 } 5774 } 5775 5776 return inode; 5777 } 5778 5779 static int btrfs_dentry_delete(const 
struct dentry *dentry) 5780 { 5781 struct btrfs_root *root; 5782 struct inode *inode = d_inode(dentry); 5783 5784 if (!inode && !IS_ROOT(dentry)) 5785 inode = d_inode(dentry->d_parent); 5786 5787 if (inode) { 5788 root = BTRFS_I(inode)->root; 5789 if (btrfs_root_refs(&root->root_item) == 0) 5790 return 1; 5791 5792 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5793 return 1; 5794 } 5795 return 0; 5796 } 5797 5798 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5799 unsigned int flags) 5800 { 5801 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5802 5803 if (inode == ERR_PTR(-ENOENT)) 5804 inode = NULL; 5805 return d_splice_alias(inode, dentry); 5806 } 5807 5808 /* 5809 * All this infrastructure exists because dir_emit can fault, and we are holding 5810 * the tree lock when doing readdir. For now just allocate a buffer and copy 5811 * our information into that, and then dir_emit from the buffer. This is 5812 * similar to what NFS does, only we don't keep the buffer around in pagecache 5813 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5814 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5815 * tree lock. 5816 */ 5817 static int btrfs_opendir(struct inode *inode, struct file *file) 5818 { 5819 struct btrfs_file_private *private; 5820 5821 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5822 if (!private) 5823 return -ENOMEM; 5824 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5825 if (!private->filldir_buf) { 5826 kfree(private); 5827 return -ENOMEM; 5828 } 5829 file->private_data = private; 5830 return 0; 5831 } 5832 5833 struct dir_entry { 5834 u64 ino; 5835 u64 offset; 5836 unsigned type; 5837 int name_len; 5838 }; 5839 5840 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5841 { 5842 while (entries--) { 5843 struct dir_entry *entry = addr; 5844 char *name = (char *)(entry + 1); 5845 5846 ctx->pos = get_unaligned(&entry->offset); 5847 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5848 get_unaligned(&entry->ino), 5849 get_unaligned(&entry->type))) 5850 return 1; 5851 addr += sizeof(struct dir_entry) + 5852 get_unaligned(&entry->name_len); 5853 ctx->pos++; 5854 } 5855 return 0; 5856 } 5857 5858 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5859 { 5860 struct inode *inode = file_inode(file); 5861 struct btrfs_root *root = BTRFS_I(inode)->root; 5862 struct btrfs_file_private *private = file->private_data; 5863 struct btrfs_dir_item *di; 5864 struct btrfs_key key; 5865 struct btrfs_key found_key; 5866 struct btrfs_path *path; 5867 void *addr; 5868 struct list_head ins_list; 5869 struct list_head del_list; 5870 int ret; 5871 char *name_ptr; 5872 int name_len; 5873 int entries = 0; 5874 int total_len = 0; 5875 bool put = false; 5876 struct btrfs_key location; 5877 5878 if (!dir_emit_dots(file, ctx)) 5879 return 0; 5880 5881 path = btrfs_alloc_path(); 5882 if (!path) 5883 return -ENOMEM; 5884 5885 addr = private->filldir_buf; 5886 path->reada = READA_FORWARD; 5887 5888 INIT_LIST_HEAD(&ins_list); 5889 INIT_LIST_HEAD(&del_list); 5890 put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list); 5891 5892 again: 5893 key.type = BTRFS_DIR_INDEX_KEY; 5894 key.offset = ctx->pos; 5895 key.objectid = btrfs_ino(BTRFS_I(inode)); 5896 5897 btrfs_for_each_slot(root, &key, &found_key, path, ret) { 5898 struct dir_entry *entry; 5899 struct extent_buffer *leaf = path->nodes[0]; 5900 u8 ftype; 5901 
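/*
 * The walk below stops at the first item that belongs to a different
 * directory or is not a DIR_INDEX item; entries below ctx->pos or
 * queued for deletion in del_list are skipped, and everything else is
 * packed into private->filldir_buf and emitted in batches by
 * btrfs_filldir().
 */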
5902 if (found_key.objectid != key.objectid) 5903 break; 5904 if (found_key.type != BTRFS_DIR_INDEX_KEY) 5905 break; 5906 if (found_key.offset < ctx->pos) 5907 continue; 5908 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 5909 continue; 5910 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 5911 name_len = btrfs_dir_name_len(leaf, di); 5912 if ((total_len + sizeof(struct dir_entry) + name_len) >= 5913 PAGE_SIZE) { 5914 btrfs_release_path(path); 5915 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5916 if (ret) 5917 goto nopos; 5918 addr = private->filldir_buf; 5919 entries = 0; 5920 total_len = 0; 5921 goto again; 5922 } 5923 5924 ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di)); 5925 entry = addr; 5926 name_ptr = (char *)(entry + 1); 5927 read_extent_buffer(leaf, name_ptr, 5928 (unsigned long)(di + 1), name_len); 5929 put_unaligned(name_len, &entry->name_len); 5930 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type); 5931 btrfs_dir_item_key_to_cpu(leaf, di, &location); 5932 put_unaligned(location.objectid, &entry->ino); 5933 put_unaligned(found_key.offset, &entry->offset); 5934 entries++; 5935 addr += sizeof(struct dir_entry) + name_len; 5936 total_len += sizeof(struct dir_entry) + name_len; 5937 } 5938 /* Catch error encountered during iteration */ 5939 if (ret < 0) 5940 goto err; 5941 5942 btrfs_release_path(path); 5943 5944 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5945 if (ret) 5946 goto nopos; 5947 5948 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 5949 if (ret) 5950 goto nopos; 5951 5952 /* 5953 * Stop new entries from being returned after we return the last 5954 * entry. 5955 * 5956 * New directory entries are assigned a strictly increasing 5957 * offset. This means that new entries created during readdir 5958 * are *guaranteed* to be seen in the future by that readdir. 5959 * This has broken buggy programs which operate on names as 5960 * they're returned by readdir. Until we re-use freed offsets 5961 * we have this hack to stop new entries from being returned 5962 * under the assumption that they'll never reach this huge 5963 * offset. 5964 * 5965 * This is being careful not to overflow 32bit loff_t unless the 5966 * last entry requires it because doing so has broken 32bit apps 5967 * in the past. 5968 */ 5969 if (ctx->pos >= INT_MAX) 5970 ctx->pos = LLONG_MAX; 5971 else 5972 ctx->pos = INT_MAX; 5973 nopos: 5974 ret = 0; 5975 err: 5976 if (put) 5977 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); 5978 btrfs_free_path(path); 5979 return ret; 5980 } 5981 5982 /* 5983 * This is somewhat expensive, updating the tree every time the 5984 * inode changes. But, it is most likely to find the inode in cache. 5985 * FIXME, needs more benchmarking...there are no reasons other than performance 5986 * to keep or drop this code. 
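 * (Note the ENOSPC/EDQUOT fallback below: if the cheap join-transaction
 * update fails for lack of space, we retry the inode update with a
 * full transaction reservation.)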
5987 */ 5988 static int btrfs_dirty_inode(struct btrfs_inode *inode) 5989 { 5990 struct btrfs_root *root = inode->root; 5991 struct btrfs_fs_info *fs_info = root->fs_info; 5992 struct btrfs_trans_handle *trans; 5993 int ret; 5994 5995 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) 5996 return 0; 5997 5998 trans = btrfs_join_transaction(root); 5999 if (IS_ERR(trans)) 6000 return PTR_ERR(trans); 6001 6002 ret = btrfs_update_inode(trans, root, inode); 6003 if (ret && (ret == -ENOSPC || ret == -EDQUOT)) { 6004 /* whoops, lets try again with the full transaction */ 6005 btrfs_end_transaction(trans); 6006 trans = btrfs_start_transaction(root, 1); 6007 if (IS_ERR(trans)) 6008 return PTR_ERR(trans); 6009 6010 ret = btrfs_update_inode(trans, root, inode); 6011 } 6012 btrfs_end_transaction(trans); 6013 if (inode->delayed_node) 6014 btrfs_balance_delayed_items(fs_info); 6015 6016 return ret; 6017 } 6018 6019 /* 6020 * This is a copy of file_update_time. We need this so we can return error on 6021 * ENOSPC for updating the inode in the case of file write and mmap writes. 6022 */ 6023 static int btrfs_update_time(struct inode *inode, struct timespec64 *now, 6024 int flags) 6025 { 6026 struct btrfs_root *root = BTRFS_I(inode)->root; 6027 bool dirty = flags & ~S_VERSION; 6028 6029 if (btrfs_root_readonly(root)) 6030 return -EROFS; 6031 6032 if (flags & S_VERSION) 6033 dirty |= inode_maybe_inc_iversion(inode, dirty); 6034 if (flags & S_CTIME) 6035 inode->i_ctime = *now; 6036 if (flags & S_MTIME) 6037 inode->i_mtime = *now; 6038 if (flags & S_ATIME) 6039 inode->i_atime = *now; 6040 return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0; 6041 } 6042 6043 /* 6044 * find the highest existing sequence number in a directory 6045 * and then set the in-memory index_cnt variable to reflect 6046 * free sequence numbers 6047 */ 6048 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 6049 { 6050 struct btrfs_root *root = inode->root; 6051 struct btrfs_key key, found_key; 6052 struct btrfs_path *path; 6053 struct extent_buffer *leaf; 6054 int ret; 6055 6056 key.objectid = btrfs_ino(inode); 6057 key.type = BTRFS_DIR_INDEX_KEY; 6058 key.offset = (u64)-1; 6059 6060 path = btrfs_alloc_path(); 6061 if (!path) 6062 return -ENOMEM; 6063 6064 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6065 if (ret < 0) 6066 goto out; 6067 /* FIXME: we should be able to handle this */ 6068 if (ret == 0) 6069 goto out; 6070 ret = 0; 6071 6072 if (path->slots[0] == 0) { 6073 inode->index_cnt = BTRFS_DIR_START_INDEX; 6074 goto out; 6075 } 6076 6077 path->slots[0]--; 6078 6079 leaf = path->nodes[0]; 6080 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6081 6082 if (found_key.objectid != btrfs_ino(inode) || 6083 found_key.type != BTRFS_DIR_INDEX_KEY) { 6084 inode->index_cnt = BTRFS_DIR_START_INDEX; 6085 goto out; 6086 } 6087 6088 inode->index_cnt = found_key.offset + 1; 6089 out: 6090 btrfs_free_path(path); 6091 return ret; 6092 } 6093 6094 /* 6095 * helper to find a free sequence number in a given directory. 
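 * (The sequence number becomes the DIR_INDEX key offset of the new
 * entry, i.e. its readdir position.)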
This current 6096 * code is very simple, later versions will do smarter things in the btree 6097 */ 6098 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6099 { 6100 int ret = 0; 6101 6102 if (dir->index_cnt == (u64)-1) { 6103 ret = btrfs_inode_delayed_dir_index_count(dir); 6104 if (ret) { 6105 ret = btrfs_set_inode_index_count(dir); 6106 if (ret) 6107 return ret; 6108 } 6109 } 6110 6111 *index = dir->index_cnt; 6112 dir->index_cnt++; 6113 6114 return ret; 6115 } 6116 6117 static int btrfs_insert_inode_locked(struct inode *inode) 6118 { 6119 struct btrfs_iget_args args; 6120 6121 args.ino = BTRFS_I(inode)->location.objectid; 6122 args.root = BTRFS_I(inode)->root; 6123 6124 return insert_inode_locked4(inode, 6125 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6126 btrfs_find_actor, &args); 6127 } 6128 6129 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, 6130 unsigned int *trans_num_items) 6131 { 6132 struct inode *dir = args->dir; 6133 struct inode *inode = args->inode; 6134 int ret; 6135 6136 if (!args->orphan) { 6137 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, 6138 &args->fname); 6139 if (ret) 6140 return ret; 6141 } 6142 6143 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); 6144 if (ret) { 6145 fscrypt_free_filename(&args->fname); 6146 return ret; 6147 } 6148 6149 /* 1 to add inode item */ 6150 *trans_num_items = 1; 6151 /* 1 to add compression property */ 6152 if (BTRFS_I(dir)->prop_compress) 6153 (*trans_num_items)++; 6154 /* 1 to add default ACL xattr */ 6155 if (args->default_acl) 6156 (*trans_num_items)++; 6157 /* 1 to add access ACL xattr */ 6158 if (args->acl) 6159 (*trans_num_items)++; 6160 #ifdef CONFIG_SECURITY 6161 /* 1 to add LSM xattr */ 6162 if (dir->i_security) 6163 (*trans_num_items)++; 6164 #endif 6165 if (args->orphan) { 6166 /* 1 to add orphan item */ 6167 (*trans_num_items)++; 6168 } else { 6169 /* 6170 * 1 to add dir item 6171 * 1 to add dir index 6172 * 1 to update parent inode item 6173 * 6174 * No need for 1 unit for the inode ref item because it is 6175 * inserted in a batch together with the inode item at 6176 * btrfs_create_new_inode(). 6177 */ 6178 *trans_num_items += 3; 6179 } 6180 return 0; 6181 } 6182 6183 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) 6184 { 6185 posix_acl_release(args->acl); 6186 posix_acl_release(args->default_acl); 6187 fscrypt_free_filename(&args->fname); 6188 } 6189 6190 /* 6191 * Inherit flags from the parent inode. 6192 * 6193 * Currently only the compression flags and the cow flags are inherited. 6194 */ 6195 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir) 6196 { 6197 unsigned int flags; 6198 6199 flags = dir->flags; 6200 6201 if (flags & BTRFS_INODE_NOCOMPRESS) { 6202 inode->flags &= ~BTRFS_INODE_COMPRESS; 6203 inode->flags |= BTRFS_INODE_NOCOMPRESS; 6204 } else if (flags & BTRFS_INODE_COMPRESS) { 6205 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; 6206 inode->flags |= BTRFS_INODE_COMPRESS; 6207 } 6208 6209 if (flags & BTRFS_INODE_NODATACOW) { 6210 inode->flags |= BTRFS_INODE_NODATACOW; 6211 if (S_ISREG(inode->vfs_inode.i_mode)) 6212 inode->flags |= BTRFS_INODE_NODATASUM; 6213 } 6214 6215 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode); 6216 } 6217 6218 int btrfs_create_new_inode(struct btrfs_trans_handle *trans, 6219 struct btrfs_new_inode_args *args) 6220 { 6221 struct inode *dir = args->dir; 6222 struct inode *inode = args->inode; 6223 const struct fscrypt_str *name = args->orphan ? 
NULL : &args->fname.disk_name; 6224 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6225 struct btrfs_root *root; 6226 struct btrfs_inode_item *inode_item; 6227 struct btrfs_key *location; 6228 struct btrfs_path *path; 6229 u64 objectid; 6230 struct btrfs_inode_ref *ref; 6231 struct btrfs_key key[2]; 6232 u32 sizes[2]; 6233 struct btrfs_item_batch batch; 6234 unsigned long ptr; 6235 int ret; 6236 6237 path = btrfs_alloc_path(); 6238 if (!path) 6239 return -ENOMEM; 6240 6241 if (!args->subvol) 6242 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); 6243 root = BTRFS_I(inode)->root; 6244 6245 ret = btrfs_get_free_objectid(root, &objectid); 6246 if (ret) 6247 goto out; 6248 inode->i_ino = objectid; 6249 6250 if (args->orphan) { 6251 /* 6252 * O_TMPFILE, set link count to 0, so that after this point, we 6253 * fill in an inode item with the correct link count. 6254 */ 6255 set_nlink(inode, 0); 6256 } else { 6257 trace_btrfs_inode_request(dir); 6258 6259 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); 6260 if (ret) 6261 goto out; 6262 } 6263 /* index_cnt is ignored for everything but a dir. */ 6264 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; 6265 BTRFS_I(inode)->generation = trans->transid; 6266 inode->i_generation = BTRFS_I(inode)->generation; 6267 6268 /* 6269 * Subvolumes don't inherit flags from their parent directory. 6270 * Originally this was probably by accident, but we probably can't 6271 * change it now without compatibility issues. 6272 */ 6273 if (!args->subvol) 6274 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir)); 6275 6276 if (S_ISREG(inode->i_mode)) { 6277 if (btrfs_test_opt(fs_info, NODATASUM)) 6278 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6279 if (btrfs_test_opt(fs_info, NODATACOW)) 6280 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6281 BTRFS_INODE_NODATASUM; 6282 } 6283 6284 location = &BTRFS_I(inode)->location; 6285 location->objectid = objectid; 6286 location->offset = 0; 6287 location->type = BTRFS_INODE_ITEM_KEY; 6288 6289 ret = btrfs_insert_inode_locked(inode); 6290 if (ret < 0) { 6291 if (!args->orphan) 6292 BTRFS_I(dir)->index_cnt--; 6293 goto out; 6294 } 6295 6296 /* 6297 * We could have gotten an inode number from somebody who was fsynced 6298 * and then removed in this same transaction, so let's just set full 6299 * sync since it will be a full sync anyway and this will blow away the 6300 * old info in the log. 6301 */ 6302 btrfs_set_inode_full_sync(BTRFS_I(inode)); 6303 6304 key[0].objectid = objectid; 6305 key[0].type = BTRFS_INODE_ITEM_KEY; 6306 key[0].offset = 0; 6307 6308 sizes[0] = sizeof(struct btrfs_inode_item); 6309 6310 if (!args->orphan) { 6311 /* 6312 * Start new inodes with an inode_ref. This is slightly more 6313 * efficient for small numbers of hard links since they will 6314 * be packed into one item. Extended refs will kick in if we 6315 * add more hard links than can fit in the ref item. 6316 */ 6317 key[1].objectid = objectid; 6318 key[1].type = BTRFS_INODE_REF_KEY; 6319 if (args->subvol) { 6320 key[1].offset = objectid; 6321 sizes[1] = 2 + sizeof(*ref); 6322 } else { 6323 key[1].offset = btrfs_ino(BTRFS_I(dir)); 6324 sizes[1] = name->len + sizeof(*ref); 6325 } 6326 } 6327 6328 batch.keys = &key[0]; 6329 batch.data_sizes = &sizes[0]; 6330 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); 6331 batch.nr = args->orphan ? 
1 : 2; 6332 ret = btrfs_insert_empty_items(trans, root, path, &batch); 6333 if (ret != 0) { 6334 btrfs_abort_transaction(trans, ret); 6335 goto discard; 6336 } 6337 6338 inode->i_mtime = current_time(inode); 6339 inode->i_atime = inode->i_mtime; 6340 inode->i_ctime = inode->i_mtime; 6341 BTRFS_I(inode)->i_otime = inode->i_mtime; 6342 6343 /* 6344 * We're going to fill the inode item now, so at this point the inode 6345 * must be fully initialized. 6346 */ 6347 6348 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6349 struct btrfs_inode_item); 6350 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6351 sizeof(*inode_item)); 6352 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6353 6354 if (!args->orphan) { 6355 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6356 struct btrfs_inode_ref); 6357 ptr = (unsigned long)(ref + 1); 6358 if (args->subvol) { 6359 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); 6360 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); 6361 write_extent_buffer(path->nodes[0], "..", ptr, 2); 6362 } else { 6363 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 6364 name->len); 6365 btrfs_set_inode_ref_index(path->nodes[0], ref, 6366 BTRFS_I(inode)->dir_index); 6367 write_extent_buffer(path->nodes[0], name->name, ptr, 6368 name->len); 6369 } 6370 } 6371 6372 btrfs_mark_buffer_dirty(path->nodes[0]); 6373 /* 6374 * We don't need the path anymore, and inheriting properties, adding 6375 * ACLs, security xattrs, the orphan item or adding the link will all 6376 * result in allocating yet another path. So just free our path. 6377 */ 6378 btrfs_free_path(path); 6379 path = NULL; 6380 6381 if (args->subvol) { 6382 struct inode *parent; 6383 6384 /* 6385 * Subvolumes inherit properties from their parent subvolume, 6386 * not the directory they were created in. 6387 */ 6388 parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, 6389 BTRFS_I(dir)->root); 6390 if (IS_ERR(parent)) { 6391 ret = PTR_ERR(parent); 6392 } else { 6393 ret = btrfs_inode_inherit_props(trans, inode, parent); 6394 iput(parent); 6395 } 6396 } else { 6397 ret = btrfs_inode_inherit_props(trans, inode, dir); 6398 } 6399 if (ret) { 6400 btrfs_err(fs_info, 6401 "error inheriting props for ino %llu (root %llu): %d", 6402 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, 6403 ret); 6404 } 6405 6406 /* 6407 * Subvolumes don't inherit ACLs or get passed to the LSM. This is 6408 * probably a bug. 6409 */ 6410 if (!args->subvol) { 6411 ret = btrfs_init_inode_security(trans, args); 6412 if (ret) { 6413 btrfs_abort_transaction(trans, ret); 6414 goto discard; 6415 } 6416 } 6417 6418 inode_tree_add(BTRFS_I(inode)); 6419 6420 trace_btrfs_inode_new(inode); 6421 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6422 6423 btrfs_update_root_times(trans, root); 6424 6425 if (args->orphan) { 6426 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 6427 } else { 6428 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 6429 0, BTRFS_I(inode)->dir_index); 6430 } 6431 if (ret) { 6432 btrfs_abort_transaction(trans, ret); 6433 goto discard; 6434 } 6435 6436 return 0; 6437 6438 discard: 6439 /* 6440 * discard_new_inode() calls iput(), but the caller owns the reference 6441 * to the inode. 6442 */ 6443 ihold(inode); 6444 discard_new_inode(inode); 6445 out: 6446 btrfs_free_path(path); 6447 return ret; 6448 } 6449 6450 /* 6451 * utility function to add 'inode' into 'parent_inode' with 6452 * a given name and a given sequence number.
6453 * if 'add_backref' is true, also insert a backref from the 6454 * inode to the parent directory. 6455 */ 6456 int btrfs_add_link(struct btrfs_trans_handle *trans, 6457 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6458 const struct fscrypt_str *name, int add_backref, u64 index) 6459 { 6460 int ret = 0; 6461 struct btrfs_key key; 6462 struct btrfs_root *root = parent_inode->root; 6463 u64 ino = btrfs_ino(inode); 6464 u64 parent_ino = btrfs_ino(parent_inode); 6465 6466 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6467 memcpy(&key, &inode->root->root_key, sizeof(key)); 6468 } else { 6469 key.objectid = ino; 6470 key.type = BTRFS_INODE_ITEM_KEY; 6471 key.offset = 0; 6472 } 6473 6474 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6475 ret = btrfs_add_root_ref(trans, key.objectid, 6476 root->root_key.objectid, parent_ino, 6477 index, name); 6478 } else if (add_backref) { 6479 ret = btrfs_insert_inode_ref(trans, root, name, 6480 ino, parent_ino, index); 6481 } 6482 6483 /* Nothing to clean up yet */ 6484 if (ret) 6485 return ret; 6486 6487 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key, 6488 btrfs_inode_type(&inode->vfs_inode), index); 6489 if (ret == -EEXIST || ret == -EOVERFLOW) 6490 goto fail_dir_item; 6491 else if (ret) { 6492 btrfs_abort_transaction(trans, ret); 6493 return ret; 6494 } 6495 6496 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6497 name->len * 2); 6498 inode_inc_iversion(&parent_inode->vfs_inode); 6499 /* 6500 * If we are replaying a log tree, we do not want to update the mtime 6501 * and ctime of the parent directory with the current time, since the 6502 * log replay procedure is responsible for setting them to their correct 6503 * values (the ones it had when the fsync was done). 
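 * (This is the reason for the BTRFS_FS_LOG_RECOVERING test below.)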
6504 */ 6505 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) { 6506 struct timespec64 now = current_time(&parent_inode->vfs_inode); 6507 6508 parent_inode->vfs_inode.i_mtime = now; 6509 parent_inode->vfs_inode.i_ctime = now; 6510 } 6511 ret = btrfs_update_inode(trans, root, parent_inode); 6512 if (ret) 6513 btrfs_abort_transaction(trans, ret); 6514 return ret; 6515 6516 fail_dir_item: 6517 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6518 u64 local_index; 6519 int err; 6520 err = btrfs_del_root_ref(trans, key.objectid, 6521 root->root_key.objectid, parent_ino, 6522 &local_index, name); 6523 if (err) 6524 btrfs_abort_transaction(trans, err); 6525 } else if (add_backref) { 6526 u64 local_index; 6527 int err; 6528 6529 err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, 6530 &local_index); 6531 if (err) 6532 btrfs_abort_transaction(trans, err); 6533 } 6534 6535 /* Return the original error code */ 6536 return ret; 6537 } 6538 6539 static int btrfs_create_common(struct inode *dir, struct dentry *dentry, 6540 struct inode *inode) 6541 { 6542 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6543 struct btrfs_root *root = BTRFS_I(dir)->root; 6544 struct btrfs_new_inode_args new_inode_args = { 6545 .dir = dir, 6546 .dentry = dentry, 6547 .inode = inode, 6548 }; 6549 unsigned int trans_num_items; 6550 struct btrfs_trans_handle *trans; 6551 int err; 6552 6553 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 6554 if (err) 6555 goto out_inode; 6556 6557 trans = btrfs_start_transaction(root, trans_num_items); 6558 if (IS_ERR(trans)) { 6559 err = PTR_ERR(trans); 6560 goto out_new_inode_args; 6561 } 6562 6563 err = btrfs_create_new_inode(trans, &new_inode_args); 6564 if (!err) 6565 d_instantiate_new(dentry, inode); 6566 6567 btrfs_end_transaction(trans); 6568 btrfs_btree_balance_dirty(fs_info); 6569 out_new_inode_args: 6570 btrfs_new_inode_args_destroy(&new_inode_args); 6571 out_inode: 6572 if (err) 6573 iput(inode); 6574 return err; 6575 } 6576 6577 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir, 6578 struct dentry *dentry, umode_t mode, dev_t rdev) 6579 { 6580 struct inode *inode; 6581 6582 inode = new_inode(dir->i_sb); 6583 if (!inode) 6584 return -ENOMEM; 6585 inode_init_owner(idmap, inode, dir, mode); 6586 inode->i_op = &btrfs_special_inode_operations; 6587 init_special_inode(inode, inode->i_mode, rdev); 6588 return btrfs_create_common(dir, dentry, inode); 6589 } 6590 6591 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir, 6592 struct dentry *dentry, umode_t mode, bool excl) 6593 { 6594 struct inode *inode; 6595 6596 inode = new_inode(dir->i_sb); 6597 if (!inode) 6598 return -ENOMEM; 6599 inode_init_owner(idmap, inode, dir, mode); 6600 inode->i_fop = &btrfs_file_operations; 6601 inode->i_op = &btrfs_file_inode_operations; 6602 inode->i_mapping->a_ops = &btrfs_aops; 6603 return btrfs_create_common(dir, dentry, inode); 6604 } 6605 6606 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6607 struct dentry *dentry) 6608 { 6609 struct btrfs_trans_handle *trans = NULL; 6610 struct btrfs_root *root = BTRFS_I(dir)->root; 6611 struct inode *inode = d_inode(old_dentry); 6612 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 6613 struct fscrypt_name fname; 6614 u64 index; 6615 int err; 6616 int drop_inode = 0; 6617 6618 /* do not allow sys_link's with other subvols of the same device */ 6619 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) 6620 return -EXDEV; 6621 6622 if 
(inode->i_nlink >= BTRFS_LINK_MAX) 6623 return -EMLINK; 6624 6625 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); 6626 if (err) 6627 goto fail; 6628 6629 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6630 if (err) 6631 goto fail; 6632 6633 /* 6634 * 2 items for inode and inode ref 6635 * 2 items for dir items 6636 * 1 item for parent inode 6637 * 1 item for orphan item deletion if O_TMPFILE 6638 */ 6639 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6640 if (IS_ERR(trans)) { 6641 err = PTR_ERR(trans); 6642 trans = NULL; 6643 goto fail; 6644 } 6645 6646 /* There are several dir indexes for this inode, clear the cache. */ 6647 BTRFS_I(inode)->dir_index = 0ULL; 6648 inc_nlink(inode); 6649 inode_inc_iversion(inode); 6650 inode->i_ctime = current_time(inode); 6651 ihold(inode); 6652 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6653 6654 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6655 &fname.disk_name, 1, index); 6656 6657 if (err) { 6658 drop_inode = 1; 6659 } else { 6660 struct dentry *parent = dentry->d_parent; 6661 6662 err = btrfs_update_inode(trans, root, BTRFS_I(inode)); 6663 if (err) 6664 goto fail; 6665 if (inode->i_nlink == 1) { 6666 /* 6667 * If new hard link count is 1, it's a file created 6668 * with open(2) O_TMPFILE flag. 6669 */ 6670 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6671 if (err) 6672 goto fail; 6673 } 6674 d_instantiate(dentry, inode); 6675 btrfs_log_new_name(trans, old_dentry, NULL, 0, parent); 6676 } 6677 6678 fail: 6679 fscrypt_free_filename(&fname); 6680 if (trans) 6681 btrfs_end_transaction(trans); 6682 if (drop_inode) { 6683 inode_dec_link_count(inode); 6684 iput(inode); 6685 } 6686 btrfs_btree_balance_dirty(fs_info); 6687 return err; 6688 } 6689 6690 static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, 6691 struct dentry *dentry, umode_t mode) 6692 { 6693 struct inode *inode; 6694 6695 inode = new_inode(dir->i_sb); 6696 if (!inode) 6697 return -ENOMEM; 6698 inode_init_owner(idmap, inode, dir, S_IFDIR | mode); 6699 inode->i_op = &btrfs_dir_inode_operations; 6700 inode->i_fop = &btrfs_dir_file_operations; 6701 return btrfs_create_common(dir, dentry, inode); 6702 } 6703 6704 static noinline int uncompress_inline(struct btrfs_path *path, 6705 struct page *page, 6706 struct btrfs_file_extent_item *item) 6707 { 6708 int ret; 6709 struct extent_buffer *leaf = path->nodes[0]; 6710 char *tmp; 6711 size_t max_size; 6712 unsigned long inline_size; 6713 unsigned long ptr; 6714 int compress_type; 6715 6716 compress_type = btrfs_file_extent_compression(leaf, item); 6717 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6718 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 6719 tmp = kmalloc(inline_size, GFP_NOFS); 6720 if (!tmp) 6721 return -ENOMEM; 6722 ptr = btrfs_file_extent_inline_start(item); 6723 6724 read_extent_buffer(leaf, tmp, ptr, inline_size); 6725 6726 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6727 ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size); 6728 6729 /* 6730 * decompression code contains a memset to fill in any space between the end 6731 * of the uncompressed data and the end of max_size in case the decompressed 6732 * data ends up shorter than ram_bytes. That doesn't cover the hole between 6733 * the end of an inline extent and the beginning of the next block, so we 6734 * cover that region here. 
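 *
 * Pictorially, for a 4K page:
 *
 *   |<--- decompressed data --->|<---- zeroed below ---->|
 *   0                       max_size               PAGE_SIZE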
6735 */ 6736 6737 if (max_size < PAGE_SIZE) 6738 memzero_page(page, max_size, PAGE_SIZE - max_size); 6739 kfree(tmp); 6740 return ret; 6741 } 6742 6743 static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path, 6744 struct page *page) 6745 { 6746 struct btrfs_file_extent_item *fi; 6747 void *kaddr; 6748 size_t copy_size; 6749 6750 if (!page || PageUptodate(page)) 6751 return 0; 6752 6753 ASSERT(page_offset(page) == 0); 6754 6755 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 6756 struct btrfs_file_extent_item); 6757 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) 6758 return uncompress_inline(path, page, fi); 6759 6760 copy_size = min_t(u64, PAGE_SIZE, 6761 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); 6762 kaddr = kmap_local_page(page); 6763 read_extent_buffer(path->nodes[0], kaddr, 6764 btrfs_file_extent_inline_start(fi), copy_size); 6765 kunmap_local(kaddr); 6766 if (copy_size < PAGE_SIZE) 6767 memzero_page(page, copy_size, PAGE_SIZE - copy_size); 6768 return 0; 6769 } 6770 6771 /* 6772 * Lookup the first extent overlapping a range in a file. 6773 * 6774 * @inode: file to search in 6775 * @page: page to read extent data into if the extent is inline 6776 * @pg_offset: offset into @page to copy to 6777 * @start: file offset 6778 * @len: length of range starting at @start 6779 * 6780 * Return the first &struct extent_map which overlaps the given range, reading 6781 * it from the B-tree and caching it if necessary. Note that there may be more 6782 * extents which overlap the given range after the returned extent_map. 6783 * 6784 * If @page is not NULL and the extent is inline, this also reads the extent 6785 * data directly into the page and marks the extent up to date in the io_tree. 6786 * 6787 * Return: ERR_PTR on error, non-NULL extent_map on success. 6788 */ 6789 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6790 struct page *page, size_t pg_offset, 6791 u64 start, u64 len) 6792 { 6793 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6794 int ret = 0; 6795 u64 extent_start = 0; 6796 u64 extent_end = 0; 6797 u64 objectid = btrfs_ino(inode); 6798 int extent_type = -1; 6799 struct btrfs_path *path = NULL; 6800 struct btrfs_root *root = inode->root; 6801 struct btrfs_file_extent_item *item; 6802 struct extent_buffer *leaf; 6803 struct btrfs_key found_key; 6804 struct extent_map *em = NULL; 6805 struct extent_map_tree *em_tree = &inode->extent_tree; 6806 6807 read_lock(&em_tree->lock); 6808 em = lookup_extent_mapping(em_tree, start, len); 6809 read_unlock(&em_tree->lock); 6810 6811 if (em) { 6812 if (em->start > start || em->start + em->len <= start) 6813 free_extent_map(em); 6814 else if (em->block_start == EXTENT_MAP_INLINE && page) 6815 free_extent_map(em); 6816 else 6817 goto out; 6818 } 6819 em = alloc_extent_map(); 6820 if (!em) { 6821 ret = -ENOMEM; 6822 goto out; 6823 } 6824 em->start = EXTENT_MAP_HOLE; 6825 em->orig_start = EXTENT_MAP_HOLE; 6826 em->len = (u64)-1; 6827 em->block_len = (u64)-1; 6828 6829 path = btrfs_alloc_path(); 6830 if (!path) { 6831 ret = -ENOMEM; 6832 goto out; 6833 } 6834 6835 /* Chances are we'll be called again, so go ahead and do readahead */ 6836 path->reada = READA_FORWARD; 6837 6838 /* 6839 * The same explanation in load_free_space_cache applies here as well, 6840 * we only read when we're loading the free space cache, and at that 6841 * point the commit_root has everything we need. 
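 * That is why it is safe to set search_commit_root and skip_locking
 * on the path below for free space inodes.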
6842 */ 6843 if (btrfs_is_free_space_inode(inode)) { 6844 path->search_commit_root = 1; 6845 path->skip_locking = 1; 6846 } 6847 6848 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6849 if (ret < 0) { 6850 goto out; 6851 } else if (ret > 0) { 6852 if (path->slots[0] == 0) 6853 goto not_found; 6854 path->slots[0]--; 6855 ret = 0; 6856 } 6857 6858 leaf = path->nodes[0]; 6859 item = btrfs_item_ptr(leaf, path->slots[0], 6860 struct btrfs_file_extent_item); 6861 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6862 if (found_key.objectid != objectid || 6863 found_key.type != BTRFS_EXTENT_DATA_KEY) { 6864 /* 6865 * If we backup past the first extent we want to move forward 6866 * and see if there is an extent in front of us, otherwise we'll 6867 * say there is a hole for our whole search range which can 6868 * cause problems. 6869 */ 6870 extent_end = start; 6871 goto next; 6872 } 6873 6874 extent_type = btrfs_file_extent_type(leaf, item); 6875 extent_start = found_key.offset; 6876 extent_end = btrfs_file_extent_end(path); 6877 if (extent_type == BTRFS_FILE_EXTENT_REG || 6878 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6879 /* Only regular file could have regular/prealloc extent */ 6880 if (!S_ISREG(inode->vfs_inode.i_mode)) { 6881 ret = -EUCLEAN; 6882 btrfs_crit(fs_info, 6883 "regular/prealloc extent found for non-regular inode %llu", 6884 btrfs_ino(inode)); 6885 goto out; 6886 } 6887 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 6888 extent_start); 6889 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6890 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 6891 path->slots[0], 6892 extent_start); 6893 } 6894 next: 6895 if (start >= extent_end) { 6896 path->slots[0]++; 6897 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6898 ret = btrfs_next_leaf(root, path); 6899 if (ret < 0) 6900 goto out; 6901 else if (ret > 0) 6902 goto not_found; 6903 6904 leaf = path->nodes[0]; 6905 } 6906 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6907 if (found_key.objectid != objectid || 6908 found_key.type != BTRFS_EXTENT_DATA_KEY) 6909 goto not_found; 6910 if (start + len <= found_key.offset) 6911 goto not_found; 6912 if (start > found_key.offset) 6913 goto next; 6914 6915 /* New extent overlaps with existing one */ 6916 em->start = start; 6917 em->orig_start = start; 6918 em->len = found_key.offset - start; 6919 em->block_start = EXTENT_MAP_HOLE; 6920 goto insert; 6921 } 6922 6923 btrfs_extent_item_to_extent_map(inode, path, item, em); 6924 6925 if (extent_type == BTRFS_FILE_EXTENT_REG || 6926 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6927 goto insert; 6928 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6929 /* 6930 * Inline extent can only exist at file offset 0. This is 6931 * ensured by tree-checker and inline extent creation path. 6932 * Thus all members representing file offsets should be zero. 6933 */ 6934 ASSERT(pg_offset == 0); 6935 ASSERT(extent_start == 0); 6936 ASSERT(em->start == 0); 6937 6938 /* 6939 * btrfs_extent_item_to_extent_map() should have properly 6940 * initialized em members already. 6941 * 6942 * Other members are not utilized for inline extents. 
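 * (For an inline extent, block_start is EXTENT_MAP_INLINE and len
 * covers exactly one sector, which the ASSERTs right below verify.)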
6943 */ 6944 ASSERT(em->block_start == EXTENT_MAP_INLINE); 6945 ASSERT(em->len == fs_info->sectorsize); 6946 6947 ret = read_inline_extent(inode, path, page); 6948 if (ret < 0) 6949 goto out; 6950 goto insert; 6951 } 6952 not_found: 6953 em->start = start; 6954 em->orig_start = start; 6955 em->len = len; 6956 em->block_start = EXTENT_MAP_HOLE; 6957 insert: 6958 ret = 0; 6959 btrfs_release_path(path); 6960 if (em->start > start || extent_map_end(em) <= start) { 6961 btrfs_err(fs_info, 6962 "bad extent! em: [%llu %llu] passed [%llu %llu]", 6963 em->start, em->len, start, len); 6964 ret = -EIO; 6965 goto out; 6966 } 6967 6968 write_lock(&em_tree->lock); 6969 ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); 6970 write_unlock(&em_tree->lock); 6971 out: 6972 btrfs_free_path(path); 6973 6974 trace_btrfs_get_extent(root, inode, em); 6975 6976 if (ret) { 6977 free_extent_map(em); 6978 return ERR_PTR(ret); 6979 } 6980 return em; 6981 } 6982 6983 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, 6984 const u64 start, 6985 const u64 len, 6986 const u64 orig_start, 6987 const u64 block_start, 6988 const u64 block_len, 6989 const u64 orig_block_len, 6990 const u64 ram_bytes, 6991 const int type) 6992 { 6993 struct extent_map *em = NULL; 6994 int ret; 6995 6996 if (type != BTRFS_ORDERED_NOCOW) { 6997 em = create_io_em(inode, start, len, orig_start, block_start, 6998 block_len, orig_block_len, ram_bytes, 6999 BTRFS_COMPRESS_NONE, /* compress_type */ 7000 type); 7001 if (IS_ERR(em)) 7002 goto out; 7003 } 7004 ret = btrfs_add_ordered_extent(inode, start, len, len, block_start, 7005 block_len, 0, 7006 (1 << type) | 7007 (1 << BTRFS_ORDERED_DIRECT), 7008 BTRFS_COMPRESS_NONE); 7009 if (ret) { 7010 if (em) { 7011 free_extent_map(em); 7012 btrfs_drop_extent_map_range(inode, start, 7013 start + len - 1, false); 7014 } 7015 em = ERR_PTR(ret); 7016 } 7017 out: 7018 7019 return em; 7020 } 7021 7022 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, 7023 u64 start, u64 len) 7024 { 7025 struct btrfs_root *root = inode->root; 7026 struct btrfs_fs_info *fs_info = root->fs_info; 7027 struct extent_map *em; 7028 struct btrfs_key ins; 7029 u64 alloc_hint; 7030 int ret; 7031 7032 alloc_hint = get_extent_allocation_hint(inode, start, len); 7033 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 7034 0, alloc_hint, &ins, 1, 1); 7035 if (ret) 7036 return ERR_PTR(ret); 7037 7038 em = btrfs_create_dio_extent(inode, start, ins.offset, start, 7039 ins.objectid, ins.offset, ins.offset, 7040 ins.offset, BTRFS_ORDERED_REGULAR); 7041 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 7042 if (IS_ERR(em)) 7043 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 7044 1); 7045 7046 return em; 7047 } 7048 7049 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) 7050 { 7051 struct btrfs_block_group *block_group; 7052 bool readonly = false; 7053 7054 block_group = btrfs_lookup_block_group(fs_info, bytenr); 7055 if (!block_group || block_group->ro) 7056 readonly = true; 7057 if (block_group) 7058 btrfs_put_block_group(block_group); 7059 return readonly; 7060 } 7061 7062 /* 7063 * Check if we can do nocow write into the range [@offset, @offset + @len) 7064 * 7065 * @offset: File offset 7066 * @len: The length to write, will be updated to the nocow writeable 7067 * range 7068 * @orig_start: (optional) Return the original file offset of the file extent 7069 * @orig_len: (optional) Return the original on-disk length of 
the file extent 7070 * @ram_bytes: (optional) Return the ram_bytes of the file extent 7071 * @strict: if true, omit optimizations that might force us into unnecessary 7072 * cow. e.g., don't trust generation number. 7073 * 7074 * Return: 7075 * >0 and update @len if we can do nocow write 7076 * 0 if we can't do nocow write 7077 * <0 if error happened 7078 * 7079 * NOTE: This only checks the file extents, caller is responsible to wait for 7080 * any ordered extents. 7081 */ 7082 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7083 u64 *orig_start, u64 *orig_block_len, 7084 u64 *ram_bytes, bool nowait, bool strict) 7085 { 7086 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7087 struct can_nocow_file_extent_args nocow_args = { 0 }; 7088 struct btrfs_path *path; 7089 int ret; 7090 struct extent_buffer *leaf; 7091 struct btrfs_root *root = BTRFS_I(inode)->root; 7092 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7093 struct btrfs_file_extent_item *fi; 7094 struct btrfs_key key; 7095 int found_type; 7096 7097 path = btrfs_alloc_path(); 7098 if (!path) 7099 return -ENOMEM; 7100 path->nowait = nowait; 7101 7102 ret = btrfs_lookup_file_extent(NULL, root, path, 7103 btrfs_ino(BTRFS_I(inode)), offset, 0); 7104 if (ret < 0) 7105 goto out; 7106 7107 if (ret == 1) { 7108 if (path->slots[0] == 0) { 7109 /* can't find the item, must cow */ 7110 ret = 0; 7111 goto out; 7112 } 7113 path->slots[0]--; 7114 } 7115 ret = 0; 7116 leaf = path->nodes[0]; 7117 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 7118 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7119 key.type != BTRFS_EXTENT_DATA_KEY) { 7120 /* not our file or wrong item type, must cow */ 7121 goto out; 7122 } 7123 7124 if (key.offset > offset) { 7125 /* Wrong offset, must cow */ 7126 goto out; 7127 } 7128 7129 if (btrfs_file_extent_end(path) <= offset) 7130 goto out; 7131 7132 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 7133 found_type = btrfs_file_extent_type(leaf, fi); 7134 if (ram_bytes) 7135 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7136 7137 nocow_args.start = offset; 7138 nocow_args.end = offset + *len - 1; 7139 nocow_args.strict = strict; 7140 nocow_args.free_path = true; 7141 7142 ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args); 7143 /* can_nocow_file_extent() has freed the path. */ 7144 path = NULL; 7145 7146 if (ret != 1) { 7147 /* Treat errors as not being able to NOCOW. 
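 * Falling back to COW is always safe, it only costs an extra data
 * copy.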
*/ 7148 ret = 0; 7149 goto out; 7150 } 7151 7152 ret = 0; 7153 if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr)) 7154 goto out; 7155 7156 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7157 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7158 u64 range_end; 7159 7160 range_end = round_up(offset + nocow_args.num_bytes, 7161 root->fs_info->sectorsize) - 1; 7162 ret = test_range_bit(io_tree, offset, range_end, 7163 EXTENT_DELALLOC, 0, NULL); 7164 if (ret) { 7165 ret = -EAGAIN; 7166 goto out; 7167 } 7168 } 7169 7170 if (orig_start) 7171 *orig_start = key.offset - nocow_args.extent_offset; 7172 if (orig_block_len) 7173 *orig_block_len = nocow_args.disk_num_bytes; 7174 7175 *len = nocow_args.num_bytes; 7176 ret = 1; 7177 out: 7178 btrfs_free_path(path); 7179 return ret; 7180 } 7181 7182 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7183 struct extent_state **cached_state, 7184 unsigned int iomap_flags) 7185 { 7186 const bool writing = (iomap_flags & IOMAP_WRITE); 7187 const bool nowait = (iomap_flags & IOMAP_NOWAIT); 7188 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7189 struct btrfs_ordered_extent *ordered; 7190 int ret = 0; 7191 7192 while (1) { 7193 if (nowait) { 7194 if (!try_lock_extent(io_tree, lockstart, lockend, 7195 cached_state)) 7196 return -EAGAIN; 7197 } else { 7198 lock_extent(io_tree, lockstart, lockend, cached_state); 7199 } 7200 /* 7201 * We're concerned with the entire range that we're going to be 7202 * doing DIO to, so we need to make sure there's no ordered 7203 * extents in this range. 7204 */ 7205 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, 7206 lockend - lockstart + 1); 7207 7208 /* 7209 * We need to make sure there are no buffered pages in this 7210 * range either, we could have raced between the invalidate in 7211 * generic_file_direct_write and locking the extent. The 7212 * invalidate needs to happen so that reads after a write do not 7213 * get stale data. 7214 */ 7215 if (!ordered && 7216 (!writing || !filemap_range_has_page(inode->i_mapping, 7217 lockstart, lockend))) 7218 break; 7219 7220 unlock_extent(io_tree, lockstart, lockend, cached_state); 7221 7222 if (ordered) { 7223 if (nowait) { 7224 btrfs_put_ordered_extent(ordered); 7225 ret = -EAGAIN; 7226 break; 7227 } 7228 /* 7229 * If we are doing a DIO read and the ordered extent we 7230 * found is for a buffered write, we can not wait for it 7231 * to complete and retry, because if we do so we can 7232 * deadlock with concurrent buffered writes on page 7233 * locks. This happens only if our DIO read covers more 7234 * than one extent map, if at this point has already 7235 * created an ordered extent for a previous extent map 7236 * and locked its range in the inode's io tree, and a 7237 * concurrent write against that previous extent map's 7238 * range and this range started (we unlock the ranges 7239 * in the io tree only when the bios complete and 7240 * buffered writes always lock pages before attempting 7241 * to lock range in the io tree). 7242 */ 7243 if (writing || 7244 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) 7245 btrfs_start_ordered_extent(ordered); 7246 else 7247 ret = nowait ? 
-EAGAIN : -ENOTBLK; 7248 btrfs_put_ordered_extent(ordered); 7249 } else { 7250 /* 7251 * We could trigger writeback for this range (and wait 7252 * for it to complete) and then invalidate the pages for 7253 * this range (through invalidate_inode_pages2_range()), 7254 * but that can lead us to a deadlock with a concurrent 7255 * call to readahead (a buffered read or a defrag call 7256 * triggered a readahead) on a page lock due to an 7257 * ordered dio extent we created before but did not have 7258 * yet a corresponding bio submitted (whence it can not 7259 * complete), which makes readahead wait for that 7260 * ordered extent to complete while holding a lock on 7261 * that page. 7262 */ 7263 ret = nowait ? -EAGAIN : -ENOTBLK; 7264 } 7265 7266 if (ret) 7267 break; 7268 7269 cond_resched(); 7270 } 7271 7272 return ret; 7273 } 7274 7275 /* The callers of this must take lock_extent() */ 7276 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start, 7277 u64 len, u64 orig_start, u64 block_start, 7278 u64 block_len, u64 orig_block_len, 7279 u64 ram_bytes, int compress_type, 7280 int type) 7281 { 7282 struct extent_map *em; 7283 int ret; 7284 7285 ASSERT(type == BTRFS_ORDERED_PREALLOC || 7286 type == BTRFS_ORDERED_COMPRESSED || 7287 type == BTRFS_ORDERED_NOCOW || 7288 type == BTRFS_ORDERED_REGULAR); 7289 7290 em = alloc_extent_map(); 7291 if (!em) 7292 return ERR_PTR(-ENOMEM); 7293 7294 em->start = start; 7295 em->orig_start = orig_start; 7296 em->len = len; 7297 em->block_len = block_len; 7298 em->block_start = block_start; 7299 em->orig_block_len = orig_block_len; 7300 em->ram_bytes = ram_bytes; 7301 em->generation = -1; 7302 set_bit(EXTENT_FLAG_PINNED, &em->flags); 7303 if (type == BTRFS_ORDERED_PREALLOC) { 7304 set_bit(EXTENT_FLAG_FILLING, &em->flags); 7305 } else if (type == BTRFS_ORDERED_COMPRESSED) { 7306 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 7307 em->compress_type = compress_type; 7308 } 7309 7310 ret = btrfs_replace_extent_map_range(inode, em, true); 7311 if (ret) { 7312 free_extent_map(em); 7313 return ERR_PTR(ret); 7314 } 7315 7316 /* em got 2 refs now, callers needs to do free_extent_map once. */ 7317 return em; 7318 } 7319 7320 7321 static int btrfs_get_blocks_direct_write(struct extent_map **map, 7322 struct inode *inode, 7323 struct btrfs_dio_data *dio_data, 7324 u64 start, u64 len, 7325 unsigned int iomap_flags) 7326 { 7327 const bool nowait = (iomap_flags & IOMAP_NOWAIT); 7328 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7329 struct extent_map *em = *map; 7330 int type; 7331 u64 block_start, orig_start, orig_block_len, ram_bytes; 7332 struct btrfs_block_group *bg; 7333 bool can_nocow = false; 7334 bool space_reserved = false; 7335 u64 prev_len; 7336 int ret = 0; 7337 7338 /* 7339 * We don't allocate a new extent in the following cases 7340 * 7341 * 1) The inode is marked as NODATACOW. In this case we'll just use the 7342 * existing extent. 7343 * 2) The extent is marked as PREALLOC. We're good to go here and can 7344 * just use the extent. 
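 *
 * In every other case we fall through to the COW path below, where
 * btrfs_new_extent_direct() allocates a fresh extent for the write.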
7345 * 7346 */ 7347 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 7348 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7349 em->block_start != EXTENT_MAP_HOLE)) { 7350 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7351 type = BTRFS_ORDERED_PREALLOC; 7352 else 7353 type = BTRFS_ORDERED_NOCOW; 7354 len = min(len, em->len - (start - em->start)); 7355 block_start = em->block_start + (start - em->start); 7356 7357 if (can_nocow_extent(inode, start, &len, &orig_start, 7358 &orig_block_len, &ram_bytes, false, false) == 1) { 7359 bg = btrfs_inc_nocow_writers(fs_info, block_start); 7360 if (bg) 7361 can_nocow = true; 7362 } 7363 } 7364 7365 prev_len = len; 7366 if (can_nocow) { 7367 struct extent_map *em2; 7368 7369 /* We can NOCOW, so only need to reserve metadata space. */ 7370 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, 7371 nowait); 7372 if (ret < 0) { 7373 /* Our caller expects us to free the input extent map. */ 7374 free_extent_map(em); 7375 *map = NULL; 7376 btrfs_dec_nocow_writers(bg); 7377 if (nowait && (ret == -ENOSPC || ret == -EDQUOT)) 7378 ret = -EAGAIN; 7379 goto out; 7380 } 7381 space_reserved = true; 7382 7383 em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len, 7384 orig_start, block_start, 7385 len, orig_block_len, 7386 ram_bytes, type); 7387 btrfs_dec_nocow_writers(bg); 7388 if (type == BTRFS_ORDERED_PREALLOC) { 7389 free_extent_map(em); 7390 *map = em2; 7391 em = em2; 7392 } 7393 7394 if (IS_ERR(em2)) { 7395 ret = PTR_ERR(em2); 7396 goto out; 7397 } 7398 7399 dio_data->nocow_done = true; 7400 } else { 7401 /* Our caller expects us to free the input extent map. */ 7402 free_extent_map(em); 7403 *map = NULL; 7404 7405 if (nowait) 7406 return -EAGAIN; 7407 7408 /* 7409 * If we could not allocate data space before locking the file 7410 * range and we can't do a NOCOW write, then we have to fail. 7411 */ 7412 if (!dio_data->data_space_reserved) 7413 return -ENOSPC; 7414 7415 /* 7416 * We have to COW and we have already reserved data space before, 7417 * so now we reserve only metadata. 7418 */ 7419 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, 7420 false); 7421 if (ret < 0) 7422 goto out; 7423 space_reserved = true; 7424 7425 em = btrfs_new_extent_direct(BTRFS_I(inode), start, len); 7426 if (IS_ERR(em)) { 7427 ret = PTR_ERR(em); 7428 goto out; 7429 } 7430 *map = em; 7431 len = min(len, em->len - (start - em->start)); 7432 if (len < prev_len) 7433 btrfs_delalloc_release_metadata(BTRFS_I(inode), 7434 prev_len - len, true); 7435 } 7436 7437 /* 7438 * We have created our ordered extent, so we can now release our reservation 7439 * for an outstanding extent. 7440 */ 7441 btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len); 7442 7443 /* 7444 * Need to update the i_size under the extent lock so buffered 7445 * readers will get the updated i_size when we unlock. 
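 */

/*
 * Illustrative sketch (hypothetical helper): the reserve-then-trim pattern
 * used above. Metadata is reserved for the full requested length before
 * the extent is known; once the resulting extent turns out shorter, only
 * the difference is handed back.
 */
static inline void trim_metadata_reservation(struct btrfs_inode *inode,
					     u64 reserved_len, u64 used_len)
{
	if (used_len < reserved_len)
		btrfs_delalloc_release_metadata(inode, reserved_len - used_len,
						true);
}

/*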
7446 */ 7447 if (start + len > i_size_read(inode)) 7448 i_size_write(inode, start + len); 7449 out: 7450 if (ret && space_reserved) { 7451 btrfs_delalloc_release_extents(BTRFS_I(inode), len); 7452 btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true); 7453 } 7454 return ret; 7455 } 7456 7457 static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, 7458 loff_t length, unsigned int flags, struct iomap *iomap, 7459 struct iomap *srcmap) 7460 { 7461 struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); 7462 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7463 struct extent_map *em; 7464 struct extent_state *cached_state = NULL; 7465 struct btrfs_dio_data *dio_data = iter->private; 7466 u64 lockstart, lockend; 7467 const bool write = !!(flags & IOMAP_WRITE); 7468 int ret = 0; 7469 u64 len = length; 7470 const u64 data_alloc_len = length; 7471 bool unlock_extents = false; 7472 7473 /* 7474 * We could potentially fault if we have a buffer > PAGE_SIZE, and if 7475 * we're NOWAIT we may submit a bio for a partial range and return 7476 * EIOCBQUEUED, which would result in an errant short read. 7477 * 7478 * The best way to handle this would be to allow for partial completions 7479 * of iocb's, so we could submit the partial bio, return and fault in 7480 * the rest of the pages, and then submit the io for the rest of the 7481 * range. However we don't have that currently, so simply return 7482 * -EAGAIN at this point so that the normal path is used. 7483 */ 7484 if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE) 7485 return -EAGAIN; 7486 7487 /* 7488 * Cap the size of reads to that usually seen in buffered I/O as we need 7489 * to allocate a contiguous array for the checksums. 7490 */ 7491 if (!write) 7492 len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS); 7493 7494 lockstart = start; 7495 lockend = start + len - 1; 7496 7497 /* 7498 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't 7499 * enough if we've written compressed pages to this area, so we need to 7500 * flush the dirty pages again to make absolutely sure that any 7501 * outstanding dirty pages are on disk - the first flush only starts 7502 * compression on the data, while keeping the pages locked, so by the 7503 * time the second flush returns we know bios for the compressed pages 7504 * were submitted and finished, and the pages no longer under writeback. 7505 * 7506 * If we have a NOWAIT request and we have any pages in the range that 7507 * are locked, likely due to compression still in progress, we don't want 7508 * to block on page locks. We also don't want to block on pages marked as 7509 * dirty or under writeback (same as for the non-compression case). 7510 * iomap_dio_rw() did the same check, but after that and before we got 7511 * here, mmap'ed writes may have happened or buffered reads started 7512 * (readpage() and readahead(), which lock pages), as we haven't locked 7513 * the file range yet. 
7514 */ 7515 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 7516 &BTRFS_I(inode)->runtime_flags)) { 7517 if (flags & IOMAP_NOWAIT) { 7518 if (filemap_range_needs_writeback(inode->i_mapping, 7519 lockstart, lockend)) 7520 return -EAGAIN; 7521 } else { 7522 ret = filemap_fdatawrite_range(inode->i_mapping, start, 7523 start + length - 1); 7524 if (ret) 7525 return ret; 7526 } 7527 } 7528 7529 memset(dio_data, 0, sizeof(*dio_data)); 7530 7531 /* 7532 * We always try to allocate data space and must do it before locking 7533 * the file range, to avoid deadlocks with concurrent writes to the same 7534 * range if the range has several extents and the writes don't expand the 7535 * current i_size (the inode lock is taken in shared mode). If we fail to 7536 * allocate data space here we continue and later, after locking the 7537 * file range, we fail with ENOSPC only if we figure out we can not do a 7538 * NOCOW write. 7539 */ 7540 if (write && !(flags & IOMAP_NOWAIT)) { 7541 ret = btrfs_check_data_free_space(BTRFS_I(inode), 7542 &dio_data->data_reserved, 7543 start, data_alloc_len, false); 7544 if (!ret) 7545 dio_data->data_space_reserved = true; 7546 else if (ret && !(BTRFS_I(inode)->flags & 7547 (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) 7548 goto err; 7549 } 7550 7551 /* 7552 * If this errors out it's because we couldn't invalidate pagecache for 7553 * this range and we need to fallback to buffered IO, or we are doing a 7554 * NOWAIT read/write and we need to block. 7555 */ 7556 ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags); 7557 if (ret < 0) 7558 goto err; 7559 7560 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); 7561 if (IS_ERR(em)) { 7562 ret = PTR_ERR(em); 7563 goto unlock_err; 7564 } 7565 7566 /* 7567 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered 7568 * io. INLINE is special, and we could probably kludge it in here, but 7569 * it's still buffered so for safety lets just fall back to the generic 7570 * buffered path. 7571 * 7572 * For COMPRESSED we _have_ to read the entire extent in so we can 7573 * decompress it, so there will be buffering required no matter what we 7574 * do, so go ahead and fallback to buffered. 7575 * 7576 * We return -ENOTBLK because that's what makes DIO go ahead and go back 7577 * to buffered IO. Don't blame me, this is the price we pay for using 7578 * the generic code. 7579 */ 7580 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 7581 em->block_start == EXTENT_MAP_INLINE) { 7582 free_extent_map(em); 7583 /* 7584 * If we are in a NOWAIT context, return -EAGAIN in order to 7585 * fallback to buffered IO. This is not only because we can 7586 * block with buffered IO (no support for NOWAIT semantics at 7587 * the moment) but also to avoid returning short reads to user 7588 * space - this happens if we were able to read some data from 7589 * previous non-compressed extents and then when we fallback to 7590 * buffered IO, at btrfs_file_read_iter() by calling 7591 * filemap_read(), we fail to fault in pages for the read buffer, 7592 * in which case filemap_read() returns a short read (the number 7593 * of bytes previously read is > 0, so it does not return -EFAULT). 7594 */ 7595 ret = (flags & IOMAP_NOWAIT) ? 
-EAGAIN : -ENOTBLK; 7596 goto unlock_err; 7597 } 7598 7599 len = min(len, em->len - (start - em->start)); 7600 7601 /* 7602 * If we have a NOWAIT request and the range contains multiple extents 7603 * (or a mix of extents and holes), then we return -EAGAIN to make the 7604 * caller fall back to a context where it can do a blocking (without 7605 * NOWAIT) request. This way we avoid doing partial IO and returning 7606 * success to the caller, which is not optimal for writes and for reads 7607 * it can result in unexpected behaviour for an application. 7608 * 7609 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling 7610 * iomap_dio_rw(), we can end up returning less data than what the caller 7611 * asked for, resulting in an unexpected, and incorrect, short read. 7612 * That is, the caller asked to read N bytes and we return less than that, 7613 * which is wrong unless we are crossing EOF. This happens if we get a 7614 * page fault error when trying to fault in pages for the buffer that is 7615 * associated to the struct iov_iter passed to iomap_dio_rw(), and we 7616 * have previously submitted bios for other extents in the range, in 7617 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of 7618 * those bios have completed by the time we get the page fault error, 7619 * which we return back to our caller - we should only return EIOCBQUEUED 7620 * after we have submitted bios for all the extents in the range. 7621 */ 7622 if ((flags & IOMAP_NOWAIT) && len < length) { 7623 free_extent_map(em); 7624 ret = -EAGAIN; 7625 goto unlock_err; 7626 } 7627 7628 if (write) { 7629 ret = btrfs_get_blocks_direct_write(&em, inode, dio_data, 7630 start, len, flags); 7631 if (ret < 0) 7632 goto unlock_err; 7633 unlock_extents = true; 7634 /* Recalc len in case the new em is smaller than requested */ 7635 len = min(len, em->len - (start - em->start)); 7636 if (dio_data->data_space_reserved) { 7637 u64 release_offset; 7638 u64 release_len = 0; 7639 7640 if (dio_data->nocow_done) { 7641 release_offset = start; 7642 release_len = data_alloc_len; 7643 } else if (len < data_alloc_len) { 7644 release_offset = start + len; 7645 release_len = data_alloc_len - len; 7646 } 7647 7648 if (release_len > 0) 7649 btrfs_free_reserved_data_space(BTRFS_I(inode), 7650 dio_data->data_reserved, 7651 release_offset, 7652 release_len); 7653 } 7654 } else { 7655 /* 7656 * We need to unlock only the end area that we aren't using. 7657 * The rest is going to be unlocked by the endio routine. 7658 */ 7659 lockstart = start + len; 7660 if (lockstart < lockend) 7661 unlock_extents = true; 7662 } 7663 7664 if (unlock_extents) 7665 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7666 &cached_state); 7667 else 7668 free_extent_state(cached_state); 7669 7670 /* 7671 * Translate extent map information to iomap. 7672 * We trim the extents (and move the addr) even though iomap code does 7673 * that, since we have locked only the parts we are performing I/O in.
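 */

/*
 * Illustrative sketch (hypothetical helper): the extent map to iomap
 * translation performed below, reduced to its core. Holes and, for reads,
 * preallocated extents are reported as IOMAP_HOLE; anything else is a
 * mapped range whose address is shifted by the offset into the extent.
 */
static inline void em_to_iomap(const struct extent_map *em, u64 start,
			       u64 len, bool write, struct iomap *iomap)
{
	if (em->block_start == EXTENT_MAP_HOLE ||
	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else {
		iomap->addr = em->block_start + (start - em->start);
		iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = start;
	iomap->length = len;
}

/*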
7674 */ 7675 if ((em->block_start == EXTENT_MAP_HOLE) || 7676 (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { 7677 iomap->addr = IOMAP_NULL_ADDR; 7678 iomap->type = IOMAP_HOLE; 7679 } else { 7680 iomap->addr = em->block_start + (start - em->start); 7681 iomap->type = IOMAP_MAPPED; 7682 } 7683 iomap->offset = start; 7684 iomap->bdev = fs_info->fs_devices->latest_dev->bdev; 7685 iomap->length = len; 7686 free_extent_map(em); 7687 7688 return 0; 7689 7690 unlock_err: 7691 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7692 &cached_state); 7693 err: 7694 if (dio_data->data_space_reserved) { 7695 btrfs_free_reserved_data_space(BTRFS_I(inode), 7696 dio_data->data_reserved, 7697 start, data_alloc_len); 7698 extent_changeset_free(dio_data->data_reserved); 7699 } 7700 7701 return ret; 7702 } 7703 7704 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, 7705 ssize_t written, unsigned int flags, struct iomap *iomap) 7706 { 7707 struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); 7708 struct btrfs_dio_data *dio_data = iter->private; 7709 size_t submitted = dio_data->submitted; 7710 const bool write = !!(flags & IOMAP_WRITE); 7711 int ret = 0; 7712 7713 if (!write && (iomap->type == IOMAP_HOLE)) { 7714 /* If reading from a hole, unlock and return */ 7715 unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1, 7716 NULL); 7717 return 0; 7718 } 7719 7720 if (submitted < length) { 7721 pos += submitted; 7722 length -= submitted; 7723 if (write) 7724 btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL, 7725 pos, length, false); 7726 else 7727 unlock_extent(&BTRFS_I(inode)->io_tree, pos, 7728 pos + length - 1, NULL); 7729 ret = -ENOTBLK; 7730 } 7731 7732 if (write) 7733 extent_changeset_free(dio_data->data_reserved); 7734 return ret; 7735 } 7736 7737 static void btrfs_dio_end_io(struct btrfs_bio *bbio) 7738 { 7739 struct btrfs_dio_private *dip = 7740 container_of(bbio, struct btrfs_dio_private, bbio); 7741 struct btrfs_inode *inode = bbio->inode; 7742 struct bio *bio = &bbio->bio; 7743 7744 if (bio->bi_status) { 7745 btrfs_warn(inode->root->fs_info, 7746 "direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d", 7747 btrfs_ino(inode), bio->bi_opf, 7748 dip->file_offset, dip->bytes, bio->bi_status); 7749 } 7750 7751 if (btrfs_op(bio) == BTRFS_MAP_WRITE) 7752 btrfs_mark_ordered_io_finished(inode, NULL, dip->file_offset, 7753 dip->bytes, !bio->bi_status); 7754 else 7755 unlock_extent(&inode->io_tree, dip->file_offset, 7756 dip->file_offset + dip->bytes - 1, NULL); 7757 7758 bbio->bio.bi_private = bbio->private; 7759 iomap_dio_bio_end_io(bio); 7760 } 7761 7762 static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio, 7763 loff_t file_offset) 7764 { 7765 struct btrfs_bio *bbio = btrfs_bio(bio); 7766 struct btrfs_dio_private *dip = 7767 container_of(bbio, struct btrfs_dio_private, bbio); 7768 struct btrfs_dio_data *dio_data = iter->private; 7769 7770 btrfs_bio_init(bbio, BTRFS_I(iter->inode), btrfs_dio_end_io, bio->bi_private); 7771 bbio->file_offset = file_offset; 7772 7773 dip->file_offset = file_offset; 7774 dip->bytes = bio->bi_iter.bi_size; 7775 7776 dio_data->submitted += bio->bi_iter.bi_size; 7777 btrfs_submit_bio(bio, 0); 7778 } 7779 7780 static const struct iomap_ops btrfs_dio_iomap_ops = { 7781 .iomap_begin = btrfs_dio_iomap_begin, 7782 .iomap_end = btrfs_dio_iomap_end, 7783 }; 7784 7785 static const struct iomap_dio_ops btrfs_dio_ops = { 7786 .submit_io = btrfs_dio_submit_io, 7787 .bio_set = 
&btrfs_dio_bioset, 7788 }; 7789 7790 ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) 7791 { 7792 struct btrfs_dio_data data; 7793 7794 return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 7795 IOMAP_DIO_PARTIAL, &data, done_before); 7796 } 7797 7798 struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter, 7799 size_t done_before) 7800 { 7801 struct btrfs_dio_data data; 7802 7803 return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 7804 IOMAP_DIO_PARTIAL, &data, done_before); 7805 } 7806 7807 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 7808 u64 start, u64 len) 7809 { 7810 int ret; 7811 7812 ret = fiemap_prep(inode, fieinfo, start, &len, 0); 7813 if (ret) 7814 return ret; 7815 7816 /* 7817 * fiemap_prep() called filemap_write_and_wait() for the whole possible 7818 * file range (0 to LLONG_MAX), but that is not enough if we have 7819 * compression enabled. The first filemap_fdatawrite_range() only kicks 7820 * in the compression of data (in an async thread) and will return 7821 * before the compression is done and writeback is started. A second 7822 * filemap_fdatawrite_range() is needed to wait for the compression to 7823 * complete and writeback to start. We also need to wait for ordered 7824 * extents to complete, because our fiemap implementation uses mainly 7825 * file extent items to list the extents, searching for extent maps 7826 * only for file ranges with holes or prealloc extents to figure out 7827 * if we have delalloc in those ranges. 7828 */ 7829 if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) { 7830 ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX); 7831 if (ret) 7832 return ret; 7833 } 7834 7835 return extent_fiemap(BTRFS_I(inode), fieinfo, start, len); 7836 } 7837 7838 static int btrfs_writepages(struct address_space *mapping, 7839 struct writeback_control *wbc) 7840 { 7841 return extent_writepages(mapping, wbc); 7842 } 7843 7844 static void btrfs_readahead(struct readahead_control *rac) 7845 { 7846 extent_readahead(rac); 7847 } 7848 7849 /* 7850 * For release_folio() and invalidate_folio() we have a race window where 7851 * folio_end_writeback() is called but the subpage spinlock is not yet released. 7852 * If we continue to release/invalidate the page, we could cause a use-after-free 7853 * on the subpage spinlock. So this function spins and waits for the subpage 7854 * spinlock to be released. 7855 */ 7856 static void wait_subpage_spinlock(struct page *page) 7857 { 7858 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); 7859 struct btrfs_subpage *subpage; 7860 7861 if (!btrfs_is_subpage(fs_info, page)) 7862 return; 7863 7864 ASSERT(PagePrivate(page) && page->private); 7865 subpage = (struct btrfs_subpage *)page->private; 7866 7867 /* 7868 * This may look insane as we just acquire the spinlock and release it, 7869 * without doing anything. But we just want to make sure no one is 7870 * still holding the subpage spinlock. 7871 * And since the page is neither dirty nor under writeback, and we have 7872 * the page locked, the only possible way to hold a spinlock is from the 7873 * endio function to clear page writeback. 7874 * 7875 * Here we just acquire the spinlock so that all existing callers 7876 * should exit and we're safe to release/invalidate the page.
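 */

/*
 * Illustrative sketch (hypothetical helper): the lock-draining idiom used
 * below. Acquiring and immediately releasing the lock does no work by
 * itself; it only guarantees that any holder that entered before us has
 * exited by the time we return.
 */
static inline void drain_spinlock_irq(spinlock_t *lock)
{
	spin_lock_irq(lock);
	spin_unlock_irq(lock);
}

/*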
7877 */ 7878 spin_lock_irq(&subpage->lock); 7879 spin_unlock_irq(&subpage->lock); 7880 } 7881 7882 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 7883 { 7884 int ret = try_release_extent_mapping(&folio->page, gfp_flags); 7885 7886 if (ret == 1) { 7887 wait_subpage_spinlock(&folio->page); 7888 clear_page_extent_mapped(&folio->page); 7889 } 7890 return ret; 7891 } 7892 7893 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 7894 { 7895 if (folio_test_writeback(folio) || folio_test_dirty(folio)) 7896 return false; 7897 return __btrfs_release_folio(folio, gfp_flags); 7898 } 7899 7900 #ifdef CONFIG_MIGRATION 7901 static int btrfs_migrate_folio(struct address_space *mapping, 7902 struct folio *dst, struct folio *src, 7903 enum migrate_mode mode) 7904 { 7905 int ret = filemap_migrate_folio(mapping, dst, src, mode); 7906 7907 if (ret != MIGRATEPAGE_SUCCESS) 7908 return ret; 7909 7910 if (folio_test_ordered(src)) { 7911 folio_clear_ordered(src); 7912 folio_set_ordered(dst); 7913 } 7914 7915 return MIGRATEPAGE_SUCCESS; 7916 } 7917 #else 7918 #define btrfs_migrate_folio NULL 7919 #endif 7920 7921 static void btrfs_invalidate_folio(struct folio *folio, size_t offset, 7922 size_t length) 7923 { 7924 struct btrfs_inode *inode = BTRFS_I(folio->mapping->host); 7925 struct btrfs_fs_info *fs_info = inode->root->fs_info; 7926 struct extent_io_tree *tree = &inode->io_tree; 7927 struct extent_state *cached_state = NULL; 7928 u64 page_start = folio_pos(folio); 7929 u64 page_end = page_start + folio_size(folio) - 1; 7930 u64 cur; 7931 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; 7932 7933 /* 7934 * We have folio locked so no new ordered extent can be created on this 7935 * page, nor bio can be submitted for this folio. 7936 * 7937 * But already submitted bio can still be finished on this folio. 7938 * Furthermore, endio function won't skip folio which has Ordered 7939 * (Private2) already cleared, so it's possible for endio and 7940 * invalidate_folio to do the same ordered extent accounting twice 7941 * on one folio. 7942 * 7943 * So here we wait for any submitted bios to finish, so that we won't 7944 * do double ordered extent accounting on the same folio. 7945 */ 7946 folio_wait_writeback(folio); 7947 wait_subpage_spinlock(&folio->page); 7948 7949 /* 7950 * For subpage case, we have call sites like 7951 * btrfs_punch_hole_lock_range() which passes range not aligned to 7952 * sectorsize. 7953 * If the range doesn't cover the full folio, we don't need to and 7954 * shouldn't clear page extent mapped, as folio->private can still 7955 * record subpage dirty bits for other part of the range. 7956 * 7957 * For cases that invalidate the full folio even the range doesn't 7958 * cover the full folio, like invalidating the last folio, we're 7959 * still safe to wait for ordered extent to finish. 7960 */ 7961 if (!(offset == 0 && length == folio_size(folio))) { 7962 btrfs_release_folio(folio, GFP_NOFS); 7963 return; 7964 } 7965 7966 if (!inode_evicting) 7967 lock_extent(tree, page_start, page_end, &cached_state); 7968 7969 cur = page_start; 7970 while (cur < page_end) { 7971 struct btrfs_ordered_extent *ordered; 7972 u64 range_end; 7973 u32 range_len; 7974 u32 extra_flags = 0; 7975 7976 ordered = btrfs_lookup_first_ordered_range(inode, cur, 7977 page_end + 1 - cur); 7978 if (!ordered) { 7979 range_end = page_end; 7980 /* 7981 * No ordered extent covering this range, we are safe 7982 * to delete all extent states in the range. 
7983 */ 7984 extra_flags = EXTENT_CLEAR_ALL_BITS; 7985 goto next; 7986 } 7987 if (ordered->file_offset > cur) { 7988 /* 7989 * There is a range between [cur, oe->file_offset) not 7990 * covered by any ordered extent. 7991 * We are safe to delete all extent states, and handle 7992 * the ordered extent in the next iteration. 7993 */ 7994 range_end = ordered->file_offset - 1; 7995 extra_flags = EXTENT_CLEAR_ALL_BITS; 7996 goto next; 7997 } 7998 7999 range_end = min(ordered->file_offset + ordered->num_bytes - 1, 8000 page_end); 8001 ASSERT(range_end + 1 - cur < U32_MAX); 8002 range_len = range_end + 1 - cur; 8003 if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) { 8004 /* 8005 * If Ordered (Private2) is cleared, it means endio has 8006 * already been executed for the range. 8007 * We can't delete the extent states as 8008 * btrfs_finish_ordered_io() may still use some of them. 8009 */ 8010 goto next; 8011 } 8012 btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len); 8013 8014 /* 8015 * IO on this page will never be started, so we need to account 8016 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW 8017 * here, must leave that up for the ordered extent completion. 8018 * 8019 * This will also unlock the range for incoming 8020 * btrfs_finish_ordered_io(). 8021 */ 8022 if (!inode_evicting) 8023 clear_extent_bit(tree, cur, range_end, 8024 EXTENT_DELALLOC | 8025 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 8026 EXTENT_DEFRAG, &cached_state); 8027 8028 spin_lock_irq(&inode->ordered_tree.lock); 8029 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 8030 ordered->truncated_len = min(ordered->truncated_len, 8031 cur - ordered->file_offset); 8032 spin_unlock_irq(&inode->ordered_tree.lock); 8033 8034 /* 8035 * If the ordered extent has finished, we're safe to delete all 8036 * the extent states of the range, otherwise 8037 * btrfs_finish_ordered_io() will get executed by endio for 8038 * other pages, so we can't delete extent states. 8039 */ 8040 if (btrfs_dec_test_ordered_pending(inode, &ordered, 8041 cur, range_end + 1 - cur)) { 8042 btrfs_finish_ordered_io(ordered); 8043 /* 8044 * The ordered extent has finished, now we're again 8045 * safe to delete all extent states of the range. 8046 */ 8047 extra_flags = EXTENT_CLEAR_ALL_BITS; 8048 } 8049 next: 8050 if (ordered) 8051 btrfs_put_ordered_extent(ordered); 8052 /* 8053 * Qgroup reserved space handler 8054 * Sector(s) here will be either: 8055 * 8056 * 1) Already written to disk or bio already finished 8057 * Then its QGROUP_RESERVED bit in io_tree is already cleared. 8058 * Qgroup will be handled by its qgroup_record then. 8059 * btrfs_qgroup_free_data() call will do nothing here. 8060 * 8061 * 2) Not written to disk yet 8062 * Then btrfs_qgroup_free_data() call will clear the 8063 * QGROUP_RESERVED bit of its io_tree, and free the qgroup 8064 * reserved data space. 8065 * Since the IO will never happen for this page. 8066 */ 8067 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur); 8068 if (!inode_evicting) { 8069 clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED | 8070 EXTENT_DELALLOC | EXTENT_UPTODATE | 8071 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG | 8072 extra_flags, &cached_state); 8073 } 8074 cur = range_end + 1; 8075 } 8076 /* 8077 * We have iterated through all ordered extents of the page, the page 8078 * should not have Ordered (Private2) anymore, or the above iteration 8079 * did something wrong. 
8080 */ 8081 ASSERT(!folio_test_ordered(folio)); 8082 btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio)); 8083 if (!inode_evicting) 8084 __btrfs_release_folio(folio, GFP_NOFS); 8085 clear_page_extent_mapped(&folio->page); 8086 } 8087 8088 /* 8089 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 8090 * called from a page fault handler when a page is first dirtied. Hence we must 8091 * be careful to check for EOF conditions here. We set the page up correctly 8092 * for a written page which means we get ENOSPC checking when writing into 8093 * holes and correct delalloc and unwritten extent mapping on filesystems that 8094 * support these features. 8095 * 8096 * We are not allowed to take the i_mutex here so we have to play games to 8097 * protect against truncate races as the page could now be beyond EOF. Because 8098 * truncate_setsize() writes the inode size before removing pages, once we have 8099 * the page lock we can determine safely if the page is beyond EOF. If it is not 8100 * beyond EOF, then the page is guaranteed safe against truncation until we 8101 * unlock the page. 8102 */ 8103 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) 8104 { 8105 struct page *page = vmf->page; 8106 struct inode *inode = file_inode(vmf->vma->vm_file); 8107 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8108 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 8109 struct btrfs_ordered_extent *ordered; 8110 struct extent_state *cached_state = NULL; 8111 struct extent_changeset *data_reserved = NULL; 8112 unsigned long zero_start; 8113 loff_t size; 8114 vm_fault_t ret; 8115 int ret2; 8116 int reserved = 0; 8117 u64 reserved_space; 8118 u64 page_start; 8119 u64 page_end; 8120 u64 end; 8121 8122 reserved_space = PAGE_SIZE; 8123 8124 sb_start_pagefault(inode->i_sb); 8125 page_start = page_offset(page); 8126 page_end = page_start + PAGE_SIZE - 1; 8127 end = page_end; 8128 8129 /* 8130 * Reserving delalloc space after obtaining the page lock can lead to 8131 * deadlock. For example, if a dirty page is locked by this function 8132 * and the call to btrfs_delalloc_reserve_space() ends up triggering 8133 * dirty page write out, then the btrfs_writepages() function could 8134 * end up waiting indefinitely to get a lock on the page currently 8135 * being processed by btrfs_page_mkwrite() function. 8136 */ 8137 ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, 8138 page_start, reserved_space); 8139 if (!ret2) { 8140 ret2 = file_update_time(vmf->vma->vm_file); 8141 reserved = 1; 8142 } 8143 if (ret2) { 8144 ret = vmf_error(ret2); 8145 if (reserved) 8146 goto out; 8147 goto out_noreserve; 8148 } 8149 8150 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 8151 again: 8152 down_read(&BTRFS_I(inode)->i_mmap_lock); 8153 lock_page(page); 8154 size = i_size_read(inode); 8155 8156 if ((page->mapping != inode->i_mapping) || 8157 (page_start >= size)) { 8158 /* page got truncated out from underneath us */ 8159 goto out_unlock; 8160 } 8161 wait_on_page_writeback(page); 8162 8163 lock_extent(io_tree, page_start, page_end, &cached_state); 8164 ret2 = set_page_extent_mapped(page); 8165 if (ret2 < 0) { 8166 ret = vmf_error(ret2); 8167 unlock_extent(io_tree, page_start, page_end, &cached_state); 8168 goto out_unlock; 8169 } 8170 8171 /* 8172 * we can't set the delalloc bits if there are pending ordered 8173 * extents. 
Drop our locks and wait for them to finish 8174 */ 8175 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, 8176 PAGE_SIZE); 8177 if (ordered) { 8178 unlock_extent(io_tree, page_start, page_end, &cached_state); 8179 unlock_page(page); 8180 up_read(&BTRFS_I(inode)->i_mmap_lock); 8181 btrfs_start_ordered_extent(ordered); 8182 btrfs_put_ordered_extent(ordered); 8183 goto again; 8184 } 8185 8186 if (page->index == ((size - 1) >> PAGE_SHIFT)) { 8187 reserved_space = round_up(size - page_start, 8188 fs_info->sectorsize); 8189 if (reserved_space < PAGE_SIZE) { 8190 end = page_start + reserved_space - 1; 8191 btrfs_delalloc_release_space(BTRFS_I(inode), 8192 data_reserved, page_start, 8193 PAGE_SIZE - reserved_space, true); 8194 } 8195 } 8196 8197 /* 8198 * page_mkwrite gets called when the page is first dirtied after it's 8199 * faulted in, but write(2) could also dirty a page and set delalloc 8200 * bits, thus in this case, for space accounting reasons, we still need to 8201 * clear any delalloc bits within this page range since we have to 8202 * reserve data&meta space before lock_page() (see above comments). 8203 */ 8204 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, 8205 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 8206 EXTENT_DEFRAG, &cached_state); 8207 8208 ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0, 8209 &cached_state); 8210 if (ret2) { 8211 unlock_extent(io_tree, page_start, page_end, &cached_state); 8212 ret = VM_FAULT_SIGBUS; 8213 goto out_unlock; 8214 } 8215 8216 /* page is wholly or partially inside EOF */ 8217 if (page_start + PAGE_SIZE > size) 8218 zero_start = offset_in_page(size); 8219 else 8220 zero_start = PAGE_SIZE; 8221 8222 if (zero_start != PAGE_SIZE) 8223 memzero_page(page, zero_start, PAGE_SIZE - zero_start); 8224 8225 btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE); 8226 btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start); 8227 btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start); 8228 8229 btrfs_set_inode_last_sub_trans(BTRFS_I(inode)); 8230 8231 unlock_extent(io_tree, page_start, page_end, &cached_state); 8232 up_read(&BTRFS_I(inode)->i_mmap_lock); 8233 8234 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 8235 sb_end_pagefault(inode->i_sb); 8236 extent_changeset_free(data_reserved); 8237 return VM_FAULT_LOCKED; 8238 8239 out_unlock: 8240 unlock_page(page); 8241 up_read(&BTRFS_I(inode)->i_mmap_lock); 8242 out: 8243 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 8244 btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start, 8245 reserved_space, (ret != 0)); 8246 out_noreserve: 8247 sb_end_pagefault(inode->i_sb); 8248 extent_changeset_free(data_reserved); 8249 return ret; 8250 } 8251 8252 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) 8253 { 8254 struct btrfs_truncate_control control = { 8255 .inode = inode, 8256 .ino = btrfs_ino(inode), 8257 .min_type = BTRFS_EXTENT_DATA_KEY, 8258 .clear_extent_range = true, 8259 }; 8260 struct btrfs_root *root = inode->root; 8261 struct btrfs_fs_info *fs_info = root->fs_info; 8262 struct btrfs_block_rsv *rsv; 8263 int ret; 8264 struct btrfs_trans_handle *trans; 8265 u64 mask = fs_info->sectorsize - 1; 8266 u64 min_size = btrfs_calc_metadata_size(fs_info, 1); 8267 8268 if (!skip_writeback) { 8269 ret = btrfs_wait_ordered_range(&inode->vfs_inode, 8270 inode->vfs_inode.i_size & (~mask), 8271 (u64)-1); 8272 if (ret) 8273 return ret; 8274 } 8275 8276 /* 8277 * Yes ladies and
gentlemen, this is indeed ugly. We have a couple of 8278 * things going on here: 8279 * 8280 * 1) We need to reserve space to update our inode. 8281 * 8282 * 2) We need to have something to cache all the space that is going to 8283 * be free'd up by the truncate operation, but also have some slack 8284 * space reserved in case it uses space during the truncate (thank you 8285 * very much snapshotting). 8286 * 8287 * And we need these to be separate. The fact is we can use a lot of 8288 * space doing the truncate, and we have no earthly idea how much space 8289 * we will use, so we need the truncate reservation to be separate so it 8290 * doesn't end up using space reserved for updating the inode. We also 8291 * need to be able to stop the transaction and start a new one, which 8292 * means we need to be able to update the inode several times, and we 8293 * have no way of knowing how many times that will be, so we can't just 8294 * reserve 1 item for the entirety of the operation, so that has to be 8295 * done separately as well. 8296 * 8297 * So that leaves us with: 8298 * 8299 * 1) rsv - for the truncate reservation, which we will steal from the 8300 * transaction reservation. 8301 * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for 8302 * updating the inode. 8303 */ 8304 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 8305 if (!rsv) 8306 return -ENOMEM; 8307 rsv->size = min_size; 8308 rsv->failfast = true; 8309 8310 /* 8311 * 1 for the truncate slack space 8312 * 1 for updating the inode. 8313 */ 8314 trans = btrfs_start_transaction(root, 2); 8315 if (IS_ERR(trans)) { 8316 ret = PTR_ERR(trans); 8317 goto out; 8318 } 8319 8320 /* Migrate the slack space for the truncate to our reserve */ 8321 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 8322 min_size, false); 8323 BUG_ON(ret); 8324 8325 trans->block_rsv = rsv; 8326 8327 while (1) { 8328 struct extent_state *cached_state = NULL; 8329 const u64 new_size = inode->vfs_inode.i_size; 8330 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); 8331 8332 control.new_size = new_size; 8333 lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); 8334 /* 8335 * We want to drop from the next block forward in case this new 8336 * size is not block aligned, since we will be keeping the last 8337 * block of the extent just the way it is.
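 */

/*
 * Worked example (not in the original) of the alignment relation used
 * around this loop. With new_size = 6000 and sectorsize = 4096:
 *
 *   lock_start = ALIGN_DOWN(6000, 4096) = 4096   (locked above)
 *   drop start = ALIGN(6000, 4096)      = 8192   (dropped below)
 *
 * so the partial block containing the new EOF stays intact while
 * everything from the next block boundary onwards is dropped.
 */

/*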
8338 */ 8339 btrfs_drop_extent_map_range(inode, 8340 ALIGN(new_size, fs_info->sectorsize), 8341 (u64)-1, false); 8342 8343 ret = btrfs_truncate_inode_items(trans, root, &control); 8344 8345 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); 8346 btrfs_inode_safe_disk_i_size_write(inode, control.last_size); 8347 8348 unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); 8349 8350 trans->block_rsv = &fs_info->trans_block_rsv; 8351 if (ret != -ENOSPC && ret != -EAGAIN) 8352 break; 8353 8354 ret = btrfs_update_inode(trans, root, inode); 8355 if (ret) 8356 break; 8357 8358 btrfs_end_transaction(trans); 8359 btrfs_btree_balance_dirty(fs_info); 8360 8361 trans = btrfs_start_transaction(root, 2); 8362 if (IS_ERR(trans)) { 8363 ret = PTR_ERR(trans); 8364 trans = NULL; 8365 break; 8366 } 8367 8368 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); 8369 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 8370 rsv, min_size, false); 8371 BUG_ON(ret); /* shouldn't happen */ 8372 trans->block_rsv = rsv; 8373 } 8374 8375 /* 8376 * We can't call btrfs_truncate_block inside a trans handle as we could 8377 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we 8378 * know we've truncated everything except the last little bit, and can 8379 * do btrfs_truncate_block and then update the disk_i_size. 8380 */ 8381 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) { 8382 btrfs_end_transaction(trans); 8383 btrfs_btree_balance_dirty(fs_info); 8384 8385 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0); 8386 if (ret) 8387 goto out; 8388 trans = btrfs_start_transaction(root, 1); 8389 if (IS_ERR(trans)) { 8390 ret = PTR_ERR(trans); 8391 goto out; 8392 } 8393 btrfs_inode_safe_disk_i_size_write(inode, 0); 8394 } 8395 8396 if (trans) { 8397 int ret2; 8398 8399 trans->block_rsv = &fs_info->trans_block_rsv; 8400 ret2 = btrfs_update_inode(trans, root, inode); 8401 if (ret2 && !ret) 8402 ret = ret2; 8403 8404 ret2 = btrfs_end_transaction(trans); 8405 if (ret2 && !ret) 8406 ret = ret2; 8407 btrfs_btree_balance_dirty(fs_info); 8408 } 8409 out: 8410 btrfs_free_block_rsv(fs_info, rsv); 8411 /* 8412 * So if we truncate and then write and fsync we normally would just 8413 * write the extents that changed, which is a problem if we need to 8414 * first truncate that entire inode. So set this flag so we write out 8415 * all of the extents in the inode to the sync log so we're completely 8416 * safe. 8417 * 8418 * If no extents were dropped or trimmed we don't need to force the next 8419 * fsync to truncate all the inode's items from the log and re-log them 8420 * all. This means the truncate operation did not change the file size, 8421 * or changed it to a smaller size but there was only an implicit hole 8422 * between the old i_size and the new i_size, and there were no prealloc 8423 * extents beyond i_size to drop. 8424 */ 8425 if (control.extents_found > 0) 8426 btrfs_set_inode_full_sync(inode); 8427 8428 return ret; 8429 } 8430 8431 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap, 8432 struct inode *dir) 8433 { 8434 struct inode *inode; 8435 8436 inode = new_inode(dir->i_sb); 8437 if (inode) { 8438 /* 8439 * Subvolumes don't inherit the sgid bit or the parent's gid if 8440 * the parent's sgid bit is set. This is probably a bug. 
8441 */ 8442 inode_init_owner(idmap, inode, NULL, 8443 S_IFDIR | (~current_umask() & S_IRWXUGO)); 8444 inode->i_op = &btrfs_dir_inode_operations; 8445 inode->i_fop = &btrfs_dir_file_operations; 8446 } 8447 return inode; 8448 } 8449 8450 struct inode *btrfs_alloc_inode(struct super_block *sb) 8451 { 8452 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 8453 struct btrfs_inode *ei; 8454 struct inode *inode; 8455 8456 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL); 8457 if (!ei) 8458 return NULL; 8459 8460 ei->root = NULL; 8461 ei->generation = 0; 8462 ei->last_trans = 0; 8463 ei->last_sub_trans = 0; 8464 ei->logged_trans = 0; 8465 ei->delalloc_bytes = 0; 8466 ei->new_delalloc_bytes = 0; 8467 ei->defrag_bytes = 0; 8468 ei->disk_i_size = 0; 8469 ei->flags = 0; 8470 ei->ro_flags = 0; 8471 ei->csum_bytes = 0; 8472 ei->index_cnt = (u64)-1; 8473 ei->dir_index = 0; 8474 ei->last_unlink_trans = 0; 8475 ei->last_reflink_trans = 0; 8476 ei->last_log_commit = 0; 8477 8478 spin_lock_init(&ei->lock); 8479 ei->outstanding_extents = 0; 8480 if (sb->s_magic != BTRFS_TEST_MAGIC) 8481 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, 8482 BTRFS_BLOCK_RSV_DELALLOC); 8483 ei->runtime_flags = 0; 8484 ei->prop_compress = BTRFS_COMPRESS_NONE; 8485 ei->defrag_compress = BTRFS_COMPRESS_NONE; 8486 8487 ei->delayed_node = NULL; 8488 8489 ei->i_otime.tv_sec = 0; 8490 ei->i_otime.tv_nsec = 0; 8491 8492 inode = &ei->vfs_inode; 8493 extent_map_tree_init(&ei->extent_tree); 8494 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); 8495 ei->io_tree.inode = ei; 8496 extent_io_tree_init(fs_info, &ei->file_extent_tree, 8497 IO_TREE_INODE_FILE_EXTENT); 8498 atomic_set(&ei->sync_writers, 0); 8499 mutex_init(&ei->log_mutex); 8500 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 8501 INIT_LIST_HEAD(&ei->delalloc_inodes); 8502 INIT_LIST_HEAD(&ei->delayed_iput); 8503 RB_CLEAR_NODE(&ei->rb_node); 8504 init_rwsem(&ei->i_mmap_lock); 8505 8506 return inode; 8507 } 8508 8509 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 8510 void btrfs_test_destroy_inode(struct inode *inode) 8511 { 8512 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 8513 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8514 } 8515 #endif 8516 8517 void btrfs_free_inode(struct inode *inode) 8518 { 8519 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8520 } 8521 8522 void btrfs_destroy_inode(struct inode *vfs_inode) 8523 { 8524 struct btrfs_ordered_extent *ordered; 8525 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 8526 struct btrfs_root *root = inode->root; 8527 bool freespace_inode; 8528 8529 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); 8530 WARN_ON(vfs_inode->i_data.nrpages); 8531 WARN_ON(inode->block_rsv.reserved); 8532 WARN_ON(inode->block_rsv.size); 8533 WARN_ON(inode->outstanding_extents); 8534 if (!S_ISDIR(vfs_inode->i_mode)) { 8535 WARN_ON(inode->delalloc_bytes); 8536 WARN_ON(inode->new_delalloc_bytes); 8537 } 8538 WARN_ON(inode->csum_bytes); 8539 WARN_ON(inode->defrag_bytes); 8540 8541 /* 8542 * This can happen where we create an inode, but somebody else also 8543 * created the same inode and we need to destroy the one we already 8544 * created. 8545 */ 8546 if (!root) 8547 return; 8548 8549 /* 8550 * If this is a free space inode do not take the ordered extents lockdep 8551 * map. 
8552 */ 8553 freespace_inode = btrfs_is_free_space_inode(inode); 8554 8555 while (1) { 8556 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 8557 if (!ordered) 8558 break; 8559 else { 8560 btrfs_err(root->fs_info, 8561 "found ordered extent %llu %llu on inode cleanup", 8562 ordered->file_offset, ordered->num_bytes); 8563 8564 if (!freespace_inode) 8565 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent); 8566 8567 btrfs_remove_ordered_extent(inode, ordered); 8568 btrfs_put_ordered_extent(ordered); 8569 btrfs_put_ordered_extent(ordered); 8570 } 8571 } 8572 btrfs_qgroup_check_reserved_leak(inode); 8573 inode_tree_del(inode); 8574 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); 8575 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); 8576 btrfs_put_root(inode->root); 8577 } 8578 8579 int btrfs_drop_inode(struct inode *inode) 8580 { 8581 struct btrfs_root *root = BTRFS_I(inode)->root; 8582 8583 if (root == NULL) 8584 return 1; 8585 8586 /* the snap/subvol tree is on deleting */ 8587 if (btrfs_root_refs(&root->root_item) == 0) 8588 return 1; 8589 else 8590 return generic_drop_inode(inode); 8591 } 8592 8593 static void init_once(void *foo) 8594 { 8595 struct btrfs_inode *ei = foo; 8596 8597 inode_init_once(&ei->vfs_inode); 8598 } 8599 8600 void __cold btrfs_destroy_cachep(void) 8601 { 8602 /* 8603 * Make sure all delayed rcu free inodes are flushed before we 8604 * destroy cache. 8605 */ 8606 rcu_barrier(); 8607 bioset_exit(&btrfs_dio_bioset); 8608 kmem_cache_destroy(btrfs_inode_cachep); 8609 } 8610 8611 int __init btrfs_init_cachep(void) 8612 { 8613 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 8614 sizeof(struct btrfs_inode), 0, 8615 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT, 8616 init_once); 8617 if (!btrfs_inode_cachep) 8618 goto fail; 8619 8620 if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE, 8621 offsetof(struct btrfs_dio_private, bbio.bio), 8622 BIOSET_NEED_BVECS)) 8623 goto fail; 8624 8625 return 0; 8626 fail: 8627 btrfs_destroy_cachep(); 8628 return -ENOMEM; 8629 } 8630 8631 static int btrfs_getattr(struct mnt_idmap *idmap, 8632 const struct path *path, struct kstat *stat, 8633 u32 request_mask, unsigned int flags) 8634 { 8635 u64 delalloc_bytes; 8636 u64 inode_bytes; 8637 struct inode *inode = d_inode(path->dentry); 8638 u32 blocksize = inode->i_sb->s_blocksize; 8639 u32 bi_flags = BTRFS_I(inode)->flags; 8640 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; 8641 8642 stat->result_mask |= STATX_BTIME; 8643 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec; 8644 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; 8645 if (bi_flags & BTRFS_INODE_APPEND) 8646 stat->attributes |= STATX_ATTR_APPEND; 8647 if (bi_flags & BTRFS_INODE_COMPRESS) 8648 stat->attributes |= STATX_ATTR_COMPRESSED; 8649 if (bi_flags & BTRFS_INODE_IMMUTABLE) 8650 stat->attributes |= STATX_ATTR_IMMUTABLE; 8651 if (bi_flags & BTRFS_INODE_NODUMP) 8652 stat->attributes |= STATX_ATTR_NODUMP; 8653 if (bi_ro_flags & BTRFS_INODE_RO_VERITY) 8654 stat->attributes |= STATX_ATTR_VERITY; 8655 8656 stat->attributes_mask |= (STATX_ATTR_APPEND | 8657 STATX_ATTR_COMPRESSED | 8658 STATX_ATTR_IMMUTABLE | 8659 STATX_ATTR_NODUMP); 8660 8661 generic_fillattr(idmap, inode, stat); 8662 stat->dev = BTRFS_I(inode)->root->anon_dev; 8663 8664 spin_lock(&BTRFS_I(inode)->lock); 8665 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; 8666 inode_bytes = inode_get_bytes(inode); 8667 spin_unlock(&BTRFS_I(inode)->lock); 8668 stat->blocks = (ALIGN(inode_bytes, blocksize) + 8669 ALIGN(delalloc_bytes, 
blocksize)) >> 9; 8670 return 0; 8671 } 8672 8673 static int btrfs_rename_exchange(struct inode *old_dir, 8674 struct dentry *old_dentry, 8675 struct inode *new_dir, 8676 struct dentry *new_dentry) 8677 { 8678 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 8679 struct btrfs_trans_handle *trans; 8680 unsigned int trans_num_items; 8681 struct btrfs_root *root = BTRFS_I(old_dir)->root; 8682 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 8683 struct inode *new_inode = new_dentry->d_inode; 8684 struct inode *old_inode = old_dentry->d_inode; 8685 struct timespec64 ctime = current_time(old_inode); 8686 struct btrfs_rename_ctx old_rename_ctx; 8687 struct btrfs_rename_ctx new_rename_ctx; 8688 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 8689 u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); 8690 u64 old_idx = 0; 8691 u64 new_idx = 0; 8692 int ret; 8693 int ret2; 8694 bool need_abort = false; 8695 struct fscrypt_name old_fname, new_fname; 8696 struct fscrypt_str *old_name, *new_name; 8697 8698 /* 8699 * For non-subvolumes allow exchange only within one subvolume, in the 8700 * same inode namespace. Two subvolumes (represented as directory) can 8701 * be exchanged as they're a logical link and have a fixed inode number. 8702 */ 8703 if (root != dest && 8704 (old_ino != BTRFS_FIRST_FREE_OBJECTID || 8705 new_ino != BTRFS_FIRST_FREE_OBJECTID)) 8706 return -EXDEV; 8707 8708 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 8709 if (ret) 8710 return ret; 8711 8712 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 8713 if (ret) { 8714 fscrypt_free_filename(&old_fname); 8715 return ret; 8716 } 8717 8718 old_name = &old_fname.disk_name; 8719 new_name = &new_fname.disk_name; 8720 8721 /* close the race window with snapshot create/destroy ioctl */ 8722 if (old_ino == BTRFS_FIRST_FREE_OBJECTID || 8723 new_ino == BTRFS_FIRST_FREE_OBJECTID) 8724 down_read(&fs_info->subvol_sem); 8725 8726 /* 8727 * For each inode: 8728 * 1 to remove old dir item 8729 * 1 to remove old dir index 8730 * 1 to add new dir item 8731 * 1 to add new dir index 8732 * 1 to update parent inode 8733 * 8734 * If the parents are the same, we only need to account for one 8735 */ 8736 trans_num_items = (old_dir == new_dir ? 9 : 10); 8737 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8738 /* 8739 * 1 to remove old root ref 8740 * 1 to remove old root backref 8741 * 1 to add new root ref 8742 * 1 to add new root backref 8743 */ 8744 trans_num_items += 4; 8745 } else { 8746 /* 8747 * 1 to update inode item 8748 * 1 to remove old inode ref 8749 * 1 to add new inode ref 8750 */ 8751 trans_num_items += 3; 8752 } 8753 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 8754 trans_num_items += 4; 8755 else 8756 trans_num_items += 3; 8757 trans = btrfs_start_transaction(root, trans_num_items); 8758 if (IS_ERR(trans)) { 8759 ret = PTR_ERR(trans); 8760 goto out_notrans; 8761 } 8762 8763 if (dest != root) { 8764 ret = btrfs_record_root_in_trans(trans, dest); 8765 if (ret) 8766 goto out_fail; 8767 } 8768 8769 /* 8770 * We need to find a free sequence number both in the source and 8771 * in the destination directory for the exchange. 8772 */ 8773 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); 8774 if (ret) 8775 goto out_fail; 8776 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); 8777 if (ret) 8778 goto out_fail; 8779 8780 BTRFS_I(old_inode)->dir_index = 0ULL; 8781 BTRFS_I(new_inode)->dir_index = 0ULL; 8782 8783 /* Reference for the source. 
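 */

/*
 * Worked example (not in the original) for the transaction item budget
 * computed above: exchanging two plain (non-subvolume) inodes that share
 * one parent directory gives
 *
 *   base (same dir)              =  9
 *   source inode (item + 2 refs) = +3
 *   dest inode (item + 2 refs)   = +3
 *   total trans_num_items        = 15
 */

/*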
*/ 8784 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8785 /* force full log commit if subvolume involved. */ 8786 btrfs_set_log_full_commit(trans); 8787 } else { 8788 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino, 8789 btrfs_ino(BTRFS_I(new_dir)), 8790 old_idx); 8791 if (ret) 8792 goto out_fail; 8793 need_abort = true; 8794 } 8795 8796 /* And now for the dest. */ 8797 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8798 /* force full log commit if subvolume involved. */ 8799 btrfs_set_log_full_commit(trans); 8800 } else { 8801 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino, 8802 btrfs_ino(BTRFS_I(old_dir)), 8803 new_idx); 8804 if (ret) { 8805 if (need_abort) 8806 btrfs_abort_transaction(trans, ret); 8807 goto out_fail; 8808 } 8809 } 8810 8811 /* Update inode version and ctime/mtime. */ 8812 inode_inc_iversion(old_dir); 8813 inode_inc_iversion(new_dir); 8814 inode_inc_iversion(old_inode); 8815 inode_inc_iversion(new_inode); 8816 old_dir->i_mtime = ctime; 8817 old_dir->i_ctime = ctime; 8818 new_dir->i_mtime = ctime; 8819 new_dir->i_ctime = ctime; 8820 old_inode->i_ctime = ctime; 8821 new_inode->i_ctime = ctime; 8822 8823 if (old_dentry->d_parent != new_dentry->d_parent) { 8824 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 8825 BTRFS_I(old_inode), 1); 8826 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), 8827 BTRFS_I(new_inode), 1); 8828 } 8829 8830 /* src is a subvolume */ 8831 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8832 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 8833 } else { /* src is an inode */ 8834 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 8835 BTRFS_I(old_dentry->d_inode), 8836 old_name, &old_rename_ctx); 8837 if (!ret) 8838 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 8839 } 8840 if (ret) { 8841 btrfs_abort_transaction(trans, ret); 8842 goto out_fail; 8843 } 8844 8845 /* dest is a subvolume */ 8846 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8847 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 8848 } else { /* dest is an inode */ 8849 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir), 8850 BTRFS_I(new_dentry->d_inode), 8851 new_name, &new_rename_ctx); 8852 if (!ret) 8853 ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode)); 8854 } 8855 if (ret) { 8856 btrfs_abort_transaction(trans, ret); 8857 goto out_fail; 8858 } 8859 8860 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 8861 new_name, 0, old_idx); 8862 if (ret) { 8863 btrfs_abort_transaction(trans, ret); 8864 goto out_fail; 8865 } 8866 8867 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), 8868 old_name, 0, new_idx); 8869 if (ret) { 8870 btrfs_abort_transaction(trans, ret); 8871 goto out_fail; 8872 } 8873 8874 if (old_inode->i_nlink == 1) 8875 BTRFS_I(old_inode)->dir_index = old_idx; 8876 if (new_inode->i_nlink == 1) 8877 BTRFS_I(new_inode)->dir_index = new_idx; 8878 8879 /* 8880 * Now pin the logs of the roots. We do it to ensure that no other task 8881 * can sync the logs while we are in progress with the rename, because 8882 * that could result in an inconsistency in case any of the inodes that 8883 * are part of this rename operation were logged before. 8884 */ 8885 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8886 btrfs_pin_log_trans(root); 8887 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8888 btrfs_pin_log_trans(dest); 8889 8890 /* Do the log updates for all inodes. 
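 */

/*
 * Illustrative sketch (hypothetical helper): the pin -> log -> unpin
 * bracket used below for one root. Pinning the log keeps other tasks from
 * syncing it mid-rename, so an fsync can never observe a half-updated
 * name.
 */
static inline void log_new_name_pinned(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct dentry *dentry,
				       struct btrfs_inode *old_dir,
				       u64 old_dir_index,
				       struct dentry *parent)
{
	btrfs_pin_log_trans(root);
	btrfs_log_new_name(trans, dentry, old_dir, old_dir_index, parent);
	btrfs_end_log_trans(root);
}

/*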
*/ 8891 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8892 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 8893 old_rename_ctx.index, new_dentry->d_parent); 8894 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8895 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir), 8896 new_rename_ctx.index, old_dentry->d_parent); 8897 8898 /* Now unpin the logs. */ 8899 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8900 btrfs_end_log_trans(root); 8901 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8902 btrfs_end_log_trans(dest); 8903 out_fail: 8904 ret2 = btrfs_end_transaction(trans); 8905 ret = ret ? ret : ret2; 8906 out_notrans: 8907 if (new_ino == BTRFS_FIRST_FREE_OBJECTID || 8908 old_ino == BTRFS_FIRST_FREE_OBJECTID) 8909 up_read(&fs_info->subvol_sem); 8910 8911 fscrypt_free_filename(&new_fname); 8912 fscrypt_free_filename(&old_fname); 8913 return ret; 8914 } 8915 8916 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap, 8917 struct inode *dir) 8918 { 8919 struct inode *inode; 8920 8921 inode = new_inode(dir->i_sb); 8922 if (inode) { 8923 inode_init_owner(idmap, inode, dir, 8924 S_IFCHR | WHITEOUT_MODE); 8925 inode->i_op = &btrfs_special_inode_operations; 8926 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); 8927 } 8928 return inode; 8929 } 8930 8931 static int btrfs_rename(struct mnt_idmap *idmap, 8932 struct inode *old_dir, struct dentry *old_dentry, 8933 struct inode *new_dir, struct dentry *new_dentry, 8934 unsigned int flags) 8935 { 8936 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 8937 struct btrfs_new_inode_args whiteout_args = { 8938 .dir = old_dir, 8939 .dentry = old_dentry, 8940 }; 8941 struct btrfs_trans_handle *trans; 8942 unsigned int trans_num_items; 8943 struct btrfs_root *root = BTRFS_I(old_dir)->root; 8944 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 8945 struct inode *new_inode = d_inode(new_dentry); 8946 struct inode *old_inode = d_inode(old_dentry); 8947 struct btrfs_rename_ctx rename_ctx; 8948 u64 index = 0; 8949 int ret; 8950 int ret2; 8951 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 8952 struct fscrypt_name old_fname, new_fname; 8953 8954 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 8955 return -EPERM; 8956 8957 /* we only allow rename subvolume link between subvolumes */ 8958 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 8959 return -EXDEV; 8960 8961 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 8962 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) 8963 return -ENOTEMPTY; 8964 8965 if (S_ISDIR(old_inode->i_mode) && new_inode && 8966 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 8967 return -ENOTEMPTY; 8968 8969 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 8970 if (ret) 8971 return ret; 8972 8973 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 8974 if (ret) { 8975 fscrypt_free_filename(&old_fname); 8976 return ret; 8977 } 8978 8979 /* check for collisions, even if the name isn't there */ 8980 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name); 8981 if (ret) { 8982 if (ret == -EEXIST) { 8983 /* we shouldn't get 8984 * eexist without a new_inode */ 8985 if (WARN_ON(!new_inode)) { 8986 goto out_fscrypt_names; 8987 } 8988 } else { 8989 /* maybe -EOVERFLOW */ 8990 goto out_fscrypt_names; 8991 } 8992 } 8993 ret = 0; 8994 8995 /* 8996 * we're using rename to replace one file with another. 
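 */

/*
 * Illustrative sketch (hypothetical helper): the early flush described in
 * the surrounding comment, factored out. Only a replacing rename of a
 * regular file with existing data needs it.
 */
static inline void start_replace_writeback(struct inode *old_inode,
					   struct inode *new_inode)
{
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
		filemap_flush(old_inode->i_mapping);
}

/*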
Start IO on it 8997 * now so we don't add too much work to the end of the transaction 8998 */ 8999 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 9000 filemap_flush(old_inode->i_mapping); 9001 9002 if (flags & RENAME_WHITEOUT) { 9003 whiteout_args.inode = new_whiteout_inode(idmap, old_dir); 9004 if (!whiteout_args.inode) { 9005 ret = -ENOMEM; 9006 goto out_fscrypt_names; 9007 } 9008 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); 9009 if (ret) 9010 goto out_whiteout_inode; 9011 } else { 9012 /* 1 to update the old parent inode. */ 9013 trans_num_items = 1; 9014 } 9015 9016 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9017 /* Close the race window with snapshot create/destroy ioctl */ 9018 down_read(&fs_info->subvol_sem); 9019 /* 9020 * 1 to remove old root ref 9021 * 1 to remove old root backref 9022 * 1 to add new root ref 9023 * 1 to add new root backref 9024 */ 9025 trans_num_items += 4; 9026 } else { 9027 /* 9028 * 1 to update inode 9029 * 1 to remove old inode ref 9030 * 1 to add new inode ref 9031 */ 9032 trans_num_items += 3; 9033 } 9034 /* 9035 * 1 to remove old dir item 9036 * 1 to remove old dir index 9037 * 1 to add new dir item 9038 * 1 to add new dir index 9039 */ 9040 trans_num_items += 4; 9041 /* 1 to update new parent inode if it's not the same as the old parent */ 9042 if (new_dir != old_dir) 9043 trans_num_items++; 9044 if (new_inode) { 9045 /* 9046 * 1 to update inode 9047 * 1 to remove inode ref 9048 * 1 to remove dir item 9049 * 1 to remove dir index 9050 * 1 to possibly add orphan item 9051 */ 9052 trans_num_items += 5; 9053 } 9054 trans = btrfs_start_transaction(root, trans_num_items); 9055 if (IS_ERR(trans)) { 9056 ret = PTR_ERR(trans); 9057 goto out_notrans; 9058 } 9059 9060 if (dest != root) { 9061 ret = btrfs_record_root_in_trans(trans, dest); 9062 if (ret) 9063 goto out_fail; 9064 } 9065 9066 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 9067 if (ret) 9068 goto out_fail; 9069 9070 BTRFS_I(old_inode)->dir_index = 0ULL; 9071 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9072 /* force full log commit if subvolume involved. 
*/ 9073 btrfs_set_log_full_commit(trans); 9074 } else { 9075 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name, 9076 old_ino, btrfs_ino(BTRFS_I(new_dir)), 9077 index); 9078 if (ret) 9079 goto out_fail; 9080 } 9081 9082 inode_inc_iversion(old_dir); 9083 inode_inc_iversion(new_dir); 9084 inode_inc_iversion(old_inode); 9085 old_dir->i_mtime = current_time(old_dir); 9086 old_dir->i_ctime = old_dir->i_mtime; 9087 new_dir->i_mtime = old_dir->i_mtime; 9088 new_dir->i_ctime = old_dir->i_mtime; 9089 old_inode->i_ctime = old_dir->i_mtime; 9090 9091 if (old_dentry->d_parent != new_dentry->d_parent) 9092 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9093 BTRFS_I(old_inode), 1); 9094 9095 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9096 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 9097 } else { 9098 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 9099 BTRFS_I(d_inode(old_dentry)), 9100 &old_fname.disk_name, &rename_ctx); 9101 if (!ret) 9102 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 9103 } 9104 if (ret) { 9105 btrfs_abort_transaction(trans, ret); 9106 goto out_fail; 9107 } 9108 9109 if (new_inode) { 9110 inode_inc_iversion(new_inode); 9111 new_inode->i_ctime = current_time(new_inode); 9112 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 9113 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9114 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 9115 BUG_ON(new_inode->i_nlink == 0); 9116 } else { 9117 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), 9118 BTRFS_I(d_inode(new_dentry)), 9119 &new_fname.disk_name); 9120 } 9121 if (!ret && new_inode->i_nlink == 0) 9122 ret = btrfs_orphan_add(trans, 9123 BTRFS_I(d_inode(new_dentry))); 9124 if (ret) { 9125 btrfs_abort_transaction(trans, ret); 9126 goto out_fail; 9127 } 9128 } 9129 9130 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9131 &new_fname.disk_name, 0, index); 9132 if (ret) { 9133 btrfs_abort_transaction(trans, ret); 9134 goto out_fail; 9135 } 9136 9137 if (old_inode->i_nlink == 1) 9138 BTRFS_I(old_inode)->dir_index = index; 9139 9140 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9141 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 9142 rename_ctx.index, new_dentry->d_parent); 9143 9144 if (flags & RENAME_WHITEOUT) { 9145 ret = btrfs_create_new_inode(trans, &whiteout_args); 9146 if (ret) { 9147 btrfs_abort_transaction(trans, ret); 9148 goto out_fail; 9149 } else { 9150 unlock_new_inode(whiteout_args.inode); 9151 iput(whiteout_args.inode); 9152 whiteout_args.inode = NULL; 9153 } 9154 } 9155 out_fail: 9156 ret2 = btrfs_end_transaction(trans); 9157 ret = ret ? 
ret : ret2; 9158 out_notrans: 9159 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9160 up_read(&fs_info->subvol_sem); 9161 if (flags & RENAME_WHITEOUT) 9162 btrfs_new_inode_args_destroy(&whiteout_args); 9163 out_whiteout_inode: 9164 if (flags & RENAME_WHITEOUT) 9165 iput(whiteout_args.inode); 9166 out_fscrypt_names: 9167 fscrypt_free_filename(&old_fname); 9168 fscrypt_free_filename(&new_fname); 9169 return ret; 9170 } 9171 9172 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir, 9173 struct dentry *old_dentry, struct inode *new_dir, 9174 struct dentry *new_dentry, unsigned int flags) 9175 { 9176 int ret; 9177 9178 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 9179 return -EINVAL; 9180 9181 if (flags & RENAME_EXCHANGE) 9182 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir, 9183 new_dentry); 9184 else 9185 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir, 9186 new_dentry, flags); 9187 9188 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); 9189 9190 return ret; 9191 } 9192 9193 struct btrfs_delalloc_work { 9194 struct inode *inode; 9195 struct completion completion; 9196 struct list_head list; 9197 struct btrfs_work work; 9198 }; 9199 9200 static void btrfs_run_delalloc_work(struct btrfs_work *work) 9201 { 9202 struct btrfs_delalloc_work *delalloc_work; 9203 struct inode *inode; 9204 9205 delalloc_work = container_of(work, struct btrfs_delalloc_work, 9206 work); 9207 inode = delalloc_work->inode; 9208 filemap_flush(inode->i_mapping); 9209 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9210 &BTRFS_I(inode)->runtime_flags)) 9211 filemap_flush(inode->i_mapping); 9212 9213 iput(inode); 9214 complete(&delalloc_work->completion); 9215 } 9216 9217 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 9218 { 9219 struct btrfs_delalloc_work *work; 9220 9221 work = kmalloc(sizeof(*work), GFP_NOFS); 9222 if (!work) 9223 return NULL; 9224 9225 init_completion(&work->completion); 9226 INIT_LIST_HEAD(&work->list); 9227 work->inode = inode; 9228 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); 9229 9230 return work; 9231 } 9232 9233 /* 9234 * some fairly slow code that needs optimization. This walks the list 9235 * of all the inodes with pending delalloc and forces them to disk. 
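 * When doing a full flush, each inode is handed to the
 * fs_info->flush_workers workqueue as a separate btrfs_delalloc_work
 * item; otherwise writeback is issued inline with the caller's
 * writeback_control.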
9236 */ 9237 static int start_delalloc_inodes(struct btrfs_root *root, 9238 struct writeback_control *wbc, bool snapshot, 9239 bool in_reclaim_context) 9240 { 9241 struct btrfs_inode *binode; 9242 struct inode *inode; 9243 struct btrfs_delalloc_work *work, *next; 9244 struct list_head works; 9245 struct list_head splice; 9246 int ret = 0; 9247 bool full_flush = wbc->nr_to_write == LONG_MAX; 9248 9249 INIT_LIST_HEAD(&works); 9250 INIT_LIST_HEAD(&splice); 9251 9252 mutex_lock(&root->delalloc_mutex); 9253 spin_lock(&root->delalloc_lock); 9254 list_splice_init(&root->delalloc_inodes, &splice); 9255 while (!list_empty(&splice)) { 9256 binode = list_entry(splice.next, struct btrfs_inode, 9257 delalloc_inodes); 9258 9259 list_move_tail(&binode->delalloc_inodes, 9260 &root->delalloc_inodes); 9261 9262 if (in_reclaim_context && 9263 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) 9264 continue; 9265 9266 inode = igrab(&binode->vfs_inode); 9267 if (!inode) { 9268 cond_resched_lock(&root->delalloc_lock); 9269 continue; 9270 } 9271 spin_unlock(&root->delalloc_lock); 9272 9273 if (snapshot) 9274 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 9275 &binode->runtime_flags); 9276 if (full_flush) { 9277 work = btrfs_alloc_delalloc_work(inode); 9278 if (!work) { 9279 iput(inode); 9280 ret = -ENOMEM; 9281 goto out; 9282 } 9283 list_add_tail(&work->list, &works); 9284 btrfs_queue_work(root->fs_info->flush_workers, 9285 &work->work); 9286 } else { 9287 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); 9288 btrfs_add_delayed_iput(BTRFS_I(inode)); 9289 if (ret || wbc->nr_to_write <= 0) 9290 goto out; 9291 } 9292 cond_resched(); 9293 spin_lock(&root->delalloc_lock); 9294 } 9295 spin_unlock(&root->delalloc_lock); 9296 9297 out: 9298 list_for_each_entry_safe(work, next, &works, list) { 9299 list_del_init(&work->list); 9300 wait_for_completion(&work->completion); 9301 kfree(work); 9302 } 9303 9304 if (!list_empty(&splice)) { 9305 spin_lock(&root->delalloc_lock); 9306 list_splice_tail(&splice, &root->delalloc_inodes); 9307 spin_unlock(&root->delalloc_lock); 9308 } 9309 mutex_unlock(&root->delalloc_mutex); 9310 return ret; 9311 } 9312 9313 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context) 9314 { 9315 struct writeback_control wbc = { 9316 .nr_to_write = LONG_MAX, 9317 .sync_mode = WB_SYNC_NONE, 9318 .range_start = 0, 9319 .range_end = LLONG_MAX, 9320 }; 9321 struct btrfs_fs_info *fs_info = root->fs_info; 9322 9323 if (BTRFS_FS_ERROR(fs_info)) 9324 return -EROFS; 9325 9326 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context); 9327 } 9328 9329 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, 9330 bool in_reclaim_context) 9331 { 9332 struct writeback_control wbc = { 9333 .nr_to_write = nr, 9334 .sync_mode = WB_SYNC_NONE, 9335 .range_start = 0, 9336 .range_end = LLONG_MAX, 9337 }; 9338 struct btrfs_root *root; 9339 struct list_head splice; 9340 int ret; 9341 9342 if (BTRFS_FS_ERROR(fs_info)) 9343 return -EROFS; 9344 9345 INIT_LIST_HEAD(&splice); 9346 9347 mutex_lock(&fs_info->delalloc_root_mutex); 9348 spin_lock(&fs_info->delalloc_root_lock); 9349 list_splice_init(&fs_info->delalloc_roots, &splice); 9350 while (!list_empty(&splice)) { 9351 /* 9352 * Reset nr_to_write here so we know that we're doing a full 9353 * flush. 
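 * start_delalloc_inodes() treats nr_to_write == LONG_MAX as a full
 * flush, and writeback consumes the counter as it makes progress, so
 * it must be restored before flushing each root.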
9354 */ 9355 if (nr == LONG_MAX) 9356 wbc.nr_to_write = LONG_MAX; 9357 9358 root = list_first_entry(&splice, struct btrfs_root, 9359 delalloc_root); 9360 root = btrfs_grab_root(root); 9361 BUG_ON(!root); 9362 list_move_tail(&root->delalloc_root, 9363 &fs_info->delalloc_roots); 9364 spin_unlock(&fs_info->delalloc_root_lock); 9365 9366 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context); 9367 btrfs_put_root(root); 9368 if (ret < 0 || wbc.nr_to_write <= 0) 9369 goto out; 9370 spin_lock(&fs_info->delalloc_root_lock); 9371 } 9372 spin_unlock(&fs_info->delalloc_root_lock); 9373 9374 ret = 0; 9375 out: 9376 if (!list_empty(&splice)) { 9377 spin_lock(&fs_info->delalloc_root_lock); 9378 list_splice_tail(&splice, &fs_info->delalloc_roots); 9379 spin_unlock(&fs_info->delalloc_root_lock); 9380 } 9381 mutex_unlock(&fs_info->delalloc_root_mutex); 9382 return ret; 9383 } 9384 9385 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, 9386 struct dentry *dentry, const char *symname) 9387 { 9388 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 9389 struct btrfs_trans_handle *trans; 9390 struct btrfs_root *root = BTRFS_I(dir)->root; 9391 struct btrfs_path *path; 9392 struct btrfs_key key; 9393 struct inode *inode; 9394 struct btrfs_new_inode_args new_inode_args = { 9395 .dir = dir, 9396 .dentry = dentry, 9397 }; 9398 unsigned int trans_num_items; 9399 int err; 9400 int name_len; 9401 int datasize; 9402 unsigned long ptr; 9403 struct btrfs_file_extent_item *ei; 9404 struct extent_buffer *leaf; 9405 9406 name_len = strlen(symname); 9407 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 9408 return -ENAMETOOLONG; 9409 9410 inode = new_inode(dir->i_sb); 9411 if (!inode) 9412 return -ENOMEM; 9413 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO); 9414 inode->i_op = &btrfs_symlink_inode_operations; 9415 inode_nohighmem(inode); 9416 inode->i_mapping->a_ops = &btrfs_aops; 9417 btrfs_i_size_write(BTRFS_I(inode), name_len); 9418 inode_set_bytes(inode, name_len); 9419 9420 new_inode_args.inode = inode; 9421 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9422 if (err) 9423 goto out_inode; 9424 /* 1 additional item for the inline extent */ 9425 trans_num_items++; 9426 9427 trans = btrfs_start_transaction(root, trans_num_items); 9428 if (IS_ERR(trans)) { 9429 err = PTR_ERR(trans); 9430 goto out_new_inode_args; 9431 } 9432 9433 err = btrfs_create_new_inode(trans, &new_inode_args); 9434 if (err) 9435 goto out; 9436 9437 path = btrfs_alloc_path(); 9438 if (!path) { 9439 err = -ENOMEM; 9440 btrfs_abort_transaction(trans, err); 9441 discard_new_inode(inode); 9442 inode = NULL; 9443 goto out; 9444 } 9445 key.objectid = btrfs_ino(BTRFS_I(inode)); 9446 key.offset = 0; 9447 key.type = BTRFS_EXTENT_DATA_KEY; 9448 datasize = btrfs_file_extent_calc_inline_size(name_len); 9449 err = btrfs_insert_empty_item(trans, root, path, &key, 9450 datasize); 9451 if (err) { 9452 btrfs_abort_transaction(trans, err); 9453 btrfs_free_path(path); 9454 discard_new_inode(inode); 9455 inode = NULL; 9456 goto out; 9457 } 9458 leaf = path->nodes[0]; 9459 ei = btrfs_item_ptr(leaf, path->slots[0], 9460 struct btrfs_file_extent_item); 9461 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 9462 btrfs_set_file_extent_type(leaf, ei, 9463 BTRFS_FILE_EXTENT_INLINE); 9464 btrfs_set_file_extent_encryption(leaf, ei, 0); 9465 btrfs_set_file_extent_compression(leaf, ei, 0); 9466 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 9467 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 9468 9469 
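/* Copy the link target into the inline extent; its length was checked against BTRFS_MAX_INLINE_DATA_SIZE() above. */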
ptr = btrfs_file_extent_inline_start(ei); 9470 write_extent_buffer(leaf, symname, ptr, name_len); 9471 btrfs_mark_buffer_dirty(leaf); 9472 btrfs_free_path(path); 9473 9474 d_instantiate_new(dentry, inode); 9475 err = 0; 9476 out: 9477 btrfs_end_transaction(trans); 9478 btrfs_btree_balance_dirty(fs_info); 9479 out_new_inode_args: 9480 btrfs_new_inode_args_destroy(&new_inode_args); 9481 out_inode: 9482 if (err) 9483 iput(inode); 9484 return err; 9485 } 9486 9487 static struct btrfs_trans_handle *insert_prealloc_file_extent( 9488 struct btrfs_trans_handle *trans_in, 9489 struct btrfs_inode *inode, 9490 struct btrfs_key *ins, 9491 u64 file_offset) 9492 { 9493 struct btrfs_file_extent_item stack_fi; 9494 struct btrfs_replace_extent_info extent_info; 9495 struct btrfs_trans_handle *trans = trans_in; 9496 struct btrfs_path *path; 9497 u64 start = ins->objectid; 9498 u64 len = ins->offset; 9499 int qgroup_released; 9500 int ret; 9501 9502 memset(&stack_fi, 0, sizeof(stack_fi)); 9503 9504 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 9505 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 9506 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 9507 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 9508 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 9509 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 9510 /* Encryption and other encoding is reserved and all 0 */ 9511 9512 qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len); 9513 if (qgroup_released < 0) 9514 return ERR_PTR(qgroup_released); 9515 9516 if (trans) { 9517 ret = insert_reserved_file_extent(trans, inode, 9518 file_offset, &stack_fi, 9519 true, qgroup_released); 9520 if (ret) 9521 goto free_qgroup; 9522 return trans; 9523 } 9524 9525 extent_info.disk_offset = start; 9526 extent_info.disk_len = len; 9527 extent_info.data_offset = 0; 9528 extent_info.data_len = len; 9529 extent_info.file_offset = file_offset; 9530 extent_info.extent_buf = (char *)&stack_fi; 9531 extent_info.is_new_extent = true; 9532 extent_info.update_times = true; 9533 extent_info.qgroup_reserved = qgroup_released; 9534 extent_info.insertions = 0; 9535 9536 path = btrfs_alloc_path(); 9537 if (!path) { 9538 ret = -ENOMEM; 9539 goto free_qgroup; 9540 } 9541 9542 ret = btrfs_replace_file_extents(inode, path, file_offset, 9543 file_offset + len - 1, &extent_info, 9544 &trans); 9545 btrfs_free_path(path); 9546 if (ret) 9547 goto free_qgroup; 9548 return trans; 9549 9550 free_qgroup: 9551 /* 9552 * We have released qgroup data range at the beginning of the function, 9553 * and normally qgroup_released bytes will be freed when committing 9554 * transaction. 9555 * But if we error out early, we have to free what we have released 9556 * or we leak qgroup data reservation. 
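 * btrfs_qgroup_free_refroot() below drops the released bytes from the
 * root's BTRFS_QGROUP_RSV_DATA accounting.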
9557 */ 9558 btrfs_qgroup_free_refroot(inode->root->fs_info, 9559 inode->root->root_key.objectid, qgroup_released, 9560 BTRFS_QGROUP_RSV_DATA); 9561 return ERR_PTR(ret); 9562 } 9563 9564 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 9565 u64 start, u64 num_bytes, u64 min_size, 9566 loff_t actual_len, u64 *alloc_hint, 9567 struct btrfs_trans_handle *trans) 9568 { 9569 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 9570 struct extent_map *em; 9571 struct btrfs_root *root = BTRFS_I(inode)->root; 9572 struct btrfs_key ins; 9573 u64 cur_offset = start; 9574 u64 clear_offset = start; 9575 u64 i_size; 9576 u64 cur_bytes; 9577 u64 last_alloc = (u64)-1; 9578 int ret = 0; 9579 bool own_trans = true; 9580 u64 end = start + num_bytes - 1; 9581 9582 if (trans) 9583 own_trans = false; 9584 while (num_bytes > 0) { 9585 cur_bytes = min_t(u64, num_bytes, SZ_256M); 9586 cur_bytes = max(cur_bytes, min_size); 9587 /* 9588 * If we are severely fragmented we could end up with really 9589 * small allocations, so if the allocator is returning small 9590 * chunks lets make its job easier by only searching for those 9591 * sized chunks. 9592 */ 9593 cur_bytes = min(cur_bytes, last_alloc); 9594 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, 9595 min_size, 0, *alloc_hint, &ins, 1, 0); 9596 if (ret) 9597 break; 9598 9599 /* 9600 * We've reserved this space, and thus converted it from 9601 * ->bytes_may_use to ->bytes_reserved. Any error that happens 9602 * from here on out we will only need to clear our reservation 9603 * for the remaining unreserved area, so advance our 9604 * clear_offset by our extent size. 9605 */ 9606 clear_offset += ins.offset; 9607 9608 last_alloc = ins.offset; 9609 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode), 9610 &ins, cur_offset); 9611 /* 9612 * Now that we inserted the prealloc extent we can finally 9613 * decrement the number of reservations in the block group. 9614 * If we did it before, we could race with relocation and have 9615 * relocation miss the reserved extent, making it fail later. 
9616 */ 9617 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9618 if (IS_ERR(trans)) { 9619 ret = PTR_ERR(trans); 9620 btrfs_free_reserved_extent(fs_info, ins.objectid, 9621 ins.offset, 0); 9622 break; 9623 } 9624 9625 em = alloc_extent_map(); 9626 if (!em) { 9627 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset, 9628 cur_offset + ins.offset - 1, false); 9629 btrfs_set_inode_full_sync(BTRFS_I(inode)); 9630 goto next; 9631 } 9632 9633 em->start = cur_offset; 9634 em->orig_start = cur_offset; 9635 em->len = ins.offset; 9636 em->block_start = ins.objectid; 9637 em->block_len = ins.offset; 9638 em->orig_block_len = ins.offset; 9639 em->ram_bytes = ins.offset; 9640 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 9641 em->generation = trans->transid; 9642 9643 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true); 9644 free_extent_map(em); 9645 next: 9646 num_bytes -= ins.offset; 9647 cur_offset += ins.offset; 9648 *alloc_hint = ins.objectid + ins.offset; 9649 9650 inode_inc_iversion(inode); 9651 inode->i_ctime = current_time(inode); 9652 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 9653 if (!(mode & FALLOC_FL_KEEP_SIZE) && 9654 (actual_len > inode->i_size) && 9655 (cur_offset > inode->i_size)) { 9656 if (cur_offset > actual_len) 9657 i_size = actual_len; 9658 else 9659 i_size = cur_offset; 9660 i_size_write(inode, i_size); 9661 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 9662 } 9663 9664 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 9665 9666 if (ret) { 9667 btrfs_abort_transaction(trans, ret); 9668 if (own_trans) 9669 btrfs_end_transaction(trans); 9670 break; 9671 } 9672 9673 if (own_trans) { 9674 btrfs_end_transaction(trans); 9675 trans = NULL; 9676 } 9677 } 9678 if (clear_offset < end) 9679 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 9680 end - clear_offset + 1); 9681 return ret; 9682 } 9683 9684 int btrfs_prealloc_file_range(struct inode *inode, int mode, 9685 u64 start, u64 num_bytes, u64 min_size, 9686 loff_t actual_len, u64 *alloc_hint) 9687 { 9688 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9689 min_size, actual_len, alloc_hint, 9690 NULL); 9691 } 9692 9693 int btrfs_prealloc_file_range_trans(struct inode *inode, 9694 struct btrfs_trans_handle *trans, int mode, 9695 u64 start, u64 num_bytes, u64 min_size, 9696 loff_t actual_len, u64 *alloc_hint) 9697 { 9698 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9699 min_size, actual_len, alloc_hint, trans); 9700 } 9701 9702 static int btrfs_permission(struct mnt_idmap *idmap, 9703 struct inode *inode, int mask) 9704 { 9705 struct btrfs_root *root = BTRFS_I(inode)->root; 9706 umode_t mode = inode->i_mode; 9707 9708 if (mask & MAY_WRITE && 9709 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 9710 if (btrfs_root_readonly(root)) 9711 return -EROFS; 9712 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 9713 return -EACCES; 9714 } 9715 return generic_permission(idmap, inode, mask); 9716 } 9717 9718 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 9719 struct file *file, umode_t mode) 9720 { 9721 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 9722 struct btrfs_trans_handle *trans; 9723 struct btrfs_root *root = BTRFS_I(dir)->root; 9724 struct inode *inode; 9725 struct btrfs_new_inode_args new_inode_args = { 9726 .dir = dir, 9727 .dentry = file->f_path.dentry, 9728 .orphan = true, 9729 }; 9730 unsigned int trans_num_items; 9731 int ret; 9732 9733 inode = new_inode(dir->i_sb); 9734 if (!inode) 9735 return -ENOMEM; 
9736 inode_init_owner(idmap, inode, dir, mode); 9737 inode->i_fop = &btrfs_file_operations; 9738 inode->i_op = &btrfs_file_inode_operations; 9739 inode->i_mapping->a_ops = &btrfs_aops; 9740 9741 new_inode_args.inode = inode; 9742 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9743 if (ret) 9744 goto out_inode; 9745 9746 trans = btrfs_start_transaction(root, trans_num_items); 9747 if (IS_ERR(trans)) { 9748 ret = PTR_ERR(trans); 9749 goto out_new_inode_args; 9750 } 9751 9752 ret = btrfs_create_new_inode(trans, &new_inode_args); 9753 9754 /* 9755 * We set number of links to 0 in btrfs_create_new_inode(), and here we 9756 * set it to 1 because d_tmpfile() will issue a warning if the count is 9757 * 0, through: 9758 * 9759 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 9760 */ 9761 set_nlink(inode, 1); 9762 9763 if (!ret) { 9764 d_tmpfile(file, inode); 9765 unlock_new_inode(inode); 9766 mark_inode_dirty(inode); 9767 } 9768 9769 btrfs_end_transaction(trans); 9770 btrfs_btree_balance_dirty(fs_info); 9771 out_new_inode_args: 9772 btrfs_new_inode_args_destroy(&new_inode_args); 9773 out_inode: 9774 if (ret) 9775 iput(inode); 9776 return finish_open_simple(file, ret); 9777 } 9778 9779 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end) 9780 { 9781 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9782 unsigned long index = start >> PAGE_SHIFT; 9783 unsigned long end_index = end >> PAGE_SHIFT; 9784 struct page *page; 9785 u32 len; 9786 9787 ASSERT(end + 1 - start <= U32_MAX); 9788 len = end + 1 - start; 9789 while (index <= end_index) { 9790 page = find_get_page(inode->vfs_inode.i_mapping, index); 9791 ASSERT(page); /* Pages should be in the extent_io_tree */ 9792 9793 btrfs_page_set_writeback(fs_info, page, start, len); 9794 put_page(page); 9795 index++; 9796 } 9797 } 9798 9799 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info, 9800 int compress_type) 9801 { 9802 switch (compress_type) { 9803 case BTRFS_COMPRESS_NONE: 9804 return BTRFS_ENCODED_IO_COMPRESSION_NONE; 9805 case BTRFS_COMPRESS_ZLIB: 9806 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB; 9807 case BTRFS_COMPRESS_LZO: 9808 /* 9809 * The LZO format depends on the sector size. 64K is the maximum 9810 * sector size that we support. 
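 * The return value encodes the sector size relative to the 4K
 * variant: sectorsize_bits == 12 (4K) yields
 * BTRFS_ENCODED_IO_COMPRESSION_LZO_4K, and sectorsize_bits == 16 (64K)
 * yields BTRFS_ENCODED_IO_COMPRESSION_LZO_64K.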
9811 */ 9812 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) 9813 return -EINVAL; 9814 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 9815 (fs_info->sectorsize_bits - 12); 9816 case BTRFS_COMPRESS_ZSTD: 9817 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD; 9818 default: 9819 return -EUCLEAN; 9820 } 9821 } 9822 9823 static ssize_t btrfs_encoded_read_inline( 9824 struct kiocb *iocb, 9825 struct iov_iter *iter, u64 start, 9826 u64 lockend, 9827 struct extent_state **cached_state, 9828 u64 extent_start, size_t count, 9829 struct btrfs_ioctl_encoded_io_args *encoded, 9830 bool *unlocked) 9831 { 9832 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9833 struct btrfs_root *root = inode->root; 9834 struct btrfs_fs_info *fs_info = root->fs_info; 9835 struct extent_io_tree *io_tree = &inode->io_tree; 9836 struct btrfs_path *path; 9837 struct extent_buffer *leaf; 9838 struct btrfs_file_extent_item *item; 9839 u64 ram_bytes; 9840 unsigned long ptr; 9841 void *tmp; 9842 ssize_t ret; 9843 9844 path = btrfs_alloc_path(); 9845 if (!path) { 9846 ret = -ENOMEM; 9847 goto out; 9848 } 9849 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 9850 extent_start, 0); 9851 if (ret) { 9852 if (ret > 0) { 9853 /* The extent item disappeared? */ 9854 ret = -EIO; 9855 } 9856 goto out; 9857 } 9858 leaf = path->nodes[0]; 9859 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 9860 9861 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); 9862 ptr = btrfs_file_extent_inline_start(item); 9863 9864 encoded->len = min_t(u64, extent_start + ram_bytes, 9865 inode->vfs_inode.i_size) - iocb->ki_pos; 9866 ret = btrfs_encoded_io_compression_from_extent(fs_info, 9867 btrfs_file_extent_compression(leaf, item)); 9868 if (ret < 0) 9869 goto out; 9870 encoded->compression = ret; 9871 if (encoded->compression) { 9872 size_t inline_size; 9873 9874 inline_size = btrfs_file_extent_inline_item_len(leaf, 9875 path->slots[0]); 9876 if (inline_size > count) { 9877 ret = -ENOBUFS; 9878 goto out; 9879 } 9880 count = inline_size; 9881 encoded->unencoded_len = ram_bytes; 9882 encoded->unencoded_offset = iocb->ki_pos - extent_start; 9883 } else { 9884 count = min_t(u64, count, encoded->len); 9885 encoded->len = count; 9886 encoded->unencoded_len = count; 9887 ptr += iocb->ki_pos - extent_start; 9888 } 9889 9890 tmp = kmalloc(count, GFP_NOFS); 9891 if (!tmp) { 9892 ret = -ENOMEM; 9893 goto out; 9894 } 9895 read_extent_buffer(leaf, tmp, ptr, count); 9896 btrfs_release_path(path); 9897 unlock_extent(io_tree, start, lockend, cached_state); 9898 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9899 *unlocked = true; 9900 9901 ret = copy_to_iter(tmp, count, iter); 9902 if (ret != count) 9903 ret = -EFAULT; 9904 kfree(tmp); 9905 out: 9906 btrfs_free_path(path); 9907 return ret; 9908 } 9909 9910 struct btrfs_encoded_read_private { 9911 struct btrfs_inode *inode; 9912 u64 file_offset; 9913 wait_queue_head_t wait; 9914 atomic_t pending; 9915 blk_status_t status; 9916 }; 9917 9918 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio) 9919 { 9920 struct btrfs_encoded_read_private *priv = bbio->private; 9921 9922 if (bbio->bio.bi_status) { 9923 /* 9924 * The memory barrier implied by the atomic_dec_return() here 9925 * pairs with the memory barrier implied by the 9926 * atomic_dec_return() or io_wait_event() in 9927 * btrfs_encoded_read_regular_fill_pages() to ensure that this 9928 * write is observed before the load of status in 9929 * btrfs_encoded_read_regular_fill_pages(). 
9930 */ 9931 WRITE_ONCE(priv->status, bbio->bio.bi_status); 9932 } 9933 if (!atomic_dec_return(&priv->pending)) 9934 wake_up(&priv->wait); 9935 bio_put(&bbio->bio); 9936 } 9937 9938 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, 9939 u64 file_offset, u64 disk_bytenr, 9940 u64 disk_io_size, struct page **pages) 9941 { 9942 struct btrfs_encoded_read_private priv = { 9943 .inode = inode, 9944 .file_offset = file_offset, 9945 .pending = ATOMIC_INIT(1), 9946 }; 9947 unsigned long i = 0; 9948 u64 cur = 0; 9949 9950 init_waitqueue_head(&priv.wait); 9951 /* Submit bios for the extent, splitting due to bio limits as necessary. */ 9952 while (cur < disk_io_size) { 9953 struct bio *bio = NULL; 9954 u64 remaining = disk_io_size - cur; 9955 9956 while (bio || remaining) { 9957 size_t bytes = min_t(u64, remaining, PAGE_SIZE); 9958 9959 if (!bio) { 9960 bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, 9961 inode, 9962 btrfs_encoded_read_endio, 9963 &priv); 9964 bio->bi_iter.bi_sector = 9965 (disk_bytenr + cur) >> SECTOR_SHIFT; 9966 } 9967 9968 if (!bytes || 9969 bio_add_page(bio, pages[i], bytes, 0) < bytes) { 9970 atomic_inc(&priv.pending); 9971 btrfs_submit_bio(bio, 0); 9972 bio = NULL; 9973 continue; 9974 } 9975 9976 i++; 9977 cur += bytes; 9978 remaining -= bytes; 9979 } 9980 } 9981 9982 if (atomic_dec_return(&priv.pending)) 9983 io_wait_event(priv.wait, !atomic_read(&priv.pending)); 9984 /* See btrfs_encoded_read_endio() for ordering. */ 9985 return blk_status_to_errno(READ_ONCE(priv.status)); 9986 } 9987 9988 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, 9989 struct iov_iter *iter, 9990 u64 start, u64 lockend, 9991 struct extent_state **cached_state, 9992 u64 disk_bytenr, u64 disk_io_size, 9993 size_t count, bool compressed, 9994 bool *unlocked) 9995 { 9996 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9997 struct extent_io_tree *io_tree = &inode->io_tree; 9998 struct page **pages; 9999 unsigned long nr_pages, i; 10000 u64 cur; 10001 size_t page_offset; 10002 ssize_t ret; 10003 10004 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); 10005 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 10006 if (!pages) 10007 return -ENOMEM; 10008 ret = btrfs_alloc_page_array(nr_pages, pages); 10009 if (ret) { 10010 ret = -ENOMEM; 10011 goto out; 10012 } 10013 10014 ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr, 10015 disk_io_size, pages); 10016 if (ret) 10017 goto out; 10018 10019 unlock_extent(io_tree, start, lockend, cached_state); 10020 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10021 *unlocked = true; 10022 10023 if (compressed) { 10024 i = 0; 10025 page_offset = 0; 10026 } else { 10027 i = (iocb->ki_pos - start) >> PAGE_SHIFT; 10028 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); 10029 } 10030 cur = 0; 10031 while (cur < count) { 10032 size_t bytes = min_t(size_t, count - cur, 10033 PAGE_SIZE - page_offset); 10034 10035 if (copy_page_to_iter(pages[i], page_offset, bytes, 10036 iter) != bytes) { 10037 ret = -EFAULT; 10038 goto out; 10039 } 10040 i++; 10041 cur += bytes; 10042 page_offset = 0; 10043 } 10044 ret = count; 10045 out: 10046 for (i = 0; i < nr_pages; i++) { 10047 if (pages[i]) 10048 __free_page(pages[i]); 10049 } 10050 kfree(pages); 10051 return ret; 10052 } 10053 10054 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, 10055 struct btrfs_ioctl_encoded_io_args *encoded) 10056 { 10057 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10058 struct btrfs_fs_info 
*fs_info = inode->root->fs_info; 10059 struct extent_io_tree *io_tree = &inode->io_tree; 10060 ssize_t ret; 10061 size_t count = iov_iter_count(iter); 10062 u64 start, lockend, disk_bytenr, disk_io_size; 10063 struct extent_state *cached_state = NULL; 10064 struct extent_map *em; 10065 bool unlocked = false; 10066 10067 file_accessed(iocb->ki_filp); 10068 10069 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED); 10070 10071 if (iocb->ki_pos >= inode->vfs_inode.i_size) { 10072 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10073 return 0; 10074 } 10075 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); 10076 /* 10077 * We don't know how long the extent containing iocb->ki_pos is, but if 10078 * it's compressed we know that it won't be longer than this. 10079 */ 10080 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; 10081 10082 for (;;) { 10083 struct btrfs_ordered_extent *ordered; 10084 10085 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, 10086 lockend - start + 1); 10087 if (ret) 10088 goto out_unlock_inode; 10089 lock_extent(io_tree, start, lockend, &cached_state); 10090 ordered = btrfs_lookup_ordered_range(inode, start, 10091 lockend - start + 1); 10092 if (!ordered) 10093 break; 10094 btrfs_put_ordered_extent(ordered); 10095 unlock_extent(io_tree, start, lockend, &cached_state); 10096 cond_resched(); 10097 } 10098 10099 em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1); 10100 if (IS_ERR(em)) { 10101 ret = PTR_ERR(em); 10102 goto out_unlock_extent; 10103 } 10104 10105 if (em->block_start == EXTENT_MAP_INLINE) { 10106 u64 extent_start = em->start; 10107 10108 /* 10109 * For inline extents we get everything we need out of the 10110 * extent item. 10111 */ 10112 free_extent_map(em); 10113 em = NULL; 10114 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, 10115 &cached_state, extent_start, 10116 count, encoded, &unlocked); 10117 goto out; 10118 } 10119 10120 /* 10121 * We only want to return up to EOF even if the extent extends beyond 10122 * that. 10123 */ 10124 encoded->len = min_t(u64, extent_map_end(em), 10125 inode->vfs_inode.i_size) - iocb->ki_pos; 10126 if (em->block_start == EXTENT_MAP_HOLE || 10127 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 10128 disk_bytenr = EXTENT_MAP_HOLE; 10129 count = min_t(u64, count, encoded->len); 10130 encoded->len = count; 10131 encoded->unencoded_len = count; 10132 } else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10133 disk_bytenr = em->block_start; 10134 /* 10135 * Bail if the buffer isn't large enough to return the whole 10136 * compressed extent. 10137 */ 10138 if (em->block_len > count) { 10139 ret = -ENOBUFS; 10140 goto out_em; 10141 } 10142 disk_io_size = em->block_len; 10143 count = em->block_len; 10144 encoded->unencoded_len = em->ram_bytes; 10145 encoded->unencoded_offset = iocb->ki_pos - em->orig_start; 10146 ret = btrfs_encoded_io_compression_from_extent(fs_info, 10147 em->compress_type); 10148 if (ret < 0) 10149 goto out_em; 10150 encoded->compression = ret; 10151 } else { 10152 disk_bytenr = em->block_start + (start - em->start); 10153 if (encoded->len > count) 10154 encoded->len = count; 10155 /* 10156 * Don't read beyond what we locked. This also limits the page 10157 * allocations that we'll do. 
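 * disk_io_size is clamped below so that the read ends no later than
 * lockend + 1, and is then rounded up to the sector size.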
10158 */ 10159 disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; 10160 count = start + disk_io_size - iocb->ki_pos; 10161 encoded->len = count; 10162 encoded->unencoded_len = count; 10163 disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize); 10164 } 10165 free_extent_map(em); 10166 em = NULL; 10167 10168 if (disk_bytenr == EXTENT_MAP_HOLE) { 10169 unlock_extent(io_tree, start, lockend, &cached_state); 10170 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10171 unlocked = true; 10172 ret = iov_iter_zero(count, iter); 10173 if (ret != count) 10174 ret = -EFAULT; 10175 } else { 10176 ret = btrfs_encoded_read_regular(iocb, iter, start, lockend, 10177 &cached_state, disk_bytenr, 10178 disk_io_size, count, 10179 encoded->compression, 10180 &unlocked); 10181 } 10182 10183 out: 10184 if (ret >= 0) 10185 iocb->ki_pos += encoded->len; 10186 out_em: 10187 free_extent_map(em); 10188 out_unlock_extent: 10189 if (!unlocked) 10190 unlock_extent(io_tree, start, lockend, &cached_state); 10191 out_unlock_inode: 10192 if (!unlocked) 10193 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10194 return ret; 10195 } 10196 10197 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 10198 const struct btrfs_ioctl_encoded_io_args *encoded) 10199 { 10200 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10201 struct btrfs_root *root = inode->root; 10202 struct btrfs_fs_info *fs_info = root->fs_info; 10203 struct extent_io_tree *io_tree = &inode->io_tree; 10204 struct extent_changeset *data_reserved = NULL; 10205 struct extent_state *cached_state = NULL; 10206 int compression; 10207 size_t orig_count; 10208 u64 start, end; 10209 u64 num_bytes, ram_bytes, disk_num_bytes; 10210 unsigned long nr_pages, i; 10211 struct page **pages; 10212 struct btrfs_key ins; 10213 bool extent_reserved = false; 10214 struct extent_map *em; 10215 ssize_t ret; 10216 10217 switch (encoded->compression) { 10218 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB: 10219 compression = BTRFS_COMPRESS_ZLIB; 10220 break; 10221 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD: 10222 compression = BTRFS_COMPRESS_ZSTD; 10223 break; 10224 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K: 10225 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K: 10226 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K: 10227 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K: 10228 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K: 10229 /* The sector size must match for LZO. */ 10230 if (encoded->compression - 10231 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 != 10232 fs_info->sectorsize_bits) 10233 return -EINVAL; 10234 compression = BTRFS_COMPRESS_LZO; 10235 break; 10236 default: 10237 return -EINVAL; 10238 } 10239 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 10240 return -EINVAL; 10241 10242 orig_count = iov_iter_count(from); 10243 10244 /* The extent size must be sane. */ 10245 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || 10246 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0) 10247 return -EINVAL; 10248 10249 /* 10250 * The compressed data must be smaller than the decompressed data. 10251 * 10252 * It's of course possible for data to compress to larger or the same 10253 * size, but the buffered I/O path falls back to no compression for such 10254 * data, and we don't want to break any assumptions by creating these 10255 * extents. 10256 * 10257 * Note that this is less strict than the current check we have that the 10258 * compressed data must be at least one sector smaller than the 10259 * decompressed data. 
We only want to enforce the weaker requirement 10260 * from old kernels that it is at least one byte smaller. 10261 */ 10262 if (orig_count >= encoded->unencoded_len) 10263 return -EINVAL; 10264 10265 /* The extent must start on a sector boundary. */ 10266 start = iocb->ki_pos; 10267 if (!IS_ALIGNED(start, fs_info->sectorsize)) 10268 return -EINVAL; 10269 10270 /* 10271 * The extent must end on a sector boundary. However, we allow a write 10272 * which ends at or extends i_size to have an unaligned length; we round 10273 * up the extent size and set i_size to the unaligned end. 10274 */ 10275 if (start + encoded->len < inode->vfs_inode.i_size && 10276 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) 10277 return -EINVAL; 10278 10279 /* Finally, the offset in the unencoded data must be sector-aligned. */ 10280 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) 10281 return -EINVAL; 10282 10283 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); 10284 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); 10285 end = start + num_bytes - 1; 10286 10287 /* 10288 * If the extent cannot be inline, the compressed data on disk must be 10289 * sector-aligned. For convenience, we extend it with zeroes if it 10290 * isn't. 10291 */ 10292 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); 10293 nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE); 10294 pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT); 10295 if (!pages) 10296 return -ENOMEM; 10297 for (i = 0; i < nr_pages; i++) { 10298 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from)); 10299 char *kaddr; 10300 10301 pages[i] = alloc_page(GFP_KERNEL_ACCOUNT); 10302 if (!pages[i]) { 10303 ret = -ENOMEM; 10304 goto out_pages; 10305 } 10306 kaddr = kmap_local_page(pages[i]); 10307 if (copy_from_iter(kaddr, bytes, from) != bytes) { 10308 kunmap_local(kaddr); 10309 ret = -EFAULT; 10310 goto out_pages; 10311 } 10312 if (bytes < PAGE_SIZE) 10313 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); 10314 kunmap_local(kaddr); 10315 } 10316 10317 for (;;) { 10318 struct btrfs_ordered_extent *ordered; 10319 10320 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes); 10321 if (ret) 10322 goto out_pages; 10323 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, 10324 start >> PAGE_SHIFT, 10325 end >> PAGE_SHIFT); 10326 if (ret) 10327 goto out_pages; 10328 lock_extent(io_tree, start, end, &cached_state); 10329 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); 10330 if (!ordered && 10331 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) 10332 break; 10333 if (ordered) 10334 btrfs_put_ordered_extent(ordered); 10335 unlock_extent(io_tree, start, end, &cached_state); 10336 cond_resched(); 10337 } 10338 10339 /* 10340 * We don't use the higher-level delalloc space functions because our 10341 * num_bytes and disk_num_bytes are different. 10342 */ 10343 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes); 10344 if (ret) 10345 goto out_unlock; 10346 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); 10347 if (ret) 10348 goto out_free_data_space; 10349 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, 10350 false); 10351 if (ret) 10352 goto out_qgroup_free_data; 10353 10354 /* Try an inline extent first. 
*/ 10355 if (start == 0 && encoded->unencoded_len == encoded->len && 10356 encoded->unencoded_offset == 0) { 10357 ret = cow_file_range_inline(inode, encoded->len, orig_count, 10358 compression, pages, true); 10359 if (ret <= 0) { 10360 if (ret == 0) 10361 ret = orig_count; 10362 goto out_delalloc_release; 10363 } 10364 } 10365 10366 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes, 10367 disk_num_bytes, 0, 0, &ins, 1, 1); 10368 if (ret) 10369 goto out_delalloc_release; 10370 extent_reserved = true; 10371 10372 em = create_io_em(inode, start, num_bytes, 10373 start - encoded->unencoded_offset, ins.objectid, 10374 ins.offset, ins.offset, ram_bytes, compression, 10375 BTRFS_ORDERED_COMPRESSED); 10376 if (IS_ERR(em)) { 10377 ret = PTR_ERR(em); 10378 goto out_free_reserved; 10379 } 10380 free_extent_map(em); 10381 10382 ret = btrfs_add_ordered_extent(inode, start, num_bytes, ram_bytes, 10383 ins.objectid, ins.offset, 10384 encoded->unencoded_offset, 10385 (1 << BTRFS_ORDERED_ENCODED) | 10386 (1 << BTRFS_ORDERED_COMPRESSED), 10387 compression); 10388 if (ret) { 10389 btrfs_drop_extent_map_range(inode, start, end, false); 10390 goto out_free_reserved; 10391 } 10392 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10393 10394 if (start + encoded->len > inode->vfs_inode.i_size) 10395 i_size_write(&inode->vfs_inode, start + encoded->len); 10396 10397 unlock_extent(io_tree, start, end, &cached_state); 10398 10399 btrfs_delalloc_release_extents(inode, num_bytes); 10400 10401 if (btrfs_submit_compressed_write(inode, start, num_bytes, ins.objectid, 10402 ins.offset, pages, nr_pages, 0, NULL, 10403 false)) { 10404 btrfs_writepage_endio_finish_ordered(inode, pages[0], start, end, 0); 10405 ret = -EIO; 10406 goto out_pages; 10407 } 10408 ret = orig_count; 10409 goto out; 10410 10411 out_free_reserved: 10412 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10413 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 10414 out_delalloc_release: 10415 btrfs_delalloc_release_extents(inode, num_bytes); 10416 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); 10417 out_qgroup_free_data: 10418 if (ret < 0) 10419 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes); 10420 out_free_data_space: 10421 /* 10422 * If btrfs_reserve_extent() succeeded, then we already decremented 10423 * bytes_may_use. 10424 */ 10425 if (!extent_reserved) 10426 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes); 10427 out_unlock: 10428 unlock_extent(io_tree, start, end, &cached_state); 10429 out_pages: 10430 for (i = 0; i < nr_pages; i++) { 10431 if (pages[i]) 10432 __free_page(pages[i]); 10433 } 10434 kvfree(pages); 10435 out: 10436 if (ret >= 0) 10437 iocb->ki_pos += encoded->len; 10438 return ret; 10439 } 10440 10441 #ifdef CONFIG_SWAP 10442 /* 10443 * Add an entry indicating a block group or device which is pinned by a 10444 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 10445 * negative errno on failure. 
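 * Entries live in fs_info->swapfile_pins, an rbtree keyed by
 * (ptr, inode); pinning an already-pinned block group only bumps its
 * bg_extent_count.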
10446 */ 10447 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 10448 bool is_block_group) 10449 { 10450 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10451 struct btrfs_swapfile_pin *sp, *entry; 10452 struct rb_node **p; 10453 struct rb_node *parent = NULL; 10454 10455 sp = kmalloc(sizeof(*sp), GFP_NOFS); 10456 if (!sp) 10457 return -ENOMEM; 10458 sp->ptr = ptr; 10459 sp->inode = inode; 10460 sp->is_block_group = is_block_group; 10461 sp->bg_extent_count = 1; 10462 10463 spin_lock(&fs_info->swapfile_pins_lock); 10464 p = &fs_info->swapfile_pins.rb_node; 10465 while (*p) { 10466 parent = *p; 10467 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 10468 if (sp->ptr < entry->ptr || 10469 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 10470 p = &(*p)->rb_left; 10471 } else if (sp->ptr > entry->ptr || 10472 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 10473 p = &(*p)->rb_right; 10474 } else { 10475 if (is_block_group) 10476 entry->bg_extent_count++; 10477 spin_unlock(&fs_info->swapfile_pins_lock); 10478 kfree(sp); 10479 return 1; 10480 } 10481 } 10482 rb_link_node(&sp->node, parent, p); 10483 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 10484 spin_unlock(&fs_info->swapfile_pins_lock); 10485 return 0; 10486 } 10487 10488 /* Free all of the entries pinned by this swapfile. */ 10489 static void btrfs_free_swapfile_pins(struct inode *inode) 10490 { 10491 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10492 struct btrfs_swapfile_pin *sp; 10493 struct rb_node *node, *next; 10494 10495 spin_lock(&fs_info->swapfile_pins_lock); 10496 node = rb_first(&fs_info->swapfile_pins); 10497 while (node) { 10498 next = rb_next(node); 10499 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 10500 if (sp->inode == inode) { 10501 rb_erase(&sp->node, &fs_info->swapfile_pins); 10502 if (sp->is_block_group) { 10503 btrfs_dec_block_group_swap_extents(sp->ptr, 10504 sp->bg_extent_count); 10505 btrfs_put_block_group(sp->ptr); 10506 } 10507 kfree(sp); 10508 } 10509 node = next; 10510 } 10511 spin_unlock(&fs_info->swapfile_pins_lock); 10512 } 10513 10514 struct btrfs_swap_info { 10515 u64 start; 10516 u64 block_start; 10517 u64 block_len; 10518 u64 lowest_ppage; 10519 u64 highest_ppage; 10520 unsigned long nr_pages; 10521 int nr_extents; 10522 }; 10523 10524 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 10525 struct btrfs_swap_info *bsi) 10526 { 10527 unsigned long nr_pages; 10528 unsigned long max_pages; 10529 u64 first_ppage, first_ppage_reported, next_ppage; 10530 int ret; 10531 10532 /* 10533 * Our swapfile may have had its size extended after the swap header was 10534 * written. In that case activating the swapfile should not go beyond 10535 * the max size set in the swap header. 
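 * sis->max is the page count recorded in the swap header, so stop
 * once bsi->nr_pages reaches it and clamp each extent to the
 * remaining pages.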
10536 */ 10537 if (bsi->nr_pages >= sis->max) 10538 return 0; 10539 10540 max_pages = sis->max - bsi->nr_pages; 10541 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT; 10542 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; 10543 10544 if (first_ppage >= next_ppage) 10545 return 0; 10546 nr_pages = next_ppage - first_ppage; 10547 nr_pages = min(nr_pages, max_pages); 10548 10549 first_ppage_reported = first_ppage; 10550 if (bsi->start == 0) 10551 first_ppage_reported++; 10552 if (bsi->lowest_ppage > first_ppage_reported) 10553 bsi->lowest_ppage = first_ppage_reported; 10554 if (bsi->highest_ppage < (next_ppage - 1)) 10555 bsi->highest_ppage = next_ppage - 1; 10556 10557 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); 10558 if (ret < 0) 10559 return ret; 10560 bsi->nr_extents += ret; 10561 bsi->nr_pages += nr_pages; 10562 return 0; 10563 } 10564 10565 static void btrfs_swap_deactivate(struct file *file) 10566 { 10567 struct inode *inode = file_inode(file); 10568 10569 btrfs_free_swapfile_pins(inode); 10570 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); 10571 } 10572 10573 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10574 sector_t *span) 10575 { 10576 struct inode *inode = file_inode(file); 10577 struct btrfs_root *root = BTRFS_I(inode)->root; 10578 struct btrfs_fs_info *fs_info = root->fs_info; 10579 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 10580 struct extent_state *cached_state = NULL; 10581 struct extent_map *em = NULL; 10582 struct btrfs_device *device = NULL; 10583 struct btrfs_swap_info bsi = { 10584 .lowest_ppage = (sector_t)-1ULL, 10585 }; 10586 int ret = 0; 10587 u64 isize; 10588 u64 start; 10589 10590 /* 10591 * If the swap file was just created, make sure delalloc is done. If the 10592 * file changes again after this, the user is doing something stupid and 10593 * we don't really care. 10594 */ 10595 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); 10596 if (ret) 10597 return ret; 10598 10599 /* 10600 * The inode is locked, so these flags won't change after we check them. 10601 */ 10602 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { 10603 btrfs_warn(fs_info, "swapfile must not be compressed"); 10604 return -EINVAL; 10605 } 10606 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { 10607 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); 10608 return -EINVAL; 10609 } 10610 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 10611 btrfs_warn(fs_info, "swapfile must not be checksummed"); 10612 return -EINVAL; 10613 } 10614 10615 /* 10616 * Balance or device remove/replace/resize can move stuff around from 10617 * under us. The exclop protection makes sure they aren't running/won't 10618 * run concurrently while we are mapping the swap extents, and 10619 * fs_info->swapfile_pins prevents them from running while the swap 10620 * file is active and moving the extents. Note that this also prevents 10621 * a concurrent device add which isn't actually necessary, but it's not 10622 * really worth the trouble to allow it. 10623 */ 10624 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) { 10625 btrfs_warn(fs_info, 10626 "cannot activate swapfile while exclusive operation is running"); 10627 return -EBUSY; 10628 } 10629 10630 /* 10631 * Prevent snapshot creation while we are activating the swap file. 10632 * We do not want to race with snapshot creation. 
If snapshot creation 10633 * already started before we bumped nr_swapfiles from 0 to 1 and 10634 * completes before the first write into the swap file after it is 10635 * activated, then that write would fall back to COW. 10636 */ 10637 if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) { 10638 btrfs_exclop_finish(fs_info); 10639 btrfs_warn(fs_info, 10640 "cannot activate swapfile because snapshot creation is in progress"); 10641 return -EINVAL; 10642 } 10643 /* 10644 * Snapshots can create extents which require COW even if NODATACOW is 10645 * set. We use this counter to prevent snapshots. We must increment it 10646 * before walking the extents because we don't want a concurrent 10647 * snapshot to run after we've already checked the extents. 10648 * 10649 * It is possible that the subvolume is marked for deletion but not yet 10650 * removed. To prevent this race, we check the root status before 10651 * activating the swapfile. 10652 */ 10653 spin_lock(&root->root_item_lock); 10654 if (btrfs_root_dead(root)) { 10655 spin_unlock(&root->root_item_lock); 10656 10657 btrfs_exclop_finish(fs_info); 10658 btrfs_warn(fs_info, 10659 "cannot activate swapfile because subvolume %llu is being deleted", 10660 root->root_key.objectid); 10661 return -EPERM; 10662 } 10663 atomic_inc(&root->nr_swapfiles); 10664 spin_unlock(&root->root_item_lock); 10665 10666 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); 10667 10668 lock_extent(io_tree, 0, isize - 1, &cached_state); 10669 start = 0; 10670 while (start < isize) { 10671 u64 logical_block_start, physical_block_start; 10672 struct btrfs_block_group *bg; 10673 u64 len = isize - start; 10674 10675 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); 10676 if (IS_ERR(em)) { 10677 ret = PTR_ERR(em); 10678 goto out; 10679 } 10680 10681 if (em->block_start == EXTENT_MAP_HOLE) { 10682 btrfs_warn(fs_info, "swapfile must not have holes"); 10683 ret = -EINVAL; 10684 goto out; 10685 } 10686 if (em->block_start == EXTENT_MAP_INLINE) { 10687 /* 10688 * It's unlikely we'll ever actually find ourselves 10689 * here, as a file small enough to fit inline won't be 10690 * big enough to store more than the swap header, but in 10691 * case something changes in the future, let's catch it 10692 * here rather than later.
10693 */ 10694 btrfs_warn(fs_info, "swapfile must not be inline"); 10695 ret = -EINVAL; 10696 goto out; 10697 } 10698 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10699 btrfs_warn(fs_info, "swapfile must not be compressed"); 10700 ret = -EINVAL; 10701 goto out; 10702 } 10703 10704 logical_block_start = em->block_start + (start - em->start); 10705 len = min(len, em->len - (start - em->start)); 10706 free_extent_map(em); 10707 em = NULL; 10708 10709 ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true); 10710 if (ret < 0) { 10711 goto out; 10712 } else if (ret) { 10713 ret = 0; 10714 } else { 10715 btrfs_warn(fs_info, 10716 "swapfile must not be copy-on-write"); 10717 ret = -EINVAL; 10718 goto out; 10719 } 10720 10721 em = btrfs_get_chunk_map(fs_info, logical_block_start, len); 10722 if (IS_ERR(em)) { 10723 ret = PTR_ERR(em); 10724 goto out; 10725 } 10726 10727 if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 10728 btrfs_warn(fs_info, 10729 "swapfile must have single data profile"); 10730 ret = -EINVAL; 10731 goto out; 10732 } 10733 10734 if (device == NULL) { 10735 device = em->map_lookup->stripes[0].dev; 10736 ret = btrfs_add_swapfile_pin(inode, device, false); 10737 if (ret == 1) 10738 ret = 0; 10739 else if (ret) 10740 goto out; 10741 } else if (device != em->map_lookup->stripes[0].dev) { 10742 btrfs_warn(fs_info, "swapfile must be on one device"); 10743 ret = -EINVAL; 10744 goto out; 10745 } 10746 10747 physical_block_start = (em->map_lookup->stripes[0].physical + 10748 (logical_block_start - em->start)); 10749 len = min(len, em->len - (logical_block_start - em->start)); 10750 free_extent_map(em); 10751 em = NULL; 10752 10753 bg = btrfs_lookup_block_group(fs_info, logical_block_start); 10754 if (!bg) { 10755 btrfs_warn(fs_info, 10756 "could not find block group containing swapfile"); 10757 ret = -EINVAL; 10758 goto out; 10759 } 10760 10761 if (!btrfs_inc_block_group_swap_extents(bg)) { 10762 btrfs_warn(fs_info, 10763 "block group for swapfile at %llu is read-only%s", 10764 bg->start, 10765 atomic_read(&fs_info->scrubs_running) ? 
10766 " (scrub running)" : ""); 10767 btrfs_put_block_group(bg); 10768 ret = -EINVAL; 10769 goto out; 10770 } 10771 10772 ret = btrfs_add_swapfile_pin(inode, bg, true); 10773 if (ret) { 10774 btrfs_put_block_group(bg); 10775 if (ret == 1) 10776 ret = 0; 10777 else 10778 goto out; 10779 } 10780 10781 if (bsi.block_len && 10782 bsi.block_start + bsi.block_len == physical_block_start) { 10783 bsi.block_len += len; 10784 } else { 10785 if (bsi.block_len) { 10786 ret = btrfs_add_swap_extent(sis, &bsi); 10787 if (ret) 10788 goto out; 10789 } 10790 bsi.start = start; 10791 bsi.block_start = physical_block_start; 10792 bsi.block_len = len; 10793 } 10794 10795 start += len; 10796 } 10797 10798 if (bsi.block_len) 10799 ret = btrfs_add_swap_extent(sis, &bsi); 10800 10801 out: 10802 if (!IS_ERR_OR_NULL(em)) 10803 free_extent_map(em); 10804 10805 unlock_extent(io_tree, 0, isize - 1, &cached_state); 10806 10807 if (ret) 10808 btrfs_swap_deactivate(file); 10809 10810 btrfs_drew_write_unlock(&root->snapshot_lock); 10811 10812 btrfs_exclop_finish(fs_info); 10813 10814 if (ret) 10815 return ret; 10816 10817 if (device) 10818 sis->bdev = device->bdev; 10819 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; 10820 sis->max = bsi.nr_pages; 10821 sis->pages = bsi.nr_pages - 1; 10822 sis->highest_bit = bsi.nr_pages - 1; 10823 return bsi.nr_extents; 10824 } 10825 #else 10826 static void btrfs_swap_deactivate(struct file *file) 10827 { 10828 } 10829 10830 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10831 sector_t *span) 10832 { 10833 return -EOPNOTSUPP; 10834 } 10835 #endif 10836 10837 /* 10838 * Update the number of bytes used in the VFS' inode. When we replace extents in 10839 * a range (clone, dedupe, fallocate's zero range), we must update the number of 10840 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls 10841 * always get a correct value. 10842 */ 10843 void btrfs_update_inode_bytes(struct btrfs_inode *inode, 10844 const u64 add_bytes, 10845 const u64 del_bytes) 10846 { 10847 if (add_bytes == del_bytes) 10848 return; 10849 10850 spin_lock(&inode->lock); 10851 if (del_bytes > 0) 10852 inode_sub_bytes(&inode->vfs_inode, del_bytes); 10853 if (add_bytes > 0) 10854 inode_add_bytes(&inode->vfs_inode, add_bytes); 10855 spin_unlock(&inode->lock); 10856 } 10857 10858 /* 10859 * Verify that there are no ordered extents for a given file range. 10860 * 10861 * @inode: The target inode. 10862 * @start: Start offset of the file range, should be sector size aligned. 10863 * @end: End offset (inclusive) of the file range, its value +1 should be 10864 * sector size aligned. 10865 * 10866 * This should typically be used for cases where we locked an inode's VFS lock in 10867 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode, 10868 * we have flushed all delalloc in the range, we have waited for all ordered 10869 * extents in the range to complete and finally we have locked the file range in 10870 * the inode's io_tree. 
10871 */ 10872 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end) 10873 { 10874 struct btrfs_root *root = inode->root; 10875 struct btrfs_ordered_extent *ordered; 10876 10877 if (!IS_ENABLED(CONFIG_BTRFS_ASSERT)) 10878 return; 10879 10880 ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start); 10881 if (ordered) { 10882 btrfs_err(root->fs_info, 10883 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])", 10884 start, end, btrfs_ino(inode), root->root_key.objectid, 10885 ordered->file_offset, 10886 ordered->file_offset + ordered->num_bytes - 1); 10887 btrfs_put_ordered_extent(ordered); 10888 } 10889 10890 ASSERT(ordered == NULL); 10891 } 10892 10893 static const struct inode_operations btrfs_dir_inode_operations = { 10894 .getattr = btrfs_getattr, 10895 .lookup = btrfs_lookup, 10896 .create = btrfs_create, 10897 .unlink = btrfs_unlink, 10898 .link = btrfs_link, 10899 .mkdir = btrfs_mkdir, 10900 .rmdir = btrfs_rmdir, 10901 .rename = btrfs_rename2, 10902 .symlink = btrfs_symlink, 10903 .setattr = btrfs_setattr, 10904 .mknod = btrfs_mknod, 10905 .listxattr = btrfs_listxattr, 10906 .permission = btrfs_permission, 10907 .get_inode_acl = btrfs_get_acl, 10908 .set_acl = btrfs_set_acl, 10909 .update_time = btrfs_update_time, 10910 .tmpfile = btrfs_tmpfile, 10911 .fileattr_get = btrfs_fileattr_get, 10912 .fileattr_set = btrfs_fileattr_set, 10913 }; 10914 10915 static const struct file_operations btrfs_dir_file_operations = { 10916 .llseek = generic_file_llseek, 10917 .read = generic_read_dir, 10918 .iterate_shared = btrfs_real_readdir, 10919 .open = btrfs_opendir, 10920 .unlocked_ioctl = btrfs_ioctl, 10921 #ifdef CONFIG_COMPAT 10922 .compat_ioctl = btrfs_compat_ioctl, 10923 #endif 10924 .release = btrfs_release_file, 10925 .fsync = btrfs_sync_file, 10926 }; 10927 10928 /* 10929 * btrfs doesn't support the bmap operation because swapfiles 10930 * use bmap to make a mapping of extents in the file. They assume 10931 * these extents won't change over the life of the file and they 10932 * use the bmap result to do IO directly to the drive. 10933 * 10934 * the btrfs bmap call would return logical addresses that aren't 10935 * suitable for IO and they also will change frequently as COW 10936 * operations happen. So, swapfile + btrfs == corruption. 10937 * 10938 * For now we're avoiding this by dropping bmap. 
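 * Note that btrfs_aops below therefore has no .bmap callback;
 * swapfiles are instead supported through the .swap_activate and
 * .swap_deactivate callbacks.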
10939 */ 10940 static const struct address_space_operations btrfs_aops = { 10941 .read_folio = btrfs_read_folio, 10942 .writepages = btrfs_writepages, 10943 .readahead = btrfs_readahead, 10944 .direct_IO = noop_direct_IO, 10945 .invalidate_folio = btrfs_invalidate_folio, 10946 .release_folio = btrfs_release_folio, 10947 .migrate_folio = btrfs_migrate_folio, 10948 .dirty_folio = filemap_dirty_folio, 10949 .error_remove_page = generic_error_remove_page, 10950 .swap_activate = btrfs_swap_activate, 10951 .swap_deactivate = btrfs_swap_deactivate, 10952 }; 10953 10954 static const struct inode_operations btrfs_file_inode_operations = { 10955 .getattr = btrfs_getattr, 10956 .setattr = btrfs_setattr, 10957 .listxattr = btrfs_listxattr, 10958 .permission = btrfs_permission, 10959 .fiemap = btrfs_fiemap, 10960 .get_inode_acl = btrfs_get_acl, 10961 .set_acl = btrfs_set_acl, 10962 .update_time = btrfs_update_time, 10963 .fileattr_get = btrfs_fileattr_get, 10964 .fileattr_set = btrfs_fileattr_set, 10965 }; 10966 static const struct inode_operations btrfs_special_inode_operations = { 10967 .getattr = btrfs_getattr, 10968 .setattr = btrfs_setattr, 10969 .permission = btrfs_permission, 10970 .listxattr = btrfs_listxattr, 10971 .get_inode_acl = btrfs_get_acl, 10972 .set_acl = btrfs_set_acl, 10973 .update_time = btrfs_update_time, 10974 }; 10975 static const struct inode_operations btrfs_symlink_inode_operations = { 10976 .get_link = page_get_link, 10977 .getattr = btrfs_getattr, 10978 .setattr = btrfs_setattr, 10979 .permission = btrfs_permission, 10980 .listxattr = btrfs_listxattr, 10981 .update_time = btrfs_update_time, 10982 }; 10983 10984 const struct dentry_operations btrfs_dentry_operations = { 10985 .d_delete = btrfs_dentry_delete, 10986 }; 10987