// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"
#include "backref.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	struct btrfs_ordered_extent *ordered;
	bool data_space_reserved;
	bool nocow_done;
};

struct btrfs_dio_private {
	/* Range of I/O */
	u64 file_offset;
	u32 bytes;

	/* This must be last */
	struct btrfs_bio bbio;
};

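/*
 * Bioset used to allocate bios for the direct I/O path. Judging by the
 * layout of btrfs_dio_private above, each bio carries a btrfs_dio_private
 * as front padding, which is why the embedded bbio must come last.
 */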
static struct bio_set btrfs_dio_bioset;

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct page *locked_page, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

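/*
 * Callback for iterate_extent_inodes(), called once for each inode that
 * references the corrupted extent. Resolves the (root, inum) pair to file
 * paths and prints one warning per resolved path. Returning 0 lets the
 * iteration continue with the next inode.
 */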
static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * without the filename.
		 */
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			   warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			   warn->logical, warn->mirror_num, root, inum, offset,
			   fs_info->sectorsize, nlink,
			   (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		   warn->logical, warn->mirror_num, root, inum, offset, ret);

	free_ipath(ipath);
	return ret;
}

/*
 * Do extra user-friendly error output (e.g. lookup all the affected files).
 *
 * This does a backref lookup for the extent at the relocated logical address
 * and prints one warning per affected file. If the lookup fails, it falls
 * back to the old, path-less error message.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid, btrfs_ino(inode), file_off,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid,
			btrfs_ino(inode), file_off, logical,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				logical, mirror_num,
				(ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}

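/*
 * Print a message for a data checksum mismatch. For the data relocation tree
 * this resolves the corrupted range back to the affected files via a backref
 * lookup; for all other roots it prints a plain rate-limited warning.
 */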
tree %llu", 294 logical, mirror_num, 295 (ref_level ? "node" : "leaf"), 296 ref_level, ref_root); 297 } 298 btrfs_release_path(&path); 299 } else { 300 struct btrfs_backref_walk_ctx ctx = { 0 }; 301 struct data_reloc_warn reloc_warn = { 0 }; 302 303 btrfs_release_path(&path); 304 305 ctx.bytenr = found_key.objectid; 306 ctx.extent_item_pos = logical - found_key.objectid; 307 ctx.fs_info = fs_info; 308 309 reloc_warn.logical = logical; 310 reloc_warn.extent_item_size = found_key.offset; 311 reloc_warn.mirror_num = mirror_num; 312 reloc_warn.fs_info = fs_info; 313 314 iterate_extent_inodes(&ctx, true, 315 data_reloc_print_warning_inode, &reloc_warn); 316 } 317 } 318 319 static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode, 320 u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num) 321 { 322 struct btrfs_root *root = inode->root; 323 const u32 csum_size = root->fs_info->csum_size; 324 325 /* For data reloc tree, it's better to do a backref lookup instead. */ 326 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 327 return print_data_reloc_error(inode, logical_start, csum, 328 csum_expected, mirror_num); 329 330 /* Output without objectid, which is more meaningful */ 331 if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) { 332 btrfs_warn_rl(root->fs_info, 333 "csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 334 root->root_key.objectid, btrfs_ino(inode), 335 logical_start, 336 CSUM_FMT_VALUE(csum_size, csum), 337 CSUM_FMT_VALUE(csum_size, csum_expected), 338 mirror_num); 339 } else { 340 btrfs_warn_rl(root->fs_info, 341 "csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 342 root->root_key.objectid, btrfs_ino(inode), 343 logical_start, 344 CSUM_FMT_VALUE(csum_size, csum), 345 CSUM_FMT_VALUE(csum_size, csum_expected), 346 mirror_num); 347 } 348 } 349 350 /* 351 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed 352 * 353 * ilock_flags can have the following bit set: 354 * 355 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode 356 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt 357 * return -EAGAIN 358 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock 359 */ 360 int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags) 361 { 362 if (ilock_flags & BTRFS_ILOCK_SHARED) { 363 if (ilock_flags & BTRFS_ILOCK_TRY) { 364 if (!inode_trylock_shared(&inode->vfs_inode)) 365 return -EAGAIN; 366 else 367 return 0; 368 } 369 inode_lock_shared(&inode->vfs_inode); 370 } else { 371 if (ilock_flags & BTRFS_ILOCK_TRY) { 372 if (!inode_trylock(&inode->vfs_inode)) 373 return -EAGAIN; 374 else 375 return 0; 376 } 377 inode_lock(&inode->vfs_inode); 378 } 379 if (ilock_flags & BTRFS_ILOCK_MMAP) 380 down_write(&inode->i_mmap_lock); 381 return 0; 382 } 383 384 /* 385 * btrfs_inode_unlock - unock inode i_rwsem 386 * 387 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock() 388 * to decide whether the lock acquired is shared or exclusive. 389 */ 390 void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags) 391 { 392 if (ilock_flags & BTRFS_ILOCK_MMAP) 393 up_write(&inode->i_mmap_lock); 394 if (ilock_flags & BTRFS_ILOCK_SHARED) 395 inode_unlock_shared(&inode->vfs_inode); 396 else 397 inode_unlock(&inode->vfs_inode); 398 } 399 400 /* 401 * Cleanup all submitted ordered extents in specified range to handle errors 402 * from the btrfs_run_delalloc_range() callback. 
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work for inserting an inline extent into the btree.
 * The caller should have done a btrfs_drop_extents() so that no overlapping
 * inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_local_page(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_local(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_page(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}

/*
 * Conditionally insert an inline extent into the file. This does the checks
 * required to make sure the data is small enough to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
	 */
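	/*
	 * Illustrative example (assuming the common 4K sectorsize and the
	 * default max_inline of 2048 bytes): a 3000 byte file passes the
	 * sector size check, but can only be inlined if its data compresses
	 * down to at most 2048 bytes and the leaf has room for the item.
	 */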
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space: an inline extent won't
	 * count as a data extent, so free the reservation directly here.
	 * At reserve time the space is always aligned to the page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}

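/*
 * State for the async (compressed) delalloc path: an async_cow covers one
 * delalloc range and is split into 512K async_chunks, each processed by its
 * own work item; every chunk collects the async_extents produced by
 * compression until they are submitted for writeback.
 */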
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In the above case, both range A and range B will try to unlock the
	 * full page [0, 64K), causing the one finished later to find the page
	 * unlocked already, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * happen if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the tailing partial page will be locked until the full compression
	 * finishes, delaying the write of other range.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock all delalloc ranges
	 * first to prevent any submitted async extent from unlocking the full
	 * page.  By this, we can ensure for the subpage case that only the
	 * last async_cow will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}

/*
 * Work queue callback to start compression on a file and its pages.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus.  The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes.  This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int poff;
	int i;
	int compress_type = fs_info->compress_type;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	pages = NULL;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);

	/*
	 * We don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
				 BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages) {
		/*
		 * Memory allocation failure is not a fatal error, we can fall
		 * back to uncompressed code.
		 */
		goto cleanup_and_bail_uncompressed;
	}

	if (inode->defrag_compress)
		compress_type = inode->defrag_compress;
	else if (inode->prop_compress)
		compress_type = inode->prop_compress;

	/* Compression level is applied here. */
	ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
				   mapping, start, pages, &nr_pages, &total_in,
				   &total_compressed);
	if (ret)
		goto mark_incompressible;

	/*
	 * Zero the tail end of the last page, as we might be sending it down
	 * to disk.
	 */
	poff = offset_in_page(total_compressed);
	if (poff)
		memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an
	 * uncompressed inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		if (total_in < actual_end) {
			ret = cow_file_range_inline(inode, actual_end, 0,
						    BTRFS_COMPRESS_NONE, NULL,
						    false);
		} else {
			ret = cow_file_range_inline(inode, actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;

			if (ret < 0)
				mapping_set_error(mapping, -EIO);

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     PAGE_END_WRITEBACK);
			goto free_pages;
		}
	}

	/*
	 * We aren't doing an inline extent. Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);

	/*
	 * One last check to make sure the compression is really a win, compare
	 * the page count read with the blocks on disk, compression must free at
	 * least one sector.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	add_async_extent(async_chunk, start, total_in, total_compressed, pages,
			 nr_pages, compress_type);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
free_pages:
	if (pages) {
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
	}
}

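/*
 * Release the pages holding the compressed data for an async extent and
 * reset its page bookkeeping.
 */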
static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

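/*
 * Write back an async extent that ended up not being compressed: run the
 * regular COW path for the range, and on failure clean up the ordered
 * extents and finish the locked page ourselves.
 */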
static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
	wbc_detach_inode(&wbc);
	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);

			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			btrfs_mark_ordered_io_finished(inode, locked_page,
						       page_start, PAGE_SIZE,
						       !ret);
			mapping_set_error(locked_page->mapping, ret);
			unlock_page(locked_page);
		}
	}
}

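/*
 * Allocate a disk extent for one compressed async extent, create its extent
 * map and ordered extent, and submit the compressed pages for writeback.
 * Falls back to submit_uncompressed_range() if the extent was not compressed.
 */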
static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end, NULL);

	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
		submit_uncompressed_range(inode, async_extent, locked_page);
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		/*
		 * Here we used to try again by going back to non-compressed
		 * path for ENOSPC.  But we can't reserve space even for
		 * compressed size, how could it work for uncompressed size
		 * which requires larger size?  So here we directly go error
		 * path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size, /* len */
			  start, /* orig_start */
			  ins.objectid, /* block_start */
			  ins.offset, /* block_len */
			  ins.offset, /* orig_block_len */
			  async_extent->ram_size, /* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, /* file_offset */
				       async_extent->ram_size, /* num_bytes */
				       async_extent->ram_size, /* ram_bytes */
				       ins.objectid, /* disk_bytenr */
				       ins.offset, /* disk_num_bytes */
				       0, /* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_compressed_write(ordered,
			    async_extent->pages, /* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	free_async_extent_pages(async_extent);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    root->root_key.objectid, btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}

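/*
 * Pick a disk byte number to use as the allocator hint for a delalloc range,
 * based on the block start of a neighboring extent map (or the inode's first
 * mapped extent if that one is bogus).
 */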
static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * When extent_io.c finds a delayed allocation range in the file, the
 * callbacks end up in this code.  The basic idea is to allocate extents on
 * disk for the range, and create ordered data structs in ram to track those
 * extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all pages except @locked_page.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all pages including locked_page and starts I/O on them.
 * (In reality inline extents are limited to a single page, so locked_page is
 * the only page handled anyway).
 *
 * When this function succeeds and creates a normal extent, the page locking
 * status depends on the passed in flags:
 *
 * - If @keep_locked is set, all pages are kept locked.
 * - Else all pages except for @locked_page are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact.  So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents().  See btrfs_run_delalloc_range() for an
 * example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page, u64 start, u64 end,
				   u64 *done_offset,
				   bool keep_locked, bool no_inline)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of page, that means data writeback
	 * is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* Let's try to make an inline extent. */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() unlock it,
			 * as it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't determine if it's an inline extent or a
			 * compressed extent.
			 */
			unlock_page(locked_page);
			ret = 1;
			goto done;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		struct btrfs_ordered_extent *ordered;

		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret == -EAGAIN) {
			/*
			 * btrfs_reserve_extent only returns -EAGAIN for zoned
			 * file systems, which is an indication that there are
			 * no active zones to allocate from at the moment.
			 *
			 * If this is the first loop iteration, wait for at
			 * least one zone to finish before retrying the
			 * allocation.  Otherwise ask the caller to write out
			 * the already allocated blocks before coming back to
			 * us, or return -ENOSPC if it can't handle retries.
			 */
			ASSERT(btrfs_is_zoned(fs_info));
			if (start == orig_start) {
				wait_on_bit_io(&inode->root->fs_info->flags,
					       BTRFS_FS_NEED_ZONE_FINISH,
					       TASK_UNINTERRUPTIBLE);
				continue;
			}
			if (done_offset) {
				*done_offset = start - 1;
				return 0;
			}
			ret = -ENOSPC;
		}
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ordered = btrfs_alloc_ordered_extent(inode, start, ram_size,
					ram_size, ins.objectid, cur_alloc_size,
					0, 1 << BTRFS_ORDERED_REGULAR,
					BTRFS_COMPRESS_NONE);
		if (IS_ERR(ordered)) {
			ret = PTR_ERR(ordered);
			goto out_drop_extent_cache;
		}

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(ordered);

			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + ram_size - 1,
							    false);
		}
		btrfs_put_ordered_extent(ordered);

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits.
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly setup for writepage.
		 */
		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * btrfs_reloc_clone_csums() error, since start is increased
		 * extent_clear_unlock_delalloc() at out_unlock label won't
		 * free metadata of current ordered extent, we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
done:
	if (done_offset)
		*done_offset = end;
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g.
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of @keep_locked, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (keep_locked && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, 0, page_ops);
	}

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	if (start < end) {
		clear_bits |= EXTENT_CLEAR_DATA_RESV;
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
	}
	return ret;
}

/*
 * Phase two of compressed writeback.  This is the ordered portion of the code,
 * which only gets called in the order the work was queued.  We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	struct async_extent *async_extent;
	unsigned long nr_pages;
	u64 alloc_hint = 0;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}

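/*
 * Kick off the async (compressed) delalloc path: split the range into 512K
 * chunks and queue one work item per chunk, each running compression and
 * then submission.  Returns false if the context allocation failed and the
 * caller has to fall back to a synchronous path.
 */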
static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct page *locked_page, u64 start,
				    u64 end, struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	unlock_extent(&inode->io_tree, start, end, NULL);
	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_page.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async.  We want all of them to
			 * be accounted against wbc once.  Let's do it here
			 * before the paths diverge.  wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy.  Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, compress_file_range,
				submit_compressed_extents, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		start = cur_end + 1;
	}
	return true;
}

/*
 * Run the delalloc range from start to end, and write back any dirty pages
 * covered by the range.
 */
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct page *locked_page, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty)
{
	u64 done_offset = end;
	int ret;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end, &done_offset,
				     true, false);
		if (ret)
			return ret;
		extent_write_locked_range(&inode->vfs_inode, locked_page, start,
					  done_offset, wbc, pages_dirty);
		start = done_offset + 1;
	}

	return 1;
}

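/*
 * Check if any checksums exist for the given byte range of the underlying
 * extent.  Returns 0 if there are none, 1 if there is at least one, and a
 * negative errno on lookup failure.
 */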
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes, bool nowait)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
				      &list, 0, nowait);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}

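/*
 * Run COW writeback for a range we expected to NOCOW but can't (e.g. the
 * block group was turned read-only).  Re-adds the data space reservation
 * that the buffered write skipped for NOCOW ranges, as explained below,
 * then runs cow_file_range() without inline extents.
 */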
inode 1762 * has the NOCOW bit set). 1763 * 1764 * However when we need to fallback to COW mode (because for example the 1765 * block group for the corresponding extent was turned to RO mode by a 1766 * scrub or relocation) we need to do the following: 1767 * 1768 * 1) We increment the bytes_may_use counter of the data space info. 1769 * If COW succeeds, it allocates a new data extent and after doing 1770 * that it decrements the space info's bytes_may_use counter and 1771 * increments its bytes_reserved counter by the same amount (we do 1772 * this at btrfs_add_reserved_bytes()). So we need to increment the 1773 * bytes_may_use counter to compensate (when space is reserved at 1774 * buffered write time, the bytes_may_use counter is incremented); 1775 * 1776 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so 1777 * that if the COW path fails for any reason, it decrements (through 1778 * extent_clear_unlock_delalloc()) the bytes_may_use counter of the 1779 * data space info, which we incremented in the step above. 1780 * 1781 * If we need to fallback to cow and the inode corresponds to a free 1782 * space cache inode or an inode of the data relocation tree, we must 1783 * also increment bytes_may_use of the data space_info for the same 1784 * reason. Space caches and relocated data extents always get a prealloc 1785 * extent for them, however scrub or balance may have set the block 1786 * group that contains that extent to RO mode and therefore force COW 1787 * when starting writeback. 1788 */ 1789 count = count_range_bits(io_tree, &range_start, end, range_bytes, 1790 EXTENT_NORESERVE, 0, NULL); 1791 if (count > 0 || is_space_ino || is_reloc_ino) { 1792 u64 bytes = count; 1793 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1794 struct btrfs_space_info *sinfo = fs_info->data_sinfo; 1795 1796 if (is_space_ino || is_reloc_ino) 1797 bytes = range_bytes; 1798 1799 spin_lock(&sinfo->lock); 1800 btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes); 1801 spin_unlock(&sinfo->lock); 1802 1803 if (count > 0) 1804 clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, 1805 NULL); 1806 } 1807 1808 /* 1809 * Don't try to create inline extents, as a mix of inline extent that 1810 * is written out and unlocked directly and a normal NOCOW extent 1811 * doesn't work. 1812 */ 1813 ret = cow_file_range(inode, locked_page, start, end, NULL, false, true); 1814 ASSERT(ret != 1); 1815 return ret; 1816 } 1817 1818 struct can_nocow_file_extent_args { 1819 /* Input fields. */ 1820 1821 /* Start file offset of the range we want to NOCOW. */ 1822 u64 start; 1823 /* End file offset (inclusive) of the range we want to NOCOW. */ 1824 u64 end; 1825 bool writeback_path; 1826 bool strict; 1827 /* 1828 * Free the path passed to can_nocow_file_extent() once it's not needed 1829 * anymore. 1830 */ 1831 bool free_path; 1832 1833 /* Output fields. Only set when can_nocow_file_extent() returns 1. */ 1834 1835 u64 disk_bytenr; 1836 u64 disk_num_bytes; 1837 u64 extent_offset; 1838 /* Number of bytes that can be written to in NOCOW mode. */ 1839 u64 num_bytes; 1840 }; 1841 1842 /* 1843 * Check if we can NOCOW the file extent that the path points to. 1844 * This function may return with the path released, so the caller should check 1845 * if path->nodes[0] is NULL or not if it needs to use the path afterwards. 
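 *
 * Editor's summary (added for clarity; not part of the original comment):
 * roughly in the order tested below, COW is forced when any of these holds:
 *
 *   - the extent is inline;
 *   - the inode lacks BTRFS_INODE_NODATACOW and the extent is a regular
 *     (non-prealloc) extent;
 *   - the extent predates the subvolume's last snapshot (it may be shared)
 *     and args->strict is not set;
 *   - the extent is a hole (disk_bytenr == 0), or is compressed, encrypted
 *     or otherwise encoded;
 *   - a cross reference shows the extent is shared, a snapshot is pending,
 *     or checksums already exist for the target range.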
1846 * 1847 * Returns: < 0 on error 1848 * 0 if we can not NOCOW 1849 * 1 if we can NOCOW 1850 */ 1851 static int can_nocow_file_extent(struct btrfs_path *path, 1852 struct btrfs_key *key, 1853 struct btrfs_inode *inode, 1854 struct can_nocow_file_extent_args *args) 1855 { 1856 const bool is_freespace_inode = btrfs_is_free_space_inode(inode); 1857 struct extent_buffer *leaf = path->nodes[0]; 1858 struct btrfs_root *root = inode->root; 1859 struct btrfs_file_extent_item *fi; 1860 u64 extent_end; 1861 u8 extent_type; 1862 int can_nocow = 0; 1863 int ret = 0; 1864 bool nowait = path->nowait; 1865 1866 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 1867 extent_type = btrfs_file_extent_type(leaf, fi); 1868 1869 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 1870 goto out; 1871 1872 /* Can't access these fields unless we know it's not an inline extent. */ 1873 args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1874 args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1875 args->extent_offset = btrfs_file_extent_offset(leaf, fi); 1876 1877 if (!(inode->flags & BTRFS_INODE_NODATACOW) && 1878 extent_type == BTRFS_FILE_EXTENT_REG) 1879 goto out; 1880 1881 /* 1882 * If the extent was created before the generation where the last snapshot 1883 * for its subvolume was created, then this implies the extent is shared, 1884 * hence we must COW. 1885 */ 1886 if (!args->strict && 1887 btrfs_file_extent_generation(leaf, fi) <= 1888 btrfs_root_last_snapshot(&root->root_item)) 1889 goto out; 1890 1891 /* An explicit hole, must COW. */ 1892 if (args->disk_bytenr == 0) 1893 goto out; 1894 1895 /* Compressed/encrypted/encoded extents must be COWed. */ 1896 if (btrfs_file_extent_compression(leaf, fi) || 1897 btrfs_file_extent_encryption(leaf, fi) || 1898 btrfs_file_extent_other_encoding(leaf, fi)) 1899 goto out; 1900 1901 extent_end = btrfs_file_extent_end(path); 1902 1903 /* 1904 * The following checks can be expensive, as they need to take other 1905 * locks and do btree or rbtree searches, so release the path to avoid 1906 * blocking other tasks for too long. 1907 */ 1908 btrfs_release_path(path); 1909 1910 ret = btrfs_cross_ref_exist(root, btrfs_ino(inode), 1911 key->offset - args->extent_offset, 1912 args->disk_bytenr, args->strict, path); 1913 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1914 if (ret != 0) 1915 goto out; 1916 1917 if (args->free_path) { 1918 /* 1919 * We don't need the path anymore, plus through the 1920 * csum_exist_in_range() call below we will end up allocating 1921 * another path. So free the path to avoid unnecessary extra 1922 * memory usage. 1923 */ 1924 btrfs_free_path(path); 1925 path = NULL; 1926 } 1927 1928 /* If there are pending snapshots for this root, we must COW. */ 1929 if (args->writeback_path && !is_freespace_inode && 1930 atomic_read(&root->snapshot_force_cow)) 1931 goto out; 1932 1933 args->disk_bytenr += args->extent_offset; 1934 args->disk_bytenr += args->start - key->offset; 1935 args->num_bytes = min(args->end + 1, extent_end) - args->start; 1936 1937 /* 1938 * Force COW if csums exist in the range. This ensures that csums for a 1939 * given extent are either valid or do not exist. 1940 */ 1941 ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes, 1942 nowait); 1943 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1944 if (ret != 0) 1945 goto out; 1946 1947 can_nocow = 1; 1948 out: 1949 if (args->free_path && path) 1950 btrfs_free_path(path); 1951 1952 return ret < 0 ? 
ret : can_nocow; 1953 } 1954 1955 /* 1956 * The NOCOW writeback callback. This checks for snapshots or COW copies 1957 * of the extents that exist in the file, and COWs the file as required. 1958 * 1959 * If no COW copies or snapshots exist, we write directly to the existing 1960 * blocks on disk. 1961 */ 1962 static noinline int run_delalloc_nocow(struct btrfs_inode *inode, 1963 struct page *locked_page, 1964 const u64 start, const u64 end) 1965 { 1966 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1967 struct btrfs_root *root = inode->root; 1968 struct btrfs_path *path; 1969 u64 cow_start = (u64)-1; 1970 u64 cur_offset = start; 1971 int ret; 1972 bool check_prev = true; 1973 u64 ino = btrfs_ino(inode); 1974 struct can_nocow_file_extent_args nocow_args = { 0 }; 1975 1976 /* 1977 * Normally on a zoned device we're only doing COW writes, but relocation 1978 * on a zoned filesystem serializes I/O so that we're only writing 1979 * sequentially, and we can end up here as well. 1980 */ 1981 ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root)); 1982 1983 path = btrfs_alloc_path(); 1984 if (!path) { 1985 ret = -ENOMEM; 1986 goto error; 1987 } 1988 1989 nocow_args.end = end; 1990 nocow_args.writeback_path = true; 1991 1992 while (1) { 1993 struct btrfs_block_group *nocow_bg = NULL; 1994 struct btrfs_ordered_extent *ordered; 1995 struct btrfs_key found_key; 1996 struct btrfs_file_extent_item *fi; 1997 struct extent_buffer *leaf; 1998 u64 extent_end; 1999 u64 ram_bytes; 2000 u64 nocow_end; 2001 int extent_type; 2002 bool is_prealloc; 2003 2004 ret = btrfs_lookup_file_extent(NULL, root, path, ino, 2005 cur_offset, 0); 2006 if (ret < 0) 2007 goto error; 2008 2009 /* 2010 * If there is no extent for our range when doing the initial 2011 * search, then go back to the previous slot as it will be the 2012 * one containing the search offset 2013 */ 2014 if (ret > 0 && path->slots[0] > 0 && check_prev) { 2015 leaf = path->nodes[0]; 2016 btrfs_item_key_to_cpu(leaf, &found_key, 2017 path->slots[0] - 1); 2018 if (found_key.objectid == ino && 2019 found_key.type == BTRFS_EXTENT_DATA_KEY) 2020 path->slots[0]--; 2021 } 2022 check_prev = false; 2023 next_slot: 2024 /* Go to next leaf if we have exhausted the current one */ 2025 leaf = path->nodes[0]; 2026 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2027 ret = btrfs_next_leaf(root, path); 2028 if (ret < 0) 2029 goto error; 2030 if (ret > 0) 2031 break; 2032 leaf = path->nodes[0]; 2033 } 2034 2035 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2036 2037 /* Didn't find anything for our INO */ 2038 if (found_key.objectid > ino) 2039 break; 2040 /* 2041 * Keep searching until we find an EXTENT_DATA item or there are 2042 * no more extents for this inode 2043 */ 2044 if (WARN_ON_ONCE(found_key.objectid < ino) || 2045 found_key.type < BTRFS_EXTENT_DATA_KEY) { 2046 path->slots[0]++; 2047 goto next_slot; 2048 } 2049 2050 /* Found key is not EXTENT_DATA_KEY or starts after req range */ 2051 if (found_key.type > BTRFS_EXTENT_DATA_KEY || 2052 found_key.offset > end) 2053 break; 2054 2055 /* 2056 * If the found extent starts after requested offset, then 2057 * adjust extent_end to be right before this extent begins 2058 */ 2059 if (found_key.offset > cur_offset) { 2060 extent_end = found_key.offset; 2061 extent_type = 0; 2062 goto must_cow; 2063 } 2064 2065 /* 2066 * Found an extent which begins before our range and potentially 2067 * intersects it 2068 */ 2069 fi = btrfs_item_ptr(leaf, path->slots[0], 2070 struct btrfs_file_extent_item);
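/*
 * Editor's sketch (added; not part of the original source): the skip logic
 * below is plain interval arithmetic. For illustration, assume
 * cur_offset == 8192 and the item covers file range [0, 16384):
 *
 *	extent_end = 16384;		  (btrfs_file_extent_end())
 *	extent_end <= cur_offset	  16384 <= 8192 -> false
 *
 * so the extent is not skipped and is handed to can_nocow_file_extent().
 * Only extents ending at or before cur_offset take the
 * "path->slots[0]++; goto next_slot" exit.
 */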
2071 extent_type = btrfs_file_extent_type(leaf, fi); 2072 /* If this is triggered then we have a memory corruption. */ 2073 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES); 2074 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) { 2075 ret = -EUCLEAN; 2076 goto error; 2077 } 2078 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 2079 extent_end = btrfs_file_extent_end(path); 2080 2081 /* 2082 * If the extent we got ends before our current offset, skip to 2083 * the next extent. 2084 */ 2085 if (extent_end <= cur_offset) { 2086 path->slots[0]++; 2087 goto next_slot; 2088 } 2089 2090 nocow_args.start = cur_offset; 2091 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args); 2092 if (ret < 0) 2093 goto error; 2094 if (ret == 0) 2095 goto must_cow; 2096 2097 ret = 0; 2098 nocow_bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr); 2099 if (!nocow_bg) { 2100 must_cow: 2101 /* 2102 * If we can't perform NOCOW writeback for the range, 2103 * then record the beginning of the range that needs to 2104 * be COWed. It will be written out before the next 2105 * NOCOW range if we find one, or when exiting this 2106 * loop. 2107 */ 2108 if (cow_start == (u64)-1) 2109 cow_start = cur_offset; 2110 cur_offset = extent_end; 2111 if (cur_offset > end) 2112 break; 2113 if (!path->nodes[0]) 2114 continue; 2115 path->slots[0]++; 2116 goto next_slot; 2117 } 2118 2119 /* 2120 * COW range from cow_start to found_key.offset - 1. As the key 2121 * will contain the beginning of the first extent that can be 2122 * NOCOW, following one which needs to be COW'ed 2123 */ 2124 if (cow_start != (u64)-1) { 2125 ret = fallback_to_cow(inode, locked_page, 2126 cow_start, found_key.offset - 1); 2127 cow_start = (u64)-1; 2128 if (ret) { 2129 btrfs_dec_nocow_writers(nocow_bg); 2130 goto error; 2131 } 2132 } 2133 2134 nocow_end = cur_offset + nocow_args.num_bytes - 1; 2135 is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC; 2136 if (is_prealloc) { 2137 u64 orig_start = found_key.offset - nocow_args.extent_offset; 2138 struct extent_map *em; 2139 2140 em = create_io_em(inode, cur_offset, nocow_args.num_bytes, 2141 orig_start, 2142 nocow_args.disk_bytenr, /* block_start */ 2143 nocow_args.num_bytes, /* block_len */ 2144 nocow_args.disk_num_bytes, /* orig_block_len */ 2145 ram_bytes, BTRFS_COMPRESS_NONE, 2146 BTRFS_ORDERED_PREALLOC); 2147 if (IS_ERR(em)) { 2148 btrfs_dec_nocow_writers(nocow_bg); 2149 ret = PTR_ERR(em); 2150 goto error; 2151 } 2152 free_extent_map(em); 2153 } 2154 2155 ordered = btrfs_alloc_ordered_extent(inode, cur_offset, 2156 nocow_args.num_bytes, nocow_args.num_bytes, 2157 nocow_args.disk_bytenr, nocow_args.num_bytes, 0, 2158 is_prealloc 2159 ? (1 << BTRFS_ORDERED_PREALLOC) 2160 : (1 << BTRFS_ORDERED_NOCOW), 2161 BTRFS_COMPRESS_NONE); 2162 btrfs_dec_nocow_writers(nocow_bg); 2163 if (IS_ERR(ordered)) { 2164 if (is_prealloc) { 2165 btrfs_drop_extent_map_range(inode, cur_offset, 2166 nocow_end, false); 2167 } 2168 ret = PTR_ERR(ordered); 2169 goto error; 2170 } 2171 2172 if (btrfs_is_data_reloc_root(root)) 2173 /* 2174 * Error handled later, as we must prevent 2175 * extent_clear_unlock_delalloc() in error handler 2176 * from freeing metadata of created ordered extent. 
2177 */ 2178 ret = btrfs_reloc_clone_csums(ordered); 2179 btrfs_put_ordered_extent(ordered); 2180 2181 extent_clear_unlock_delalloc(inode, cur_offset, nocow_end, 2182 locked_page, EXTENT_LOCKED | 2183 EXTENT_DELALLOC | 2184 EXTENT_CLEAR_DATA_RESV, 2185 PAGE_UNLOCK | PAGE_SET_ORDERED); 2186 2187 cur_offset = extent_end; 2188 2189 /* 2190 * btrfs_reloc_clone_csums() error, now we're OK to call error 2191 * handler, as metadata for created ordered extent will only 2192 * be freed by btrfs_finish_ordered_io(). 2193 */ 2194 if (ret) 2195 goto error; 2196 if (cur_offset > end) 2197 break; 2198 } 2199 btrfs_release_path(path); 2200 2201 if (cur_offset <= end && cow_start == (u64)-1) 2202 cow_start = cur_offset; 2203 2204 if (cow_start != (u64)-1) { 2205 cur_offset = end; 2206 ret = fallback_to_cow(inode, locked_page, cow_start, end); 2207 cow_start = (u64)-1; 2208 if (ret) 2209 goto error; 2210 } 2211 2212 btrfs_free_path(path); 2213 return 0; 2214 2215 error: 2216 /* 2217 * If an error happened while a COW region is outstanding, cur_offset 2218 * needs to be reset to cow_start to ensure the COW region is unlocked 2219 * as well. 2220 */ 2221 if (cow_start != (u64)-1) 2222 cur_offset = cow_start; 2223 if (cur_offset < end) 2224 extent_clear_unlock_delalloc(inode, cur_offset, end, 2225 locked_page, EXTENT_LOCKED | 2226 EXTENT_DELALLOC | EXTENT_DEFRAG | 2227 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 2228 PAGE_START_WRITEBACK | 2229 PAGE_END_WRITEBACK); 2230 btrfs_free_path(path); 2231 return ret; 2232 } 2233 2234 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end) 2235 { 2236 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { 2237 if (inode->defrag_bytes && 2238 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, 2239 0, NULL)) 2240 return false; 2241 return true; 2242 } 2243 return false; 2244 } 2245 2246 /* 2247 * Function to process delayed allocation (create CoW) for ranges which are 2248 * being touched for the first time. 2249 */ 2250 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page, 2251 u64 start, u64 end, struct writeback_control *wbc) 2252 { 2253 const bool zoned = btrfs_is_zoned(inode->root->fs_info); 2254 int ret; 2255 2256 /* 2257 * The range must cover part of the @locked_page, or a return of 1 2258 * can confuse the caller. 
2259 */ 2260 ASSERT(!(end <= page_offset(locked_page) || 2261 start >= page_offset(locked_page) + PAGE_SIZE)); 2262 2263 if (should_nocow(inode, start, end)) { 2264 ret = run_delalloc_nocow(inode, locked_page, start, end); 2265 goto out; 2266 } 2267 2268 if (btrfs_inode_can_compress(inode) && 2269 inode_need_compress(inode, start, end) && 2270 run_delalloc_compressed(inode, locked_page, start, end, wbc)) 2271 return 1; 2272 2273 if (zoned) 2274 ret = run_delalloc_cow(inode, locked_page, start, end, wbc, 2275 true); 2276 else 2277 ret = cow_file_range(inode, locked_page, start, end, NULL, 2278 false, false); 2279 2280 out: 2281 if (ret < 0) 2282 btrfs_cleanup_ordered_extents(inode, locked_page, start, 2283 end - start + 1); 2284 return ret; 2285 } 2286 2287 void btrfs_split_delalloc_extent(struct btrfs_inode *inode, 2288 struct extent_state *orig, u64 split) 2289 { 2290 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2291 u64 size; 2292 2293 /* not delalloc, ignore it */ 2294 if (!(orig->state & EXTENT_DELALLOC)) 2295 return; 2296 2297 size = orig->end - orig->start + 1; 2298 if (size > fs_info->max_extent_size) { 2299 u32 num_extents; 2300 u64 new_size; 2301 2302 /* 2303 * See the explanation in btrfs_merge_delalloc_extent, the same 2304 * applies here, just in reverse. 2305 */ 2306 new_size = orig->end - split + 1; 2307 num_extents = count_max_extents(fs_info, new_size); 2308 new_size = split - orig->start; 2309 num_extents += count_max_extents(fs_info, new_size); 2310 if (count_max_extents(fs_info, size) >= num_extents) 2311 return; 2312 } 2313 2314 spin_lock(&inode->lock); 2315 btrfs_mod_outstanding_extents(inode, 1); 2316 spin_unlock(&inode->lock); 2317 } 2318 2319 /* 2320 * Handle merged delayed allocation extents so we can keep track of new extents 2321 * that are just merged onto old extents, such as when we are doing sequential 2322 * writes, so we can properly account for the metadata space we'll need. 2323 */ 2324 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new, 2325 struct extent_state *other) 2326 { 2327 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2328 u64 new_size, old_size; 2329 u32 num_extents; 2330 2331 /* not delalloc, ignore it */ 2332 if (!(other->state & EXTENT_DELALLOC)) 2333 return; 2334 2335 if (new->start > other->start) 2336 new_size = new->end - other->start + 1; 2337 else 2338 new_size = other->end - new->start + 1; 2339 2340 /* we're not bigger than the max, unreserve the space and go */ 2341 if (new_size <= fs_info->max_extent_size) { 2342 spin_lock(&inode->lock); 2343 btrfs_mod_outstanding_extents(inode, -1); 2344 spin_unlock(&inode->lock); 2345 return; 2346 } 2347 2348 /* 2349 * We have to add up either side to figure out how many extents were 2350 * accounted for before we merged into one big extent. If the number of 2351 * extents we accounted for is <= the amount we need for the new range 2352 * then we can return, otherwise drop. Think of it like this 2353 * 2354 * [ 4k][MAX_SIZE] 2355 * 2356 * So we've grown the extent by a MAX_SIZE extent, this would mean we 2357 * need 2 outstanding extents, on one side we have 1 and the other side 2358 * we have 1 so they are == and we can return. But in this case 2359 * 2360 * [MAX_SIZE+4k][MAX_SIZE+4k] 2361 * 2362 * Each range on their own accounts for 2 extents, but merged together 2363 * they are only 3 extents worth of accounting, so we need to drop in 2364 * this case. 
2365 */ 2366 old_size = other->end - other->start + 1; 2367 num_extents = count_max_extents(fs_info, old_size); 2368 old_size = new->end - new->start + 1; 2369 num_extents += count_max_extents(fs_info, old_size); 2370 if (count_max_extents(fs_info, new_size) >= num_extents) 2371 return; 2372 2373 spin_lock(&inode->lock); 2374 btrfs_mod_outstanding_extents(inode, -1); 2375 spin_unlock(&inode->lock); 2376 } 2377 2378 static void btrfs_add_delalloc_inodes(struct btrfs_root *root, 2379 struct btrfs_inode *inode) 2380 { 2381 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2382 2383 spin_lock(&root->delalloc_lock); 2384 if (list_empty(&inode->delalloc_inodes)) { 2385 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes); 2386 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags); 2387 root->nr_delalloc_inodes++; 2388 if (root->nr_delalloc_inodes == 1) { 2389 spin_lock(&fs_info->delalloc_root_lock); 2390 BUG_ON(!list_empty(&root->delalloc_root)); 2391 list_add_tail(&root->delalloc_root, 2392 &fs_info->delalloc_roots); 2393 spin_unlock(&fs_info->delalloc_root_lock); 2394 } 2395 } 2396 spin_unlock(&root->delalloc_lock); 2397 } 2398 2399 void __btrfs_del_delalloc_inode(struct btrfs_root *root, 2400 struct btrfs_inode *inode) 2401 { 2402 struct btrfs_fs_info *fs_info = root->fs_info; 2403 2404 if (!list_empty(&inode->delalloc_inodes)) { 2405 list_del_init(&inode->delalloc_inodes); 2406 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2407 &inode->runtime_flags); 2408 root->nr_delalloc_inodes--; 2409 if (!root->nr_delalloc_inodes) { 2410 ASSERT(list_empty(&root->delalloc_inodes)); 2411 spin_lock(&fs_info->delalloc_root_lock); 2412 BUG_ON(list_empty(&root->delalloc_root)); 2413 list_del_init(&root->delalloc_root); 2414 spin_unlock(&fs_info->delalloc_root_lock); 2415 } 2416 } 2417 } 2418 2419 static void btrfs_del_delalloc_inode(struct btrfs_root *root, 2420 struct btrfs_inode *inode) 2421 { 2422 spin_lock(&root->delalloc_lock); 2423 __btrfs_del_delalloc_inode(root, inode); 2424 spin_unlock(&root->delalloc_lock); 2425 } 2426 2427 /* 2428 * Properly track delayed allocation bytes in the inode and to maintain the 2429 * list of inodes that have pending delalloc work to be done. 
2430 */ 2431 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state, 2432 u32 bits) 2433 { 2434 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2435 2436 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC)) 2437 WARN_ON(1); 2438 /* 2439 * set_bit and clear bit hooks normally require _irqsave/restore 2440 * but in this case, we are only testing for the DELALLOC 2441 * bit, which is only set or cleared with irqs on 2442 */ 2443 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2444 struct btrfs_root *root = inode->root; 2445 u64 len = state->end + 1 - state->start; 2446 u32 num_extents = count_max_extents(fs_info, len); 2447 bool do_list = !btrfs_is_free_space_inode(inode); 2448 2449 spin_lock(&inode->lock); 2450 btrfs_mod_outstanding_extents(inode, num_extents); 2451 spin_unlock(&inode->lock); 2452 2453 /* For sanity tests */ 2454 if (btrfs_is_testing(fs_info)) 2455 return; 2456 2457 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 2458 fs_info->delalloc_batch); 2459 spin_lock(&inode->lock); 2460 inode->delalloc_bytes += len; 2461 if (bits & EXTENT_DEFRAG) 2462 inode->defrag_bytes += len; 2463 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2464 &inode->runtime_flags)) 2465 btrfs_add_delalloc_inodes(root, inode); 2466 spin_unlock(&inode->lock); 2467 } 2468 2469 if (!(state->state & EXTENT_DELALLOC_NEW) && 2470 (bits & EXTENT_DELALLOC_NEW)) { 2471 spin_lock(&inode->lock); 2472 inode->new_delalloc_bytes += state->end + 1 - state->start; 2473 spin_unlock(&inode->lock); 2474 } 2475 } 2476 2477 /* 2478 * Once a range is no longer delalloc this function ensures that proper 2479 * accounting happens. 2480 */ 2481 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode, 2482 struct extent_state *state, u32 bits) 2483 { 2484 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2485 u64 len = state->end + 1 - state->start; 2486 u32 num_extents = count_max_extents(fs_info, len); 2487 2488 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) { 2489 spin_lock(&inode->lock); 2490 inode->defrag_bytes -= len; 2491 spin_unlock(&inode->lock); 2492 } 2493 2494 /* 2495 * set_bit and clear bit hooks normally require _irqsave/restore 2496 * but in this case, we are only testing for the DELALLOC 2497 * bit, which is only set or cleared with irqs on 2498 */ 2499 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2500 struct btrfs_root *root = inode->root; 2501 bool do_list = !btrfs_is_free_space_inode(inode); 2502 2503 spin_lock(&inode->lock); 2504 btrfs_mod_outstanding_extents(inode, -num_extents); 2505 spin_unlock(&inode->lock); 2506 2507 /* 2508 * We don't reserve metadata space for space cache inodes so we 2509 * don't need to call delalloc_release_metadata if there is an 2510 * error. 2511 */ 2512 if (bits & EXTENT_CLEAR_META_RESV && 2513 root != fs_info->tree_root) 2514 btrfs_delalloc_release_metadata(inode, len, false); 2515 2516 /* For sanity tests. 
*/ 2517 if (btrfs_is_testing(fs_info)) 2518 return; 2519 2520 if (!btrfs_is_data_reloc_root(root) && 2521 do_list && !(state->state & EXTENT_NORESERVE) && 2522 (bits & EXTENT_CLEAR_DATA_RESV)) 2523 btrfs_free_reserved_data_space_noquota(fs_info, len); 2524 2525 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 2526 fs_info->delalloc_batch); 2527 spin_lock(&inode->lock); 2528 inode->delalloc_bytes -= len; 2529 if (do_list && inode->delalloc_bytes == 0 && 2530 test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2531 &inode->runtime_flags)) 2532 btrfs_del_delalloc_inode(root, inode); 2533 spin_unlock(&inode->lock); 2534 } 2535 2536 if ((state->state & EXTENT_DELALLOC_NEW) && 2537 (bits & EXTENT_DELALLOC_NEW)) { 2538 spin_lock(&inode->lock); 2539 ASSERT(inode->new_delalloc_bytes >= len); 2540 inode->new_delalloc_bytes -= len; 2541 if (bits & EXTENT_ADD_INODE_BYTES) 2542 inode_add_bytes(&inode->vfs_inode, len); 2543 spin_unlock(&inode->lock); 2544 } 2545 } 2546 2547 static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio, 2548 struct btrfs_ordered_extent *ordered) 2549 { 2550 u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; 2551 u64 len = bbio->bio.bi_iter.bi_size; 2552 struct btrfs_ordered_extent *new; 2553 int ret; 2554 2555 /* Must always be called for the beginning of an ordered extent. */ 2556 if (WARN_ON_ONCE(start != ordered->disk_bytenr)) 2557 return -EINVAL; 2558 2559 /* No need to split if the ordered extent covers the entire bio. */ 2560 if (ordered->disk_num_bytes == len) { 2561 refcount_inc(&ordered->refs); 2562 bbio->ordered = ordered; 2563 return 0; 2564 } 2565 2566 /* 2567 * Don't split the extent_map for NOCOW extents, as we're writing into 2568 * a pre-existing one. 2569 */ 2570 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { 2571 ret = split_extent_map(bbio->inode, bbio->file_offset, 2572 ordered->num_bytes, len, 2573 ordered->disk_bytenr); 2574 if (ret) 2575 return ret; 2576 } 2577 2578 new = btrfs_split_ordered_extent(ordered, len); 2579 if (IS_ERR(new)) 2580 return PTR_ERR(new); 2581 bbio->ordered = new; 2582 return 0; 2583 } 2584 2585 /* 2586 * given a list of ordered sums record them in the inode. This happens 2587 * at IO completion time based on sums calculated at bio submission time. 
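 *
 * Editor's note (added; not in the original comment): the rough life cycle,
 * as far as this file is concerned, is:
 *
 *	submit bio   -> csums are computed and queued on the ordered
 *	                extent's list
 *	IO completes -> btrfs_finish_one_ordered() -> add_pending_csums()
 *	             -> btrfs_csum_file_blocks() inserts them in the csum tree
 *
 * so csum tree items only appear after the data they describe is on disk.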
2588 */ 2589 static int add_pending_csums(struct btrfs_trans_handle *trans, 2590 struct list_head *list) 2591 { 2592 struct btrfs_ordered_sum *sum; 2593 struct btrfs_root *csum_root = NULL; 2594 int ret; 2595 2596 list_for_each_entry(sum, list, list) { 2597 trans->adding_csums = true; 2598 if (!csum_root) 2599 csum_root = btrfs_csum_root(trans->fs_info, 2600 sum->logical); 2601 ret = btrfs_csum_file_blocks(trans, csum_root, sum); 2602 trans->adding_csums = false; 2603 if (ret) 2604 return ret; 2605 } 2606 return 0; 2607 } 2608 2609 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, 2610 const u64 start, 2611 const u64 len, 2612 struct extent_state **cached_state) 2613 { 2614 u64 search_start = start; 2615 const u64 end = start + len - 1; 2616 2617 while (search_start < end) { 2618 const u64 search_len = end - search_start + 1; 2619 struct extent_map *em; 2620 u64 em_len; 2621 int ret = 0; 2622 2623 em = btrfs_get_extent(inode, NULL, 0, search_start, search_len); 2624 if (IS_ERR(em)) 2625 return PTR_ERR(em); 2626 2627 if (em->block_start != EXTENT_MAP_HOLE) 2628 goto next; 2629 2630 em_len = em->len; 2631 if (em->start < search_start) 2632 em_len -= search_start - em->start; 2633 if (em_len > search_len) 2634 em_len = search_len; 2635 2636 ret = set_extent_bit(&inode->io_tree, search_start, 2637 search_start + em_len - 1, 2638 EXTENT_DELALLOC_NEW, cached_state); 2639 next: 2640 search_start = extent_map_end(em); 2641 free_extent_map(em); 2642 if (ret) 2643 return ret; 2644 } 2645 return 0; 2646 } 2647 2648 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 2649 unsigned int extra_bits, 2650 struct extent_state **cached_state) 2651 { 2652 WARN_ON(PAGE_ALIGNED(end)); 2653 2654 if (start >= i_size_read(&inode->vfs_inode) && 2655 !(inode->flags & BTRFS_INODE_PREALLOC)) { 2656 /* 2657 * There can't be any extents following eof in this case so just 2658 * set the delalloc new bit for the range directly. 2659 */ 2660 extra_bits |= EXTENT_DELALLOC_NEW; 2661 } else { 2662 int ret; 2663 2664 ret = btrfs_find_new_delalloc_bytes(inode, start, 2665 end + 1 - start, 2666 cached_state); 2667 if (ret) 2668 return ret; 2669 } 2670 2671 return set_extent_bit(&inode->io_tree, start, end, 2672 EXTENT_DELALLOC | extra_bits, cached_state); 2673 } 2674 2675 /* see btrfs_writepage_start_hook for details on why this is required */ 2676 struct btrfs_writepage_fixup { 2677 struct page *page; 2678 struct btrfs_inode *inode; 2679 struct btrfs_work work; 2680 }; 2681 2682 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2683 { 2684 struct btrfs_writepage_fixup *fixup = 2685 container_of(work, struct btrfs_writepage_fixup, work); 2686 struct btrfs_ordered_extent *ordered; 2687 struct extent_state *cached_state = NULL; 2688 struct extent_changeset *data_reserved = NULL; 2689 struct page *page = fixup->page; 2690 struct btrfs_inode *inode = fixup->inode; 2691 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2692 u64 page_start = page_offset(page); 2693 u64 page_end = page_offset(page) + PAGE_SIZE - 1; 2694 int ret = 0; 2695 bool free_delalloc_space = true; 2696 2697 /* 2698 * This is similar to page_mkwrite, we need to reserve the space before 2699 * we take the page lock. 2700 */ 2701 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2702 PAGE_SIZE); 2703 again: 2704 lock_page(page); 2705 2706 /* 2707 * Before we queued this fixup, we took a reference on the page. 
2708 * page->mapping may go NULL, but it shouldn't be moved to a different 2709 * address space. 2710 */ 2711 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 2712 /* 2713 * Unfortunately this is a little tricky, either 2714 * 2715 * 1) We got here and our page had already been dealt with and 2716 * we reserved our space, thus ret == 0, so we need to just 2717 * drop our space reservation and bail. This can happen the 2718 * first time we come into the fixup worker, or could happen 2719 * while waiting for the ordered extent. 2720 * 2) Our page was already dealt with, but we happened to get an 2721 * ENOSPC above from the btrfs_delalloc_reserve_space. In 2722 * this case we obviously don't have anything to release, but 2723 * because the page was already dealt with we don't want to 2724 * mark the page with an error, so make sure we're resetting 2725 * ret to 0. This is why we have this check _before_ the ret 2726 * check, because we do not want to have a surprise ENOSPC 2727 * when the page was already properly dealt with. 2728 */ 2729 if (!ret) { 2730 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2731 btrfs_delalloc_release_space(inode, data_reserved, 2732 page_start, PAGE_SIZE, 2733 true); 2734 } 2735 ret = 0; 2736 goto out_page; 2737 } 2738 2739 /* 2740 * We can't mess with the page state unless it is locked, so now that 2741 * it is locked bail if we failed to make our space reservation. 2742 */ 2743 if (ret) 2744 goto out_page; 2745 2746 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2747 2748 /* already ordered? We're done */ 2749 if (PageOrdered(page)) 2750 goto out_reserved; 2751 2752 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); 2753 if (ordered) { 2754 unlock_extent(&inode->io_tree, page_start, page_end, 2755 &cached_state); 2756 unlock_page(page); 2757 btrfs_start_ordered_extent(ordered); 2758 btrfs_put_ordered_extent(ordered); 2759 goto again; 2760 } 2761 2762 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 2763 &cached_state); 2764 if (ret) 2765 goto out_reserved; 2766 2767 /* 2768 * Everything went as planned, we're now the owner of a dirty page with 2769 * delayed allocation bits set and space reserved for our COW 2770 * destination. 2771 * 2772 * The page was dirty when we started, nothing should have cleaned it. 2773 */ 2774 BUG_ON(!PageDirty(page)); 2775 free_delalloc_space = false; 2776 out_reserved: 2777 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2778 if (free_delalloc_space) 2779 btrfs_delalloc_release_space(inode, data_reserved, page_start, 2780 PAGE_SIZE, true); 2781 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2782 out_page: 2783 if (ret) { 2784 /* 2785 * We hit ENOSPC or other errors. Update the mapping and page 2786 * to reflect the errors and clean the page. 2787 */ 2788 mapping_set_error(page->mapping, ret); 2789 btrfs_mark_ordered_io_finished(inode, page, page_start, 2790 PAGE_SIZE, !ret); 2791 clear_page_dirty_for_io(page); 2792 } 2793 btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE); 2794 unlock_page(page); 2795 put_page(page); 2796 kfree(fixup); 2797 extent_changeset_free(data_reserved); 2798 /* 2799 * As a precaution, do a delayed iput in case it would be the last iput 2800 * that could need flushing space. Recursing back to fixup worker would 2801 * deadlock. 
2802 */ 2803 btrfs_add_delayed_iput(inode); 2804 } 2805 2806 /* 2807 * There are a few paths in the higher layers of the kernel that directly 2808 * set the page dirty bit without asking the filesystem if it is a 2809 * good idea. This causes problems because we want to make sure COW 2810 * properly happens and the data=ordered rules are followed. 2811 * 2812 * In our case any range that doesn't have the ORDERED bit set 2813 * hasn't been properly setup for IO. We kick off an async process 2814 * to fix it up. The async helper will wait for ordered extents, set 2815 * the delalloc bit and make it safe to write the page. 2816 */ 2817 int btrfs_writepage_cow_fixup(struct page *page) 2818 { 2819 struct inode *inode = page->mapping->host; 2820 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2821 struct btrfs_writepage_fixup *fixup; 2822 2823 /* This page has ordered extent covering it already */ 2824 if (PageOrdered(page)) 2825 return 0; 2826 2827 /* 2828 * PageChecked is set below when we create a fixup worker for this page, 2829 * don't try to create another one if we're already PageChecked() 2830 * 2831 * The extent_io writepage code will redirty the page if we send back 2832 * EAGAIN. 2833 */ 2834 if (PageChecked(page)) 2835 return -EAGAIN; 2836 2837 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2838 if (!fixup) 2839 return -EAGAIN; 2840 2841 /* 2842 * We are already holding a reference to this inode from 2843 * write_cache_pages. We need to hold it because the space reservation 2844 * takes place outside of the page lock, and we can't trust 2845 * page->mapping outside of the page lock. 2846 */ 2847 ihold(inode); 2848 btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE); 2849 get_page(page); 2850 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); 2851 fixup->page = page; 2852 fixup->inode = BTRFS_I(inode); 2853 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); 2854 2855 return -EAGAIN; 2856 } 2857 2858 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2859 struct btrfs_inode *inode, u64 file_pos, 2860 struct btrfs_file_extent_item *stack_fi, 2861 const bool update_inode_bytes, 2862 u64 qgroup_reserved) 2863 { 2864 struct btrfs_root *root = inode->root; 2865 const u64 sectorsize = root->fs_info->sectorsize; 2866 struct btrfs_path *path; 2867 struct extent_buffer *leaf; 2868 struct btrfs_key ins; 2869 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); 2870 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi); 2871 u64 offset = btrfs_stack_file_extent_offset(stack_fi); 2872 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi); 2873 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi); 2874 struct btrfs_drop_extents_args drop_args = { 0 }; 2875 int ret; 2876 2877 path = btrfs_alloc_path(); 2878 if (!path) 2879 return -ENOMEM; 2880 2881 /* 2882 * we may be replacing one extent in the tree with another. 2883 * The new extent is pinned in the extent map, and we don't want 2884 * to drop it from the cache until it is completely in the btree. 2885 * 2886 * So, tell btrfs_drop_extents to leave this extent in the cache. 2887 * the caller is expected to unpin it and allow it to be merged 2888 * with the others. 
2889 */ 2890 drop_args.path = path; 2891 drop_args.start = file_pos; 2892 drop_args.end = file_pos + num_bytes; 2893 drop_args.replace_extent = true; 2894 drop_args.extent_item_size = sizeof(*stack_fi); 2895 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 2896 if (ret) 2897 goto out; 2898 2899 if (!drop_args.extent_inserted) { 2900 ins.objectid = btrfs_ino(inode); 2901 ins.offset = file_pos; 2902 ins.type = BTRFS_EXTENT_DATA_KEY; 2903 2904 ret = btrfs_insert_empty_item(trans, root, path, &ins, 2905 sizeof(*stack_fi)); 2906 if (ret) 2907 goto out; 2908 } 2909 leaf = path->nodes[0]; 2910 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); 2911 write_extent_buffer(leaf, stack_fi, 2912 btrfs_item_ptr_offset(leaf, path->slots[0]), 2913 sizeof(struct btrfs_file_extent_item)); 2914 2915 btrfs_mark_buffer_dirty(leaf); 2916 btrfs_release_path(path); 2917 2918 /* 2919 * If we dropped an inline extent here, we know the range where it is 2920 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the 2921 * number of bytes only for that range containing the inline extent. 2922 * The remaining of the range will be processed when clearning the 2923 * EXTENT_DELALLOC_BIT bit through the ordered extent completion. 2924 */ 2925 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) { 2926 u64 inline_size = round_down(drop_args.bytes_found, sectorsize); 2927 2928 inline_size = drop_args.bytes_found - inline_size; 2929 btrfs_update_inode_bytes(inode, sectorsize, inline_size); 2930 drop_args.bytes_found -= inline_size; 2931 num_bytes -= sectorsize; 2932 } 2933 2934 if (update_inode_bytes) 2935 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found); 2936 2937 ins.objectid = disk_bytenr; 2938 ins.offset = disk_num_bytes; 2939 ins.type = BTRFS_EXTENT_ITEM_KEY; 2940 2941 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes); 2942 if (ret) 2943 goto out; 2944 2945 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode), 2946 file_pos - offset, 2947 qgroup_reserved, &ins); 2948 out: 2949 btrfs_free_path(path); 2950 2951 return ret; 2952 } 2953 2954 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, 2955 u64 start, u64 len) 2956 { 2957 struct btrfs_block_group *cache; 2958 2959 cache = btrfs_lookup_block_group(fs_info, start); 2960 ASSERT(cache); 2961 2962 spin_lock(&cache->lock); 2963 cache->delalloc_bytes -= len; 2964 spin_unlock(&cache->lock); 2965 2966 btrfs_put_block_group(cache); 2967 } 2968 2969 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans, 2970 struct btrfs_ordered_extent *oe) 2971 { 2972 struct btrfs_file_extent_item stack_fi; 2973 bool update_inode_bytes; 2974 u64 num_bytes = oe->num_bytes; 2975 u64 ram_bytes = oe->ram_bytes; 2976 2977 memset(&stack_fi, 0, sizeof(stack_fi)); 2978 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG); 2979 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); 2980 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, 2981 oe->disk_num_bytes); 2982 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset); 2983 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) { 2984 num_bytes = oe->truncated_len; 2985 ram_bytes = num_bytes; 2986 } 2987 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes); 2988 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes); 2989 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); 2990 /* Encryption and other encoding is reserved and all 0 */ 2991 2992 /* 
2993 * For delalloc, when completing an ordered extent we update the inode's 2994 * bytes when clearing the range in the inode's io tree, so pass false 2995 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(), 2996 * except if the ordered extent was truncated. 2997 */ 2998 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) || 2999 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) || 3000 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags); 3001 3002 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), 3003 oe->file_offset, &stack_fi, 3004 update_inode_bytes, oe->qgroup_rsv); 3005 } 3006 3007 /* 3008 * As ordered data IO finishes, this gets called so we can finish 3009 * an ordered extent if the range of bytes in the file it covers are 3010 * fully written. 3011 */ 3012 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) 3013 { 3014 struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode); 3015 struct btrfs_root *root = inode->root; 3016 struct btrfs_fs_info *fs_info = root->fs_info; 3017 struct btrfs_trans_handle *trans = NULL; 3018 struct extent_io_tree *io_tree = &inode->io_tree; 3019 struct extent_state *cached_state = NULL; 3020 u64 start, end; 3021 int compress_type = 0; 3022 int ret = 0; 3023 u64 logical_len = ordered_extent->num_bytes; 3024 bool freespace_inode; 3025 bool truncated = false; 3026 bool clear_reserved_extent = true; 3027 unsigned int clear_bits = EXTENT_DEFRAG; 3028 3029 start = ordered_extent->file_offset; 3030 end = start + ordered_extent->num_bytes - 1; 3031 3032 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3033 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 3034 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && 3035 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) 3036 clear_bits |= EXTENT_DELALLOC_NEW; 3037 3038 freespace_inode = btrfs_is_free_space_inode(inode); 3039 if (!freespace_inode) 3040 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent); 3041 3042 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 3043 ret = -EIO; 3044 goto out; 3045 } 3046 3047 if (btrfs_is_zoned(fs_info)) 3048 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, 3049 ordered_extent->disk_num_bytes); 3050 3051 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 3052 truncated = true; 3053 logical_len = ordered_extent->truncated_len; 3054 /* Truncated the entire extent, don't bother adding */ 3055 if (!logical_len) 3056 goto out; 3057 } 3058 3059 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3060 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 3061 3062 btrfs_inode_safe_disk_i_size_write(inode, 0); 3063 if (freespace_inode) 3064 trans = btrfs_join_transaction_spacecache(root); 3065 else 3066 trans = btrfs_join_transaction(root); 3067 if (IS_ERR(trans)) { 3068 ret = PTR_ERR(trans); 3069 trans = NULL; 3070 goto out; 3071 } 3072 trans->block_rsv = &inode->block_rsv; 3073 ret = btrfs_update_inode_fallback(trans, root, inode); 3074 if (ret) /* -ENOMEM or corruption */ 3075 btrfs_abort_transaction(trans, ret); 3076 goto out; 3077 } 3078 3079 clear_bits |= EXTENT_LOCKED; 3080 lock_extent(io_tree, start, end, &cached_state); 3081 3082 if (freespace_inode) 3083 trans = btrfs_join_transaction_spacecache(root); 3084 else 3085 trans = btrfs_join_transaction(root); 3086 if (IS_ERR(trans)) { 3087 ret = PTR_ERR(trans); 3088 trans = NULL; 3089 goto out; 3090 } 3091 3092 trans->block_rsv = &inode->block_rsv; 3093 3094 if 
(test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 3095 compress_type = ordered_extent->compress_type; 3096 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3097 BUG_ON(compress_type); 3098 ret = btrfs_mark_extent_written(trans, inode, 3099 ordered_extent->file_offset, 3100 ordered_extent->file_offset + 3101 logical_len); 3102 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, 3103 ordered_extent->disk_num_bytes); 3104 } else { 3105 BUG_ON(root == fs_info->tree_root); 3106 ret = insert_ordered_extent_file_extent(trans, ordered_extent); 3107 if (!ret) { 3108 clear_reserved_extent = false; 3109 btrfs_release_delalloc_bytes(fs_info, 3110 ordered_extent->disk_bytenr, 3111 ordered_extent->disk_num_bytes); 3112 } 3113 } 3114 unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset, 3115 ordered_extent->num_bytes, trans->transid); 3116 if (ret < 0) { 3117 btrfs_abort_transaction(trans, ret); 3118 goto out; 3119 } 3120 3121 ret = add_pending_csums(trans, &ordered_extent->list); 3122 if (ret) { 3123 btrfs_abort_transaction(trans, ret); 3124 goto out; 3125 } 3126 3127 /* 3128 * If this is a new delalloc range, clear its new delalloc flag to 3129 * update the inode's number of bytes. This needs to be done first 3130 * before updating the inode item. 3131 */ 3132 if ((clear_bits & EXTENT_DELALLOC_NEW) && 3133 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) 3134 clear_extent_bit(&inode->io_tree, start, end, 3135 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, 3136 &cached_state); 3137 3138 btrfs_inode_safe_disk_i_size_write(inode, 0); 3139 ret = btrfs_update_inode_fallback(trans, root, inode); 3140 if (ret) { /* -ENOMEM or corruption */ 3141 btrfs_abort_transaction(trans, ret); 3142 goto out; 3143 } 3144 ret = 0; 3145 out: 3146 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 3147 &cached_state); 3148 3149 if (trans) 3150 btrfs_end_transaction(trans); 3151 3152 if (ret || truncated) { 3153 u64 unwritten_start = start; 3154 3155 /* 3156 * If we failed to finish this ordered extent for any reason we 3157 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered 3158 * extent, and mark the inode with the error if it wasn't 3159 * already set. Any error during writeback would have already 3160 * set the mapping error, so we need to set it if we're the ones 3161 * marking this ordered extent as failed. 3162 */ 3163 if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR, 3164 &ordered_extent->flags)) 3165 mapping_set_error(ordered_extent->inode->i_mapping, -EIO); 3166 3167 if (truncated) 3168 unwritten_start += logical_len; 3169 clear_extent_uptodate(io_tree, unwritten_start, end, NULL); 3170 3171 /* Drop extent maps for the part of the extent we didn't write. */ 3172 btrfs_drop_extent_map_range(inode, unwritten_start, end, false); 3173 3174 /* 3175 * If the ordered extent had an IOERR or something else went 3176 * wrong we need to return the space for this ordered extent 3177 * back to the allocator. We only free the extent in the 3178 * truncated case if we didn't write out the extent at all. 3179 * 3180 * If we made it past insert_reserved_file_extent before we 3181 * errored out then we don't need to do this as the accounting 3182 * has already been done. 
3183 */ 3184 if ((ret || !logical_len) && 3185 clear_reserved_extent && 3186 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3187 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3188 /* 3189 * Discard the range before returning it back to the 3190 * free space pool 3191 */ 3192 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC)) 3193 btrfs_discard_extent(fs_info, 3194 ordered_extent->disk_bytenr, 3195 ordered_extent->disk_num_bytes, 3196 NULL); 3197 btrfs_free_reserved_extent(fs_info, 3198 ordered_extent->disk_bytenr, 3199 ordered_extent->disk_num_bytes, 1); 3200 /* 3201 * Actually free the qgroup rsv which was released when 3202 * the ordered extent was created. 3203 */ 3204 btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid, 3205 ordered_extent->qgroup_rsv, 3206 BTRFS_QGROUP_RSV_DATA); 3207 } 3208 } 3209 3210 /* 3211 * This needs to be done to make sure anybody waiting knows we are done 3212 * updating everything for this ordered extent. 3213 */ 3214 btrfs_remove_ordered_extent(inode, ordered_extent); 3215 3216 /* once for us */ 3217 btrfs_put_ordered_extent(ordered_extent); 3218 /* once for the tree */ 3219 btrfs_put_ordered_extent(ordered_extent); 3220 3221 return ret; 3222 } 3223 3224 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered) 3225 { 3226 if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) && 3227 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) 3228 btrfs_finish_ordered_zoned(ordered); 3229 return btrfs_finish_one_ordered(ordered); 3230 } 3231 3232 /* 3233 * Verify the checksum for a single sector without any extra action that depend 3234 * on the type of I/O. 3235 */ 3236 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page, 3237 u32 pgoff, u8 *csum, const u8 * const csum_expected) 3238 { 3239 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 3240 char *kaddr; 3241 3242 ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE); 3243 3244 shash->tfm = fs_info->csum_shash; 3245 3246 kaddr = kmap_local_page(page) + pgoff; 3247 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); 3248 kunmap_local(kaddr); 3249 3250 if (memcmp(csum, csum_expected, fs_info->csum_size)) 3251 return -EIO; 3252 return 0; 3253 } 3254 3255 /* 3256 * Verify the checksum of a single data sector. 3257 * 3258 * @bbio: btrfs_io_bio which contains the csum 3259 * @dev: device the sector is on 3260 * @bio_offset: offset to the beginning of the bio (in bytes) 3261 * @bv: bio_vec to check 3262 * 3263 * Check if the checksum on a data block is valid. When a checksum mismatch is 3264 * detected, report the error and fill the corrupted range with zero. 3265 * 3266 * Return %true if the sector is ok or had no checksum to start with, else %false. 
3267 */ 3268 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, 3269 u32 bio_offset, struct bio_vec *bv) 3270 { 3271 struct btrfs_inode *inode = bbio->inode; 3272 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3273 u64 file_offset = bbio->file_offset + bio_offset; 3274 u64 end = file_offset + bv->bv_len - 1; 3275 u8 *csum_expected; 3276 u8 csum[BTRFS_CSUM_SIZE]; 3277 3278 ASSERT(bv->bv_len == fs_info->sectorsize); 3279 3280 if (!bbio->csum) 3281 return true; 3282 3283 if (btrfs_is_data_reloc_root(inode->root) && 3284 test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, 3285 1, NULL)) { 3286 /* Skip the range without csum for data reloc inode */ 3287 clear_extent_bits(&inode->io_tree, file_offset, end, 3288 EXTENT_NODATASUM); 3289 return true; 3290 } 3291 3292 csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) * 3293 fs_info->csum_size; 3294 if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum, 3295 csum_expected)) 3296 goto zeroit; 3297 return true; 3298 3299 zeroit: 3300 btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected, 3301 bbio->mirror_num); 3302 if (dev) 3303 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); 3304 memzero_bvec(bv); 3305 return false; 3306 } 3307 3308 /* 3309 * btrfs_add_delayed_iput - perform a delayed iput on @inode 3310 * 3311 * @inode: The inode we want to perform iput on 3312 * 3313 * This function uses the generic vfs_inode::i_count to track whether we should 3314 * just decrement it (in case it's > 1) or if this is the last iput then link 3315 * the inode to the delayed iput machinery. Delayed iputs are processed at 3316 * transaction commit time/superblock commit/cleaner kthread. 3317 */ 3318 void btrfs_add_delayed_iput(struct btrfs_inode *inode) 3319 { 3320 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3321 unsigned long flags; 3322 3323 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1)) 3324 return; 3325 3326 atomic_inc(&fs_info->nr_delayed_iputs); 3327 /* 3328 * Need to be irq safe here because we can be called from either an irq 3329 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq 3330 * context. 
3331 */ 3332 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags); 3333 ASSERT(list_empty(&inode->delayed_iput)); 3334 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); 3335 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags); 3336 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3337 wake_up_process(fs_info->cleaner_kthread); 3338 } 3339 3340 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3341 struct btrfs_inode *inode) 3342 { 3343 list_del_init(&inode->delayed_iput); 3344 spin_unlock_irq(&fs_info->delayed_iput_lock); 3345 iput(&inode->vfs_inode); 3346 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3347 wake_up(&fs_info->delayed_iputs_wait); 3348 spin_lock_irq(&fs_info->delayed_iput_lock); 3349 } 3350 3351 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3352 struct btrfs_inode *inode) 3353 { 3354 if (!list_empty(&inode->delayed_iput)) { 3355 spin_lock_irq(&fs_info->delayed_iput_lock); 3356 if (!list_empty(&inode->delayed_iput)) 3357 run_delayed_iput_locked(fs_info, inode); 3358 spin_unlock_irq(&fs_info->delayed_iput_lock); 3359 } 3360 } 3361 3362 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3363 { 3364 /* 3365 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which 3366 * calls btrfs_add_delayed_iput() and that needs to lock 3367 * fs_info->delayed_iput_lock. So we need to disable irqs here to 3368 * prevent a deadlock. 3369 */ 3370 spin_lock_irq(&fs_info->delayed_iput_lock); 3371 while (!list_empty(&fs_info->delayed_iputs)) { 3372 struct btrfs_inode *inode; 3373 3374 inode = list_first_entry(&fs_info->delayed_iputs, 3375 struct btrfs_inode, delayed_iput); 3376 run_delayed_iput_locked(fs_info, inode); 3377 if (need_resched()) { 3378 spin_unlock_irq(&fs_info->delayed_iput_lock); 3379 cond_resched(); 3380 spin_lock_irq(&fs_info->delayed_iput_lock); 3381 } 3382 } 3383 spin_unlock_irq(&fs_info->delayed_iput_lock); 3384 } 3385 3386 /* 3387 * Wait for flushing all delayed iputs 3388 * 3389 * @fs_info: the filesystem 3390 * 3391 * This will wait on any delayed iputs that are currently running with KILLABLE 3392 * set. Once they are all done running we will return, unless we are killed in 3393 * which case we return EINTR. This helps in user operations like fallocate etc 3394 * that might get blocked on the iputs. 3395 * 3396 * Return EINTR if we were killed, 0 if nothing's pending 3397 */ 3398 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3399 { 3400 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3401 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3402 if (ret) 3403 return -EINTR; 3404 return 0; 3405 } 3406 3407 /* 3408 * This creates an orphan entry for the given inode in case something goes wrong 3409 * in the middle of an unlink. 3410 */ 3411 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3412 struct btrfs_inode *inode) 3413 { 3414 int ret; 3415 3416 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3417 if (ret && ret != -EEXIST) { 3418 btrfs_abort_transaction(trans, ret); 3419 return ret; 3420 } 3421 3422 return 0; 3423 } 3424 3425 /* 3426 * We have done the delete so we can go ahead and remove the orphan item for 3427 * this particular inode. 
3428 */ 3429 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3430 struct btrfs_inode *inode) 3431 { 3432 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3433 } 3434 3435 /* 3436 * this cleans up any orphans that may be left on the list from the last use 3437 * of this root. 3438 */ 3439 int btrfs_orphan_cleanup(struct btrfs_root *root) 3440 { 3441 struct btrfs_fs_info *fs_info = root->fs_info; 3442 struct btrfs_path *path; 3443 struct extent_buffer *leaf; 3444 struct btrfs_key key, found_key; 3445 struct btrfs_trans_handle *trans; 3446 struct inode *inode; 3447 u64 last_objectid = 0; 3448 int ret = 0, nr_unlink = 0; 3449 3450 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) 3451 return 0; 3452 3453 path = btrfs_alloc_path(); 3454 if (!path) { 3455 ret = -ENOMEM; 3456 goto out; 3457 } 3458 path->reada = READA_BACK; 3459 3460 key.objectid = BTRFS_ORPHAN_OBJECTID; 3461 key.type = BTRFS_ORPHAN_ITEM_KEY; 3462 key.offset = (u64)-1; 3463 3464 while (1) { 3465 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3466 if (ret < 0) 3467 goto out; 3468 3469 /* 3470 * if ret == 0 means we found what we were searching for, which 3471 * is weird, but possible, so only screw with path if we didn't 3472 * find the key and see if we have stuff that matches 3473 */ 3474 if (ret > 0) { 3475 ret = 0; 3476 if (path->slots[0] == 0) 3477 break; 3478 path->slots[0]--; 3479 } 3480 3481 /* pull out the item */ 3482 leaf = path->nodes[0]; 3483 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3484 3485 /* make sure the item matches what we want */ 3486 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3487 break; 3488 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3489 break; 3490 3491 /* release the path since we're done with it */ 3492 btrfs_release_path(path); 3493 3494 /* 3495 * this is where we are basically btrfs_lookup, without the 3496 * crossing root thing. we store the inode number in the 3497 * offset of the orphan item. 3498 */ 3499 3500 if (found_key.offset == last_objectid) { 3501 /* 3502 * We found the same inode as before. This means we were 3503 * not able to remove its items via eviction triggered 3504 * by an iput(). A transaction abort may have happened, 3505 * due to -ENOSPC for example, so try to grab the error 3506 * that lead to a transaction abort, if any. 3507 */ 3508 btrfs_err(fs_info, 3509 "Error removing orphan entry, stopping orphan cleanup"); 3510 ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL; 3511 goto out; 3512 } 3513 3514 last_objectid = found_key.offset; 3515 3516 found_key.objectid = found_key.offset; 3517 found_key.type = BTRFS_INODE_ITEM_KEY; 3518 found_key.offset = 0; 3519 inode = btrfs_iget(fs_info->sb, last_objectid, root); 3520 if (IS_ERR(inode)) { 3521 ret = PTR_ERR(inode); 3522 inode = NULL; 3523 if (ret != -ENOENT) 3524 goto out; 3525 } 3526 3527 if (!inode && root == fs_info->tree_root) { 3528 struct btrfs_root *dead_root; 3529 int is_dead_root = 0; 3530 3531 /* 3532 * This is an orphan in the tree root. Currently these 3533 * could come from 2 sources: 3534 * a) a root (snapshot/subvolume) deletion in progress 3535 * b) a free space cache inode 3536 * We need to distinguish those two, as the orphan item 3537 * for a root must not get deleted before the deletion 3538 * of the snapshot/subvolume's tree completes. 3539 * 3540 * btrfs_find_orphan_roots() ran before us, which has 3541 * found all deleted roots and loaded them into 3542 * fs_info->fs_roots_radix. 
So here we can find if an 3543 * orphan item corresponds to a deleted root by looking 3544 * up the root from that radix tree. 3545 */ 3546 3547 spin_lock(&fs_info->fs_roots_radix_lock); 3548 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, 3549 (unsigned long)found_key.objectid); 3550 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3551 is_dead_root = 1; 3552 spin_unlock(&fs_info->fs_roots_radix_lock); 3553 3554 if (is_dead_root) { 3555 /* prevent this orphan from being found again */ 3556 key.offset = found_key.objectid - 1; 3557 continue; 3558 } 3559 3560 } 3561 3562 /* 3563 * If we have an inode with links, there are a couple of 3564 * possibilities: 3565 * 3566 * 1. We were halfway through creating fsverity metadata for the 3567 * file. In that case, the orphan item represents incomplete 3568 * fsverity metadata which must be cleaned up with 3569 * btrfs_drop_verity_items and deleting the orphan item. 3570 3571 * 2. Old kernels (before v3.12) used to create an 3572 * orphan item for truncate indicating that there were possibly 3573 * extent items past i_size that needed to be deleted. In v3.12, 3574 * truncate was changed to update i_size in sync with the extent 3575 * items, but the (useless) orphan item was still created. Since 3576 * v4.18, we don't create the orphan item for truncate at all. 3577 * 3578 * So, this item could mean that we need to do a truncate, but 3579 * only if this filesystem was last used on a pre-v3.12 kernel 3580 * and was not cleanly unmounted. The odds of that are quite 3581 * slim, and it's a pain to do the truncate now, so just delete 3582 * the orphan item. 3583 * 3584 * It's also possible that this orphan item was supposed to be 3585 * deleted but wasn't. The inode number may have been reused, 3586 * but either way, we can delete the orphan item. 3587 */ 3588 if (!inode || inode->i_nlink) { 3589 if (inode) { 3590 ret = btrfs_drop_verity_items(BTRFS_I(inode)); 3591 iput(inode); 3592 inode = NULL; 3593 if (ret) 3594 goto out; 3595 } 3596 trans = btrfs_start_transaction(root, 1); 3597 if (IS_ERR(trans)) { 3598 ret = PTR_ERR(trans); 3599 goto out; 3600 } 3601 btrfs_debug(fs_info, "auto deleting %Lu", 3602 found_key.objectid); 3603 ret = btrfs_del_orphan_item(trans, root, 3604 found_key.objectid); 3605 btrfs_end_transaction(trans); 3606 if (ret) 3607 goto out; 3608 continue; 3609 } 3610 3611 nr_unlink++; 3612 3613 /* this will do delete_inode and everything for us */ 3614 iput(inode); 3615 } 3616 /* release the path since we're done with it */ 3617 btrfs_release_path(path); 3618 3619 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3620 trans = btrfs_join_transaction(root); 3621 if (!IS_ERR(trans)) 3622 btrfs_end_transaction(trans); 3623 } 3624 3625 if (nr_unlink) 3626 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3627 3628 out: 3629 if (ret) 3630 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3631 btrfs_free_path(path); 3632 return ret; 3633 } 3634 3635 /* 3636 * very simple check to peek ahead in the leaf looking for xattrs. If we 3637 * don't find any xattrs, we know there can't be any acls. 
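 *
 * Items of an inode are laid out in key order: the inode item, then inode
 * refs, then xattr items, then extent items. So any xattr (and therefore
 * any ACL) sits only a few slots after the inode item in the same leaf.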
3638 * 3639 * slot is the slot the inode is in, objectid is the objectid of the inode 3640 */ 3641 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3642 int slot, u64 objectid, 3643 int *first_xattr_slot) 3644 { 3645 u32 nritems = btrfs_header_nritems(leaf); 3646 struct btrfs_key found_key; 3647 static u64 xattr_access = 0; 3648 static u64 xattr_default = 0; 3649 int scanned = 0; 3650 3651 if (!xattr_access) { 3652 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3653 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3654 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3655 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3656 } 3657 3658 slot++; 3659 *first_xattr_slot = -1; 3660 while (slot < nritems) { 3661 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3662 3663 /* we found a different objectid, there must not be acls */ 3664 if (found_key.objectid != objectid) 3665 return 0; 3666 3667 /* we found an xattr, assume we've got an acl */ 3668 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3669 if (*first_xattr_slot == -1) 3670 *first_xattr_slot = slot; 3671 if (found_key.offset == xattr_access || 3672 found_key.offset == xattr_default) 3673 return 1; 3674 } 3675 3676 /* 3677 * we found a key greater than an xattr key, there can't 3678 * be any acls later on 3679 */ 3680 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3681 return 0; 3682 3683 slot++; 3684 scanned++; 3685 3686 /* 3687 * it goes inode, inode backrefs, xattrs, extents, 3688 * so if there are a ton of hard links to an inode there can 3689 * be a lot of backrefs. Don't waste time searching too hard, 3690 * this is just an optimization 3691 */ 3692 if (scanned >= 8) 3693 break; 3694 } 3695 /* we hit the end of the leaf before we found an xattr or 3696 * something larger than an xattr. 
We have to assume the inode 3697 * has acls 3698 */ 3699 if (*first_xattr_slot == -1) 3700 *first_xattr_slot = slot; 3701 return 1; 3702 } 3703 3704 /* 3705 * read an inode from the btree into the in-memory inode 3706 */ 3707 static int btrfs_read_locked_inode(struct inode *inode, 3708 struct btrfs_path *in_path) 3709 { 3710 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3711 struct btrfs_path *path = in_path; 3712 struct extent_buffer *leaf; 3713 struct btrfs_inode_item *inode_item; 3714 struct btrfs_root *root = BTRFS_I(inode)->root; 3715 struct btrfs_key location; 3716 unsigned long ptr; 3717 int maybe_acls; 3718 u32 rdev; 3719 int ret; 3720 bool filled = false; 3721 int first_xattr_slot; 3722 3723 ret = btrfs_fill_inode(inode, &rdev); 3724 if (!ret) 3725 filled = true; 3726 3727 if (!path) { 3728 path = btrfs_alloc_path(); 3729 if (!path) 3730 return -ENOMEM; 3731 } 3732 3733 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3734 3735 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3736 if (ret) { 3737 if (path != in_path) 3738 btrfs_free_path(path); 3739 return ret; 3740 } 3741 3742 leaf = path->nodes[0]; 3743 3744 if (filled) 3745 goto cache_index; 3746 3747 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3748 struct btrfs_inode_item); 3749 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3750 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3751 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3752 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3753 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 3754 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0, 3755 round_up(i_size_read(inode), fs_info->sectorsize)); 3756 3757 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 3758 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 3759 3760 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 3761 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 3762 3763 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); 3764 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); 3765 3766 BTRFS_I(inode)->i_otime.tv_sec = 3767 btrfs_timespec_sec(leaf, &inode_item->otime); 3768 BTRFS_I(inode)->i_otime.tv_nsec = 3769 btrfs_timespec_nsec(leaf, &inode_item->otime); 3770 3771 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3772 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3773 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3774 3775 inode_set_iversion_queried(inode, 3776 btrfs_inode_sequence(leaf, inode_item)); 3777 inode->i_generation = BTRFS_I(inode)->generation; 3778 inode->i_rdev = 0; 3779 rdev = btrfs_inode_rdev(leaf, inode_item); 3780 3781 BTRFS_I(inode)->index_cnt = (u64)-1; 3782 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), 3783 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); 3784 3785 cache_index: 3786 /* 3787 * If we were modified in the current generation and evicted from memory 3788 * and then re-read we need to do a full sync since we don't have any 3789 * idea about which extents were modified before we were evicted from 3790 * cache. 3791 * 3792 * This is required for both inode re-read from disk and delayed inode 3793 * in delayed_nodes_tree. 
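 *
 * For example (illustrative): write to a file, evict its inode via memory
 * pressure or "echo 2 > /proc/sys/vm/drop_caches", read it back and then
 * fsync it, all before the transaction commits. Without the full sync flag
 * we would have no record of which extents the earlier writes touched.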
3794 */ 3795 if (BTRFS_I(inode)->last_trans == fs_info->generation) 3796 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3797 &BTRFS_I(inode)->runtime_flags); 3798 3799 /* 3800 * We don't persist the id of the transaction where an unlink operation 3801 * against the inode was last made. So here we assume the inode might 3802 * have been evicted, and therefore the exact value of last_unlink_trans 3803 * lost, and set it to last_trans to avoid metadata inconsistencies 3804 * between the inode and its parent if the inode is fsync'ed and the log 3805 * replayed. For example, in the scenario: 3806 * 3807 * touch mydir/foo 3808 * ln mydir/foo mydir/bar 3809 * sync 3810 * unlink mydir/bar 3811 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3812 * xfs_io -c fsync mydir/foo 3813 * <power failure> 3814 * mount fs, triggers fsync log replay 3815 * 3816 * We must make sure that when we fsync our inode foo we also log its 3817 * parent inode, otherwise after log replay the parent still has the 3818 * dentry with the "bar" name but our inode foo has a link count of 1 3819 * and doesn't have an inode ref with the name "bar" anymore. 3820 * 3821 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3822 * but it guarantees correctness at the expense of occasional full 3823 * transaction commits on fsync if our inode is a directory, or if our 3824 * inode is not a directory, logging its parent unnecessarily. 3825 */ 3826 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3827 3828 /* 3829 * Same logic as for last_unlink_trans. We don't persist the generation 3830 * of the last transaction where this inode was used for a reflink 3831 * operation, so after eviction and reloading the inode we must be 3832 * pessimistic and assume the last transaction that modified the inode. 
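 *
 * An analogous sequence to the one above (illustrative):
 *
 *   cp --reflink=always mydir/foo mydir/foo-clone
 *   echo 2 > /proc/sys/vm/drop_caches   # evicts both inodes
 *   xfs_io -c fsync mydir/foo
 *   <power failure>
 *
 * After eviction we no longer know in which transaction the reflink
 * happened, so we must assume the worst (last_trans).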
3833 */ 3834 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; 3835 3836 path->slots[0]++; 3837 if (inode->i_nlink != 1 || 3838 path->slots[0] >= btrfs_header_nritems(leaf)) 3839 goto cache_acl; 3840 3841 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3842 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 3843 goto cache_acl; 3844 3845 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3846 if (location.type == BTRFS_INODE_REF_KEY) { 3847 struct btrfs_inode_ref *ref; 3848 3849 ref = (struct btrfs_inode_ref *)ptr; 3850 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3851 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3852 struct btrfs_inode_extref *extref; 3853 3854 extref = (struct btrfs_inode_extref *)ptr; 3855 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3856 extref); 3857 } 3858 cache_acl: 3859 /* 3860 * try to precache a NULL acl entry for files that don't have 3861 * any xattrs or acls 3862 */ 3863 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3864 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 3865 if (first_xattr_slot != -1) { 3866 path->slots[0] = first_xattr_slot; 3867 ret = btrfs_load_inode_props(inode, path); 3868 if (ret) 3869 btrfs_err(fs_info, 3870 "error loading props for ino %llu (root %llu): %d", 3871 btrfs_ino(BTRFS_I(inode)), 3872 root->root_key.objectid, ret); 3873 } 3874 if (path != in_path) 3875 btrfs_free_path(path); 3876 3877 if (!maybe_acls) 3878 cache_no_acl(inode); 3879 3880 switch (inode->i_mode & S_IFMT) { 3881 case S_IFREG: 3882 inode->i_mapping->a_ops = &btrfs_aops; 3883 inode->i_fop = &btrfs_file_operations; 3884 inode->i_op = &btrfs_file_inode_operations; 3885 break; 3886 case S_IFDIR: 3887 inode->i_fop = &btrfs_dir_file_operations; 3888 inode->i_op = &btrfs_dir_inode_operations; 3889 break; 3890 case S_IFLNK: 3891 inode->i_op = &btrfs_symlink_inode_operations; 3892 inode_nohighmem(inode); 3893 inode->i_mapping->a_ops = &btrfs_aops; 3894 break; 3895 default: 3896 inode->i_op = &btrfs_special_inode_operations; 3897 init_special_inode(inode, inode->i_mode, rdev); 3898 break; 3899 } 3900 3901 btrfs_sync_inode_flags_to_i_flags(inode); 3902 return 0; 3903 } 3904 3905 /* 3906 * given a leaf and an inode, copy the inode fields into the leaf 3907 */ 3908 static void fill_inode_item(struct btrfs_trans_handle *trans, 3909 struct extent_buffer *leaf, 3910 struct btrfs_inode_item *item, 3911 struct inode *inode) 3912 { 3913 struct btrfs_map_token token; 3914 u64 flags; 3915 3916 btrfs_init_map_token(&token, leaf); 3917 3918 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 3919 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 3920 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 3921 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 3922 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 3923 3924 btrfs_set_token_timespec_sec(&token, &item->atime, 3925 inode->i_atime.tv_sec); 3926 btrfs_set_token_timespec_nsec(&token, &item->atime, 3927 inode->i_atime.tv_nsec); 3928 3929 btrfs_set_token_timespec_sec(&token, &item->mtime, 3930 inode->i_mtime.tv_sec); 3931 btrfs_set_token_timespec_nsec(&token, &item->mtime, 3932 inode->i_mtime.tv_nsec); 3933 3934 btrfs_set_token_timespec_sec(&token, &item->ctime, 3935 inode->i_ctime.tv_sec); 3936 btrfs_set_token_timespec_nsec(&token, &item->ctime, 3937 inode->i_ctime.tv_nsec); 3938 3939 btrfs_set_token_timespec_sec(&token, &item->otime, 3940 BTRFS_I(inode)->i_otime.tv_sec); 3941 
btrfs_set_token_timespec_nsec(&token, &item->otime, 3942 BTRFS_I(inode)->i_otime.tv_nsec); 3943 3944 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 3945 btrfs_set_token_inode_generation(&token, item, 3946 BTRFS_I(inode)->generation); 3947 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 3948 btrfs_set_token_inode_transid(&token, item, trans->transid); 3949 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 3950 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 3951 BTRFS_I(inode)->ro_flags); 3952 btrfs_set_token_inode_flags(&token, item, flags); 3953 btrfs_set_token_inode_block_group(&token, item, 0); 3954 } 3955 3956 /* 3957 * copy everything in the in-memory inode into the btree. 3958 */ 3959 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 3960 struct btrfs_root *root, 3961 struct btrfs_inode *inode) 3962 { 3963 struct btrfs_inode_item *inode_item; 3964 struct btrfs_path *path; 3965 struct extent_buffer *leaf; 3966 int ret; 3967 3968 path = btrfs_alloc_path(); 3969 if (!path) 3970 return -ENOMEM; 3971 3972 ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1); 3973 if (ret) { 3974 if (ret > 0) 3975 ret = -ENOENT; 3976 goto failed; 3977 } 3978 3979 leaf = path->nodes[0]; 3980 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3981 struct btrfs_inode_item); 3982 3983 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); 3984 btrfs_mark_buffer_dirty(leaf); 3985 btrfs_set_inode_last_trans(trans, inode); 3986 ret = 0; 3987 failed: 3988 btrfs_free_path(path); 3989 return ret; 3990 } 3991 3992 /* 3993 * copy everything in the in-memory inode into the btree. 3994 */ 3995 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 3996 struct btrfs_root *root, 3997 struct btrfs_inode *inode) 3998 { 3999 struct btrfs_fs_info *fs_info = root->fs_info; 4000 int ret; 4001 4002 /* 4003 * If the inode is a free space inode, we can deadlock during commit 4004 * if we put it into the delayed code. 4005 * 4006 * The data relocation inode should also be directly updated 4007 * without delay 4008 */ 4009 if (!btrfs_is_free_space_inode(inode) 4010 && !btrfs_is_data_reloc_root(root) 4011 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 4012 btrfs_update_root_times(trans, root); 4013 4014 ret = btrfs_delayed_update_inode(trans, root, inode); 4015 if (!ret) 4016 btrfs_set_inode_last_trans(trans, inode); 4017 return ret; 4018 } 4019 4020 return btrfs_update_inode_item(trans, root, inode); 4021 } 4022 4023 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4024 struct btrfs_root *root, struct btrfs_inode *inode) 4025 { 4026 int ret; 4027 4028 ret = btrfs_update_inode(trans, root, inode); 4029 if (ret == -ENOSPC) 4030 return btrfs_update_inode_item(trans, root, inode); 4031 return ret; 4032 } 4033 4034 /* 4035 * unlink helper that gets used here in inode.c and in the tree logging 4036 * recovery code. 
It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory.
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const struct fscrypt_str *name,
				struct btrfs_rename_ctx *rename_ctx)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have the dir index cached, we have to look up the inode
	 * ref to get it, and since we then have the inode ref at hand, we
	 * remove it directly; a delayed deletion would be pointless.
	 *
	 * But if we do have the dir index, there is no need to search for the
	 * inode ref. Since the inode ref is close to the inode item, it is
	 * better to delay its deletion and do it when we update the inode
	 * item.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			   "failed to delete reference to %.*s, inode %llu parent %llu",
			   name->len, name->name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	if (rename_ctx)
		rename_ctx->index = index;

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	/*
	 * If we are in a rename context, we don't need to update anything in
	 * the log. That will be done later during the rename by
	 * btrfs_log_new_name(). Besides that, doing it here would only cause
	 * extra unnecessary btree operations on the log tree, increasing
	 * latency for applications.
	 */
	if (!rename_ctx) {
		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
	}

	/*
	 * If we have a pending delayed iput we could end up with the final
	 * iput being run in btrfs-cleaner context. If we have enough of these
	 * built up we can end up burning a lot of time in btrfs-cleaner
	 * without any way to throttle the unlinks. Since we're currently
	 * holding a ref on the inode we can run the delayed iput here without
	 * any issues as the final iput won't be done until after we drop the
	 * ref we're currently holding.
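 * For example, a bulk "rm -rf" could otherwise queue a delayed iput per
 * unlinked inode and leave all the final iputs to btrfs-cleaner.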
4125 */ 4126 btrfs_run_delayed_iput(fs_info, inode); 4127 err: 4128 btrfs_free_path(path); 4129 if (ret) 4130 goto out; 4131 4132 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); 4133 inode_inc_iversion(&inode->vfs_inode); 4134 inode_inc_iversion(&dir->vfs_inode); 4135 inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode); 4136 dir->vfs_inode.i_mtime = inode->vfs_inode.i_ctime; 4137 dir->vfs_inode.i_ctime = inode->vfs_inode.i_ctime; 4138 ret = btrfs_update_inode(trans, root, dir); 4139 out: 4140 return ret; 4141 } 4142 4143 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4144 struct btrfs_inode *dir, struct btrfs_inode *inode, 4145 const struct fscrypt_str *name) 4146 { 4147 int ret; 4148 4149 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL); 4150 if (!ret) { 4151 drop_nlink(&inode->vfs_inode); 4152 ret = btrfs_update_inode(trans, inode->root, inode); 4153 } 4154 return ret; 4155 } 4156 4157 /* 4158 * helper to start transaction for unlink and rmdir. 4159 * 4160 * unlink and rmdir are special in btrfs, they do not always free space, so 4161 * if we cannot make our reservations the normal way try and see if there is 4162 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4163 * allow the unlink to occur. 4164 */ 4165 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) 4166 { 4167 struct btrfs_root *root = dir->root; 4168 4169 return btrfs_start_transaction_fallback_global_rsv(root, 4170 BTRFS_UNLINK_METADATA_UNITS); 4171 } 4172 4173 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4174 { 4175 struct btrfs_trans_handle *trans; 4176 struct inode *inode = d_inode(dentry); 4177 int ret; 4178 struct fscrypt_name fname; 4179 4180 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4181 if (ret) 4182 return ret; 4183 4184 /* This needs to handle no-key deletions later on */ 4185 4186 trans = __unlink_start_trans(BTRFS_I(dir)); 4187 if (IS_ERR(trans)) { 4188 ret = PTR_ERR(trans); 4189 goto fscrypt_free; 4190 } 4191 4192 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4193 false); 4194 4195 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4196 &fname.disk_name); 4197 if (ret) 4198 goto end_trans; 4199 4200 if (inode->i_nlink == 0) { 4201 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4202 if (ret) 4203 goto end_trans; 4204 } 4205 4206 end_trans: 4207 btrfs_end_transaction(trans); 4208 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); 4209 fscrypt_free: 4210 fscrypt_free_filename(&fname); 4211 return ret; 4212 } 4213 4214 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4215 struct btrfs_inode *dir, struct dentry *dentry) 4216 { 4217 struct btrfs_root *root = dir->root; 4218 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4219 struct btrfs_path *path; 4220 struct extent_buffer *leaf; 4221 struct btrfs_dir_item *di; 4222 struct btrfs_key key; 4223 u64 index; 4224 int ret; 4225 u64 objectid; 4226 u64 dir_ino = btrfs_ino(dir); 4227 struct fscrypt_name fname; 4228 4229 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 4230 if (ret) 4231 return ret; 4232 4233 /* This needs to handle no-key deletions later on */ 4234 4235 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4236 objectid = inode->root->root_key.objectid; 4237 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4238 objectid = inode->location.objectid; 4239 } else { 4240 WARN_ON(1); 4241 
fscrypt_free_filename(&fname); 4242 return -EINVAL; 4243 } 4244 4245 path = btrfs_alloc_path(); 4246 if (!path) { 4247 ret = -ENOMEM; 4248 goto out; 4249 } 4250 4251 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4252 &fname.disk_name, -1); 4253 if (IS_ERR_OR_NULL(di)) { 4254 ret = di ? PTR_ERR(di) : -ENOENT; 4255 goto out; 4256 } 4257 4258 leaf = path->nodes[0]; 4259 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4260 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4261 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4262 if (ret) { 4263 btrfs_abort_transaction(trans, ret); 4264 goto out; 4265 } 4266 btrfs_release_path(path); 4267 4268 /* 4269 * This is a placeholder inode for a subvolume we didn't have a 4270 * reference to at the time of the snapshot creation. In the meantime 4271 * we could have renamed the real subvol link into our snapshot, so 4272 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. 4273 * Instead simply lookup the dir_index_item for this entry so we can 4274 * remove it. Otherwise we know we have a ref to the root and we can 4275 * call btrfs_del_root_ref, and it _shouldn't_ fail. 4276 */ 4277 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4278 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name); 4279 if (IS_ERR_OR_NULL(di)) { 4280 if (!di) 4281 ret = -ENOENT; 4282 else 4283 ret = PTR_ERR(di); 4284 btrfs_abort_transaction(trans, ret); 4285 goto out; 4286 } 4287 4288 leaf = path->nodes[0]; 4289 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4290 index = key.offset; 4291 btrfs_release_path(path); 4292 } else { 4293 ret = btrfs_del_root_ref(trans, objectid, 4294 root->root_key.objectid, dir_ino, 4295 &index, &fname.disk_name); 4296 if (ret) { 4297 btrfs_abort_transaction(trans, ret); 4298 goto out; 4299 } 4300 } 4301 4302 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4303 if (ret) { 4304 btrfs_abort_transaction(trans, ret); 4305 goto out; 4306 } 4307 4308 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); 4309 inode_inc_iversion(&dir->vfs_inode); 4310 dir->vfs_inode.i_mtime = current_time(&dir->vfs_inode); 4311 dir->vfs_inode.i_ctime = dir->vfs_inode.i_mtime; 4312 ret = btrfs_update_inode_fallback(trans, root, dir); 4313 if (ret) 4314 btrfs_abort_transaction(trans, ret); 4315 out: 4316 btrfs_free_path(path); 4317 fscrypt_free_filename(&fname); 4318 return ret; 4319 } 4320 4321 /* 4322 * Helper to check if the subvolume references other subvolumes or if it's 4323 * default. 
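 *
 * Returns 0 if the subvolume may be deleted, -EPERM if it is the default
 * subvolume, -ENOTEMPTY if it still has child subvolumes (ROOT_REF items),
 * and a negative errno on lookup failure.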
4324 */ 4325 static noinline int may_destroy_subvol(struct btrfs_root *root) 4326 { 4327 struct btrfs_fs_info *fs_info = root->fs_info; 4328 struct btrfs_path *path; 4329 struct btrfs_dir_item *di; 4330 struct btrfs_key key; 4331 struct fscrypt_str name = FSTR_INIT("default", 7); 4332 u64 dir_id; 4333 int ret; 4334 4335 path = btrfs_alloc_path(); 4336 if (!path) 4337 return -ENOMEM; 4338 4339 /* Make sure this root isn't set as the default subvol */ 4340 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4341 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4342 dir_id, &name, 0); 4343 if (di && !IS_ERR(di)) { 4344 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4345 if (key.objectid == root->root_key.objectid) { 4346 ret = -EPERM; 4347 btrfs_err(fs_info, 4348 "deleting default subvolume %llu is not allowed", 4349 key.objectid); 4350 goto out; 4351 } 4352 btrfs_release_path(path); 4353 } 4354 4355 key.objectid = root->root_key.objectid; 4356 key.type = BTRFS_ROOT_REF_KEY; 4357 key.offset = (u64)-1; 4358 4359 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4360 if (ret < 0) 4361 goto out; 4362 BUG_ON(ret == 0); 4363 4364 ret = 0; 4365 if (path->slots[0] > 0) { 4366 path->slots[0]--; 4367 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4368 if (key.objectid == root->root_key.objectid && 4369 key.type == BTRFS_ROOT_REF_KEY) 4370 ret = -ENOTEMPTY; 4371 } 4372 out: 4373 btrfs_free_path(path); 4374 return ret; 4375 } 4376 4377 /* Delete all dentries for inodes belonging to the root */ 4378 static void btrfs_prune_dentries(struct btrfs_root *root) 4379 { 4380 struct btrfs_fs_info *fs_info = root->fs_info; 4381 struct rb_node *node; 4382 struct rb_node *prev; 4383 struct btrfs_inode *entry; 4384 struct inode *inode; 4385 u64 objectid = 0; 4386 4387 if (!BTRFS_FS_ERROR(fs_info)) 4388 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4389 4390 spin_lock(&root->inode_lock); 4391 again: 4392 node = root->inode_tree.rb_node; 4393 prev = NULL; 4394 while (node) { 4395 prev = node; 4396 entry = rb_entry(node, struct btrfs_inode, rb_node); 4397 4398 if (objectid < btrfs_ino(entry)) 4399 node = node->rb_left; 4400 else if (objectid > btrfs_ino(entry)) 4401 node = node->rb_right; 4402 else 4403 break; 4404 } 4405 if (!node) { 4406 while (prev) { 4407 entry = rb_entry(prev, struct btrfs_inode, rb_node); 4408 if (objectid <= btrfs_ino(entry)) { 4409 node = prev; 4410 break; 4411 } 4412 prev = rb_next(prev); 4413 } 4414 } 4415 while (node) { 4416 entry = rb_entry(node, struct btrfs_inode, rb_node); 4417 objectid = btrfs_ino(entry) + 1; 4418 inode = igrab(&entry->vfs_inode); 4419 if (inode) { 4420 spin_unlock(&root->inode_lock); 4421 if (atomic_read(&inode->i_count) > 1) 4422 d_prune_aliases(inode); 4423 /* 4424 * btrfs_drop_inode will have it removed from the inode 4425 * cache when its usage count hits zero. 
4426 */ 4427 iput(inode); 4428 cond_resched(); 4429 spin_lock(&root->inode_lock); 4430 goto again; 4431 } 4432 4433 if (cond_resched_lock(&root->inode_lock)) 4434 goto again; 4435 4436 node = rb_next(node); 4437 } 4438 spin_unlock(&root->inode_lock); 4439 } 4440 4441 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry) 4442 { 4443 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); 4444 struct btrfs_root *root = dir->root; 4445 struct inode *inode = d_inode(dentry); 4446 struct btrfs_root *dest = BTRFS_I(inode)->root; 4447 struct btrfs_trans_handle *trans; 4448 struct btrfs_block_rsv block_rsv; 4449 u64 root_flags; 4450 int ret; 4451 4452 /* 4453 * Don't allow to delete a subvolume with send in progress. This is 4454 * inside the inode lock so the error handling that has to drop the bit 4455 * again is not run concurrently. 4456 */ 4457 spin_lock(&dest->root_item_lock); 4458 if (dest->send_in_progress) { 4459 spin_unlock(&dest->root_item_lock); 4460 btrfs_warn(fs_info, 4461 "attempt to delete subvolume %llu during send", 4462 dest->root_key.objectid); 4463 return -EPERM; 4464 } 4465 if (atomic_read(&dest->nr_swapfiles)) { 4466 spin_unlock(&dest->root_item_lock); 4467 btrfs_warn(fs_info, 4468 "attempt to delete subvolume %llu with active swapfile", 4469 root->root_key.objectid); 4470 return -EPERM; 4471 } 4472 root_flags = btrfs_root_flags(&dest->root_item); 4473 btrfs_set_root_flags(&dest->root_item, 4474 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4475 spin_unlock(&dest->root_item_lock); 4476 4477 down_write(&fs_info->subvol_sem); 4478 4479 ret = may_destroy_subvol(dest); 4480 if (ret) 4481 goto out_up_write; 4482 4483 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4484 /* 4485 * One for dir inode, 4486 * two for dir entries, 4487 * two for root ref/backref. 
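 * That is 1 + 2 + 2 = 5 metadata units, matching the reservation made just
 * below.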
4488 */ 4489 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4490 if (ret) 4491 goto out_up_write; 4492 4493 trans = btrfs_start_transaction(root, 0); 4494 if (IS_ERR(trans)) { 4495 ret = PTR_ERR(trans); 4496 goto out_release; 4497 } 4498 trans->block_rsv = &block_rsv; 4499 trans->bytes_reserved = block_rsv.size; 4500 4501 btrfs_record_snapshot_destroy(trans, dir); 4502 4503 ret = btrfs_unlink_subvol(trans, dir, dentry); 4504 if (ret) { 4505 btrfs_abort_transaction(trans, ret); 4506 goto out_end_trans; 4507 } 4508 4509 ret = btrfs_record_root_in_trans(trans, dest); 4510 if (ret) { 4511 btrfs_abort_transaction(trans, ret); 4512 goto out_end_trans; 4513 } 4514 4515 memset(&dest->root_item.drop_progress, 0, 4516 sizeof(dest->root_item.drop_progress)); 4517 btrfs_set_root_drop_level(&dest->root_item, 0); 4518 btrfs_set_root_refs(&dest->root_item, 0); 4519 4520 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4521 ret = btrfs_insert_orphan_item(trans, 4522 fs_info->tree_root, 4523 dest->root_key.objectid); 4524 if (ret) { 4525 btrfs_abort_transaction(trans, ret); 4526 goto out_end_trans; 4527 } 4528 } 4529 4530 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4531 BTRFS_UUID_KEY_SUBVOL, 4532 dest->root_key.objectid); 4533 if (ret && ret != -ENOENT) { 4534 btrfs_abort_transaction(trans, ret); 4535 goto out_end_trans; 4536 } 4537 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4538 ret = btrfs_uuid_tree_remove(trans, 4539 dest->root_item.received_uuid, 4540 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4541 dest->root_key.objectid); 4542 if (ret && ret != -ENOENT) { 4543 btrfs_abort_transaction(trans, ret); 4544 goto out_end_trans; 4545 } 4546 } 4547 4548 free_anon_bdev(dest->anon_dev); 4549 dest->anon_dev = 0; 4550 out_end_trans: 4551 trans->block_rsv = NULL; 4552 trans->bytes_reserved = 0; 4553 ret = btrfs_end_transaction(trans); 4554 inode->i_flags |= S_DEAD; 4555 out_release: 4556 btrfs_subvolume_release_metadata(root, &block_rsv); 4557 out_up_write: 4558 up_write(&fs_info->subvol_sem); 4559 if (ret) { 4560 spin_lock(&dest->root_item_lock); 4561 root_flags = btrfs_root_flags(&dest->root_item); 4562 btrfs_set_root_flags(&dest->root_item, 4563 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4564 spin_unlock(&dest->root_item_lock); 4565 } else { 4566 d_invalidate(dentry); 4567 btrfs_prune_dentries(dest); 4568 ASSERT(dest->send_in_progress == 0); 4569 } 4570 4571 return ret; 4572 } 4573 4574 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4575 { 4576 struct inode *inode = d_inode(dentry); 4577 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 4578 int err = 0; 4579 struct btrfs_trans_handle *trans; 4580 u64 last_unlink_trans; 4581 struct fscrypt_name fname; 4582 4583 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4584 return -ENOTEMPTY; 4585 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) { 4586 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { 4587 btrfs_err(fs_info, 4588 "extent tree v2 doesn't support snapshot deletion yet"); 4589 return -EOPNOTSUPP; 4590 } 4591 return btrfs_delete_subvolume(BTRFS_I(dir), dentry); 4592 } 4593 4594 err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4595 if (err) 4596 return err; 4597 4598 /* This needs to handle no-key deletions later on */ 4599 4600 trans = __unlink_start_trans(BTRFS_I(dir)); 4601 if (IS_ERR(trans)) { 4602 err = PTR_ERR(trans); 4603 goto out_notrans; 4604 } 4605 4606 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 
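		/*
		 * This is a placeholder directory for a subvolume that was
		 * not loaded when the snapshot was made; route the removal
		 * through the subvolume unlink path.
		 */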
4607 err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); 4608 goto out; 4609 } 4610 4611 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 4612 if (err) 4613 goto out; 4614 4615 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4616 4617 /* now the directory is empty */ 4618 err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4619 &fname.disk_name); 4620 if (!err) { 4621 btrfs_i_size_write(BTRFS_I(inode), 0); 4622 /* 4623 * Propagate the last_unlink_trans value of the deleted dir to 4624 * its parent directory. This is to prevent an unrecoverable 4625 * log tree in the case we do something like this: 4626 * 1) create dir foo 4627 * 2) create snapshot under dir foo 4628 * 3) delete the snapshot 4629 * 4) rmdir foo 4630 * 5) mkdir foo 4631 * 6) fsync foo or some file inside foo 4632 */ 4633 if (last_unlink_trans >= trans->transid) 4634 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4635 } 4636 out: 4637 btrfs_end_transaction(trans); 4638 out_notrans: 4639 btrfs_btree_balance_dirty(fs_info); 4640 fscrypt_free_filename(&fname); 4641 4642 return err; 4643 } 4644 4645 /* 4646 * btrfs_truncate_block - read, zero a chunk and write a block 4647 * @inode - inode that we're zeroing 4648 * @from - the offset to start zeroing 4649 * @len - the length to zero, 0 to zero the entire range respective to the 4650 * offset 4651 * @front - zero up to the offset instead of from the offset on 4652 * 4653 * This will find the block for the "from" offset and cow the block and zero the 4654 * part we want to zero. This is used with truncate and hole punching. 4655 */ 4656 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, 4657 int front) 4658 { 4659 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4660 struct address_space *mapping = inode->vfs_inode.i_mapping; 4661 struct extent_io_tree *io_tree = &inode->io_tree; 4662 struct btrfs_ordered_extent *ordered; 4663 struct extent_state *cached_state = NULL; 4664 struct extent_changeset *data_reserved = NULL; 4665 bool only_release_metadata = false; 4666 u32 blocksize = fs_info->sectorsize; 4667 pgoff_t index = from >> PAGE_SHIFT; 4668 unsigned offset = from & (blocksize - 1); 4669 struct page *page; 4670 gfp_t mask = btrfs_alloc_write_mask(mapping); 4671 size_t write_bytes = blocksize; 4672 int ret = 0; 4673 u64 block_start; 4674 u64 block_end; 4675 4676 if (IS_ALIGNED(offset, blocksize) && 4677 (!len || IS_ALIGNED(len, blocksize))) 4678 goto out; 4679 4680 block_start = round_down(from, blocksize); 4681 block_end = block_start + blocksize - 1; 4682 4683 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, 4684 blocksize, false); 4685 if (ret < 0) { 4686 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) { 4687 /* For nocow case, no need to reserve data space */ 4688 only_release_metadata = true; 4689 } else { 4690 goto out; 4691 } 4692 } 4693 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false); 4694 if (ret < 0) { 4695 if (!only_release_metadata) 4696 btrfs_free_reserved_data_space(inode, data_reserved, 4697 block_start, blocksize); 4698 goto out; 4699 } 4700 again: 4701 page = find_or_create_page(mapping, index, mask); 4702 if (!page) { 4703 btrfs_delalloc_release_space(inode, data_reserved, block_start, 4704 blocksize, true); 4705 btrfs_delalloc_release_extents(inode, blocksize); 4706 ret = -ENOMEM; 4707 goto out; 4708 } 4709 4710 if (!PageUptodate(page)) { 4711 ret = btrfs_read_folio(NULL, page_folio(page)); 4712 lock_page(page); 4713 if 
(page->mapping != mapping) { 4714 unlock_page(page); 4715 put_page(page); 4716 goto again; 4717 } 4718 if (!PageUptodate(page)) { 4719 ret = -EIO; 4720 goto out_unlock; 4721 } 4722 } 4723 4724 /* 4725 * We unlock the page after the io is completed and then re-lock it 4726 * above. release_folio() could have come in between that and cleared 4727 * PagePrivate(), but left the page in the mapping. Set the page mapped 4728 * here to make sure it's properly set for the subpage stuff. 4729 */ 4730 ret = set_page_extent_mapped(page); 4731 if (ret < 0) 4732 goto out_unlock; 4733 4734 wait_on_page_writeback(page); 4735 4736 lock_extent(io_tree, block_start, block_end, &cached_state); 4737 4738 ordered = btrfs_lookup_ordered_extent(inode, block_start); 4739 if (ordered) { 4740 unlock_extent(io_tree, block_start, block_end, &cached_state); 4741 unlock_page(page); 4742 put_page(page); 4743 btrfs_start_ordered_extent(ordered); 4744 btrfs_put_ordered_extent(ordered); 4745 goto again; 4746 } 4747 4748 clear_extent_bit(&inode->io_tree, block_start, block_end, 4749 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4750 &cached_state); 4751 4752 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 4753 &cached_state); 4754 if (ret) { 4755 unlock_extent(io_tree, block_start, block_end, &cached_state); 4756 goto out_unlock; 4757 } 4758 4759 if (offset != blocksize) { 4760 if (!len) 4761 len = blocksize - offset; 4762 if (front) 4763 memzero_page(page, (block_start - page_offset(page)), 4764 offset); 4765 else 4766 memzero_page(page, (block_start - page_offset(page)) + offset, 4767 len); 4768 } 4769 btrfs_page_clear_checked(fs_info, page, block_start, 4770 block_end + 1 - block_start); 4771 btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start); 4772 unlock_extent(io_tree, block_start, block_end, &cached_state); 4773 4774 if (only_release_metadata) 4775 set_extent_bit(&inode->io_tree, block_start, block_end, 4776 EXTENT_NORESERVE, NULL); 4777 4778 out_unlock: 4779 if (ret) { 4780 if (only_release_metadata) 4781 btrfs_delalloc_release_metadata(inode, blocksize, true); 4782 else 4783 btrfs_delalloc_release_space(inode, data_reserved, 4784 block_start, blocksize, true); 4785 } 4786 btrfs_delalloc_release_extents(inode, blocksize); 4787 unlock_page(page); 4788 put_page(page); 4789 out: 4790 if (only_release_metadata) 4791 btrfs_check_nocow_unlock(inode); 4792 extent_changeset_free(data_reserved); 4793 return ret; 4794 } 4795 4796 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode, 4797 u64 offset, u64 len) 4798 { 4799 struct btrfs_fs_info *fs_info = root->fs_info; 4800 struct btrfs_trans_handle *trans; 4801 struct btrfs_drop_extents_args drop_args = { 0 }; 4802 int ret; 4803 4804 /* 4805 * If NO_HOLES is enabled, we don't need to do anything. 4806 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans() 4807 * or btrfs_update_inode() will be called, which guarantee that the next 4808 * fsync will know this inode was changed and needs to be logged. 4809 */ 4810 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 4811 return 0; 4812 4813 /* 4814 * 1 - for the one we're dropping 4815 * 1 - for the one we're adding 4816 * 1 - for updating the inode. 
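 *
 * Hence the 3 units passed to btrfs_start_transaction() below.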
4817 */ 4818 trans = btrfs_start_transaction(root, 3); 4819 if (IS_ERR(trans)) 4820 return PTR_ERR(trans); 4821 4822 drop_args.start = offset; 4823 drop_args.end = offset + len; 4824 drop_args.drop_cache = true; 4825 4826 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 4827 if (ret) { 4828 btrfs_abort_transaction(trans, ret); 4829 btrfs_end_transaction(trans); 4830 return ret; 4831 } 4832 4833 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len); 4834 if (ret) { 4835 btrfs_abort_transaction(trans, ret); 4836 } else { 4837 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found); 4838 btrfs_update_inode(trans, root, inode); 4839 } 4840 btrfs_end_transaction(trans); 4841 return ret; 4842 } 4843 4844 /* 4845 * This function puts in dummy file extents for the area we're creating a hole 4846 * for. So if we are truncating this file to a larger size we need to insert 4847 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 4848 * the range between oldsize and size 4849 */ 4850 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) 4851 { 4852 struct btrfs_root *root = inode->root; 4853 struct btrfs_fs_info *fs_info = root->fs_info; 4854 struct extent_io_tree *io_tree = &inode->io_tree; 4855 struct extent_map *em = NULL; 4856 struct extent_state *cached_state = NULL; 4857 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 4858 u64 block_end = ALIGN(size, fs_info->sectorsize); 4859 u64 last_byte; 4860 u64 cur_offset; 4861 u64 hole_size; 4862 int err = 0; 4863 4864 /* 4865 * If our size started in the middle of a block we need to zero out the 4866 * rest of the block before we expand the i_size, otherwise we could 4867 * expose stale data. 4868 */ 4869 err = btrfs_truncate_block(inode, oldsize, 0, 0); 4870 if (err) 4871 return err; 4872 4873 if (size <= hole_start) 4874 return 0; 4875 4876 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, 4877 &cached_state); 4878 cur_offset = hole_start; 4879 while (1) { 4880 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 4881 block_end - cur_offset); 4882 if (IS_ERR(em)) { 4883 err = PTR_ERR(em); 4884 em = NULL; 4885 break; 4886 } 4887 last_byte = min(extent_map_end(em), block_end); 4888 last_byte = ALIGN(last_byte, fs_info->sectorsize); 4889 hole_size = last_byte - cur_offset; 4890 4891 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 4892 struct extent_map *hole_em; 4893 4894 err = maybe_insert_hole(root, inode, cur_offset, 4895 hole_size); 4896 if (err) 4897 break; 4898 4899 err = btrfs_inode_set_file_extent_range(inode, 4900 cur_offset, hole_size); 4901 if (err) 4902 break; 4903 4904 hole_em = alloc_extent_map(); 4905 if (!hole_em) { 4906 btrfs_drop_extent_map_range(inode, cur_offset, 4907 cur_offset + hole_size - 1, 4908 false); 4909 btrfs_set_inode_full_sync(inode); 4910 goto next; 4911 } 4912 hole_em->start = cur_offset; 4913 hole_em->len = hole_size; 4914 hole_em->orig_start = cur_offset; 4915 4916 hole_em->block_start = EXTENT_MAP_HOLE; 4917 hole_em->block_len = 0; 4918 hole_em->orig_block_len = 0; 4919 hole_em->ram_bytes = hole_size; 4920 hole_em->compress_type = BTRFS_COMPRESS_NONE; 4921 hole_em->generation = fs_info->generation; 4922 4923 err = btrfs_replace_extent_map_range(inode, hole_em, true); 4924 free_extent_map(hole_em); 4925 } else { 4926 err = btrfs_inode_set_file_extent_range(inode, 4927 cur_offset, hole_size); 4928 if (err) 4929 break; 4930 } 4931 next: 4932 free_extent_map(em); 4933 em = NULL; 4934 cur_offset = last_byte; 4935 if 
(cur_offset >= block_end) 4936 break; 4937 } 4938 free_extent_map(em); 4939 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); 4940 return err; 4941 } 4942 4943 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 4944 { 4945 struct btrfs_root *root = BTRFS_I(inode)->root; 4946 struct btrfs_trans_handle *trans; 4947 loff_t oldsize = i_size_read(inode); 4948 loff_t newsize = attr->ia_size; 4949 int mask = attr->ia_valid; 4950 int ret; 4951 4952 /* 4953 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 4954 * special case where we need to update the times despite not having 4955 * these flags set. For all other operations the VFS set these flags 4956 * explicitly if it wants a timestamp update. 4957 */ 4958 if (newsize != oldsize) { 4959 inode_inc_iversion(inode); 4960 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) { 4961 inode->i_mtime = current_time(inode); 4962 inode->i_ctime = inode->i_mtime; 4963 } 4964 } 4965 4966 if (newsize > oldsize) { 4967 /* 4968 * Don't do an expanding truncate while snapshotting is ongoing. 4969 * This is to ensure the snapshot captures a fully consistent 4970 * state of this file - if the snapshot captures this expanding 4971 * truncation, it must capture all writes that happened before 4972 * this truncation. 4973 */ 4974 btrfs_drew_write_lock(&root->snapshot_lock); 4975 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize); 4976 if (ret) { 4977 btrfs_drew_write_unlock(&root->snapshot_lock); 4978 return ret; 4979 } 4980 4981 trans = btrfs_start_transaction(root, 1); 4982 if (IS_ERR(trans)) { 4983 btrfs_drew_write_unlock(&root->snapshot_lock); 4984 return PTR_ERR(trans); 4985 } 4986 4987 i_size_write(inode, newsize); 4988 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 4989 pagecache_isize_extended(inode, oldsize, newsize); 4990 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 4991 btrfs_drew_write_unlock(&root->snapshot_lock); 4992 btrfs_end_transaction(trans); 4993 } else { 4994 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 4995 4996 if (btrfs_is_zoned(fs_info)) { 4997 ret = btrfs_wait_ordered_range(inode, 4998 ALIGN(newsize, fs_info->sectorsize), 4999 (u64)-1); 5000 if (ret) 5001 return ret; 5002 } 5003 5004 /* 5005 * We're truncating a file that used to have good data down to 5006 * zero. Make sure any new writes to the file get on disk 5007 * on close. 5008 */ 5009 if (newsize == 0) 5010 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 5011 &BTRFS_I(inode)->runtime_flags); 5012 5013 truncate_setsize(inode, newsize); 5014 5015 inode_dio_wait(inode); 5016 5017 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize); 5018 if (ret && inode->i_nlink) { 5019 int err; 5020 5021 /* 5022 * Truncate failed, so fix up the in-memory size. We 5023 * adjusted disk_i_size down as we removed extents, so 5024 * wait for disk_i_size to be stable and then update the 5025 * in-memory size to match. 
		 */
			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
			if (err)
				return err;
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
		}
	}

	return ret;
}

static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(idmap, inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(BTRFS_I(inode));

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
	}

	return err;
}

/*
 * While truncating the inode pages during eviction, we get the VFS calling
 * btrfs_invalidate_folio() against each folio of the inode. This is slow
 * because the calls to btrfs_invalidate_folio() result in a huge number of
 * calls to lock_extent() and clear_extent_bit(), which keep merging and
 * splitting extent_state structures over and over, wasting lots of time.
 *
 * Therefore, if the inode is being evicted, let btrfs_invalidate_folio()
 * skip all those expensive operations on a per-folio basis and do only the
 * ordered io finishing, while we release here the extent_map and
 * extent_state structures, without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);

	/*
	 * Keep looping until we have no more ranges in the io tree. We can
	 * have ongoing bios started by readahead whose endio callback
	 * (extent_io.c:end_bio_extent_readpage) is still in progress (it has
	 * unlocked the pages in the bio but has not yet unlocked the ranges
	 * in the io tree). Ranges can therefore still be locked when eviction
	 * starts, because before submitting those bios, which are executed by
	 * a separate task (a work queue kthread), no inode references
	 * (inode->i_count) were taken (they would be dropped in the end io
	 * callback of each bio). So here we effectively end up waiting for
	 * those bios, and for anyone else holding locked ranges without
	 * having bumped the inode's reference count: if we don't, then when
	 * they access the inode's io_tree to unlock a range it may be too
	 * late, leading to a use-after-free issue.
5107 */ 5108 spin_lock(&io_tree->lock); 5109 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5110 struct extent_state *state; 5111 struct extent_state *cached_state = NULL; 5112 u64 start; 5113 u64 end; 5114 unsigned state_flags; 5115 5116 node = rb_first(&io_tree->state); 5117 state = rb_entry(node, struct extent_state, rb_node); 5118 start = state->start; 5119 end = state->end; 5120 state_flags = state->state; 5121 spin_unlock(&io_tree->lock); 5122 5123 lock_extent(io_tree, start, end, &cached_state); 5124 5125 /* 5126 * If still has DELALLOC flag, the extent didn't reach disk, 5127 * and its reserved space won't be freed by delayed_ref. 5128 * So we need to free its reserved space here. 5129 * (Refer to comment in btrfs_invalidate_folio, case 2) 5130 * 5131 * Note, end is the bytenr of last byte, so we need + 1 here. 5132 */ 5133 if (state_flags & EXTENT_DELALLOC) 5134 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5135 end - start + 1); 5136 5137 clear_extent_bit(io_tree, start, end, 5138 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, 5139 &cached_state); 5140 5141 cond_resched(); 5142 spin_lock(&io_tree->lock); 5143 } 5144 spin_unlock(&io_tree->lock); 5145 } 5146 5147 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5148 struct btrfs_block_rsv *rsv) 5149 { 5150 struct btrfs_fs_info *fs_info = root->fs_info; 5151 struct btrfs_trans_handle *trans; 5152 u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1); 5153 int ret; 5154 5155 /* 5156 * Eviction should be taking place at some place safe because of our 5157 * delayed iputs. However the normal flushing code will run delayed 5158 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5159 * 5160 * We reserve the delayed_refs_extra here again because we can't use 5161 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5162 * above. We reserve our extra bit here because we generate a ton of 5163 * delayed refs activity by truncating. 5164 * 5165 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can, 5166 * if we fail to make this reservation we can re-try without the 5167 * delayed_refs_extra so we can make some forward progress. 
5168 */ 5169 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, 5170 BTRFS_RESERVE_FLUSH_EVICT); 5171 if (ret) { 5172 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, 5173 BTRFS_RESERVE_FLUSH_EVICT); 5174 if (ret) { 5175 btrfs_warn(fs_info, 5176 "could not allocate space for delete; will truncate on mount"); 5177 return ERR_PTR(-ENOSPC); 5178 } 5179 delayed_refs_extra = 0; 5180 } 5181 5182 trans = btrfs_join_transaction(root); 5183 if (IS_ERR(trans)) 5184 return trans; 5185 5186 if (delayed_refs_extra) { 5187 trans->block_rsv = &fs_info->trans_block_rsv; 5188 trans->bytes_reserved = delayed_refs_extra; 5189 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5190 delayed_refs_extra, true); 5191 } 5192 return trans; 5193 } 5194 5195 void btrfs_evict_inode(struct inode *inode) 5196 { 5197 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5198 struct btrfs_trans_handle *trans; 5199 struct btrfs_root *root = BTRFS_I(inode)->root; 5200 struct btrfs_block_rsv *rsv = NULL; 5201 int ret; 5202 5203 trace_btrfs_inode_evict(inode); 5204 5205 if (!root) { 5206 fsverity_cleanup_inode(inode); 5207 clear_inode(inode); 5208 return; 5209 } 5210 5211 evict_inode_truncate_pages(inode); 5212 5213 if (inode->i_nlink && 5214 ((btrfs_root_refs(&root->root_item) != 0 && 5215 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || 5216 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5217 goto out; 5218 5219 if (is_bad_inode(inode)) 5220 goto out; 5221 5222 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5223 goto out; 5224 5225 if (inode->i_nlink > 0) { 5226 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5227 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); 5228 goto out; 5229 } 5230 5231 /* 5232 * This makes sure the inode item in tree is uptodate and the space for 5233 * the inode update is released. 5234 */ 5235 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5236 if (ret) 5237 goto out; 5238 5239 /* 5240 * This drops any pending insert or delete operations we have for this 5241 * inode. We could have a delayed dir index deletion queued up, but 5242 * we're removing the inode completely so that'll be taken care of in 5243 * the truncate. 5244 */ 5245 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 5246 5247 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5248 if (!rsv) 5249 goto out; 5250 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 5251 rsv->failfast = true; 5252 5253 btrfs_i_size_write(BTRFS_I(inode), 0); 5254 5255 while (1) { 5256 struct btrfs_truncate_control control = { 5257 .inode = BTRFS_I(inode), 5258 .ino = btrfs_ino(BTRFS_I(inode)), 5259 .new_size = 0, 5260 .min_type = 0, 5261 }; 5262 5263 trans = evict_refill_and_join(root, rsv); 5264 if (IS_ERR(trans)) 5265 goto out; 5266 5267 trans->block_rsv = rsv; 5268 5269 ret = btrfs_truncate_inode_items(trans, root, &control); 5270 trans->block_rsv = &fs_info->trans_block_rsv; 5271 btrfs_end_transaction(trans); 5272 /* 5273 * We have not added new delayed items for our inode after we 5274 * have flushed its delayed items, so no need to throttle on 5275 * delayed items. However we have modified extent buffers. 5276 */ 5277 btrfs_btree_balance_dirty_nodelay(fs_info); 5278 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5279 goto out; 5280 else if (!ret) 5281 break; 5282 } 5283 5284 /* 5285 * Errors here aren't a big deal, it just means we leave orphan items in 5286 * the tree. They will be cleaned up on the next mount. 
If the inode 5287 * number gets reused, cleanup deletes the orphan item without doing 5288 * anything, and unlink reuses the existing orphan item. 5289 * 5290 * If it turns out that we are dropping too many of these, we might want 5291 * to add a mechanism for retrying these after a commit. 5292 */ 5293 trans = evict_refill_and_join(root, rsv); 5294 if (!IS_ERR(trans)) { 5295 trans->block_rsv = rsv; 5296 btrfs_orphan_del(trans, BTRFS_I(inode)); 5297 trans->block_rsv = &fs_info->trans_block_rsv; 5298 btrfs_end_transaction(trans); 5299 } 5300 5301 out: 5302 btrfs_free_block_rsv(fs_info, rsv); 5303 /* 5304 * If we didn't successfully delete, the orphan item will still be in 5305 * the tree and we'll retry on the next mount. Again, we might also want 5306 * to retry these periodically in the future. 5307 */ 5308 btrfs_remove_delayed_node(BTRFS_I(inode)); 5309 fsverity_cleanup_inode(inode); 5310 clear_inode(inode); 5311 } 5312 5313 /* 5314 * Return the key found in the dir entry in the location pointer, fill @type 5315 * with BTRFS_FT_*, and return 0. 5316 * 5317 * If no dir entries were found, returns -ENOENT. 5318 * If found a corrupted location in dir entry, returns -EUCLEAN. 5319 */ 5320 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, 5321 struct btrfs_key *location, u8 *type) 5322 { 5323 struct btrfs_dir_item *di; 5324 struct btrfs_path *path; 5325 struct btrfs_root *root = dir->root; 5326 int ret = 0; 5327 struct fscrypt_name fname; 5328 5329 path = btrfs_alloc_path(); 5330 if (!path) 5331 return -ENOMEM; 5332 5333 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 5334 if (ret < 0) 5335 goto out; 5336 /* 5337 * fscrypt_setup_filename() should never return a positive value, but 5338 * gcc on sparc/parisc thinks it can, so assert that doesn't happen. 5339 */ 5340 ASSERT(ret == 0); 5341 5342 /* This needs to handle no-key deletions later on */ 5343 5344 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), 5345 &fname.disk_name, 0); 5346 if (IS_ERR_OR_NULL(di)) { 5347 ret = di ? PTR_ERR(di) : -ENOENT; 5348 goto out; 5349 } 5350 5351 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5352 if (location->type != BTRFS_INODE_ITEM_KEY && 5353 location->type != BTRFS_ROOT_ITEM_KEY) { 5354 ret = -EUCLEAN; 5355 btrfs_warn(root->fs_info, 5356 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5357 __func__, fname.disk_name.name, btrfs_ino(dir), 5358 location->objectid, location->type, location->offset); 5359 } 5360 if (!ret) 5361 *type = btrfs_dir_ftype(path->nodes[0], di); 5362 out: 5363 fscrypt_free_filename(&fname); 5364 btrfs_free_path(path); 5365 return ret; 5366 } 5367 5368 /* 5369 * when we hit a tree root in a directory, the btrfs part of the inode 5370 * needs to be changed to reflect the root directory of the tree root. This 5371 * is kind of like crossing a mount point. 
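 *
 * For example (illustrative), a lookup of "snap" in a directory where
 * "snap" is a subvolume finds a dir item pointing at a ROOT_ITEM key; this
 * helper swaps in the subvolume's root and rewrites the location to the
 * INODE_ITEM key of that subvolume's root directory.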
5372 */ 5373 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5374 struct btrfs_inode *dir, 5375 struct dentry *dentry, 5376 struct btrfs_key *location, 5377 struct btrfs_root **sub_root) 5378 { 5379 struct btrfs_path *path; 5380 struct btrfs_root *new_root; 5381 struct btrfs_root_ref *ref; 5382 struct extent_buffer *leaf; 5383 struct btrfs_key key; 5384 int ret; 5385 int err = 0; 5386 struct fscrypt_name fname; 5387 5388 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); 5389 if (ret) 5390 return ret; 5391 5392 path = btrfs_alloc_path(); 5393 if (!path) { 5394 err = -ENOMEM; 5395 goto out; 5396 } 5397 5398 err = -ENOENT; 5399 key.objectid = dir->root->root_key.objectid; 5400 key.type = BTRFS_ROOT_REF_KEY; 5401 key.offset = location->objectid; 5402 5403 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5404 if (ret) { 5405 if (ret < 0) 5406 err = ret; 5407 goto out; 5408 } 5409 5410 leaf = path->nodes[0]; 5411 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5412 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5413 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len) 5414 goto out; 5415 5416 ret = memcmp_extent_buffer(leaf, fname.disk_name.name, 5417 (unsigned long)(ref + 1), fname.disk_name.len); 5418 if (ret) 5419 goto out; 5420 5421 btrfs_release_path(path); 5422 5423 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5424 if (IS_ERR(new_root)) { 5425 err = PTR_ERR(new_root); 5426 goto out; 5427 } 5428 5429 *sub_root = new_root; 5430 location->objectid = btrfs_root_dirid(&new_root->root_item); 5431 location->type = BTRFS_INODE_ITEM_KEY; 5432 location->offset = 0; 5433 err = 0; 5434 out: 5435 btrfs_free_path(path); 5436 fscrypt_free_filename(&fname); 5437 return err; 5438 } 5439 5440 static void inode_tree_add(struct btrfs_inode *inode) 5441 { 5442 struct btrfs_root *root = inode->root; 5443 struct btrfs_inode *entry; 5444 struct rb_node **p; 5445 struct rb_node *parent; 5446 struct rb_node *new = &inode->rb_node; 5447 u64 ino = btrfs_ino(inode); 5448 5449 if (inode_unhashed(&inode->vfs_inode)) 5450 return; 5451 parent = NULL; 5452 spin_lock(&root->inode_lock); 5453 p = &root->inode_tree.rb_node; 5454 while (*p) { 5455 parent = *p; 5456 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5457 5458 if (ino < btrfs_ino(entry)) 5459 p = &parent->rb_left; 5460 else if (ino > btrfs_ino(entry)) 5461 p = &parent->rb_right; 5462 else { 5463 WARN_ON(!(entry->vfs_inode.i_state & 5464 (I_WILL_FREE | I_FREEING))); 5465 rb_replace_node(parent, new, &root->inode_tree); 5466 RB_CLEAR_NODE(parent); 5467 spin_unlock(&root->inode_lock); 5468 return; 5469 } 5470 } 5471 rb_link_node(new, parent, p); 5472 rb_insert_color(new, &root->inode_tree); 5473 spin_unlock(&root->inode_lock); 5474 } 5475 5476 static void inode_tree_del(struct btrfs_inode *inode) 5477 { 5478 struct btrfs_root *root = inode->root; 5479 int empty = 0; 5480 5481 spin_lock(&root->inode_lock); 5482 if (!RB_EMPTY_NODE(&inode->rb_node)) { 5483 rb_erase(&inode->rb_node, &root->inode_tree); 5484 RB_CLEAR_NODE(&inode->rb_node); 5485 empty = RB_EMPTY_ROOT(&root->inode_tree); 5486 } 5487 spin_unlock(&root->inode_lock); 5488 5489 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5490 spin_lock(&root->inode_lock); 5491 empty = RB_EMPTY_ROOT(&root->inode_tree); 5492 spin_unlock(&root->inode_lock); 5493 if (empty) 5494 btrfs_add_dead_root(root); 5495 } 5496 } 5497 5498 5499 static int btrfs_init_locked_inode(struct inode *inode, void *p) 
5500 { 5501 struct btrfs_iget_args *args = p; 5502 5503 inode->i_ino = args->ino; 5504 BTRFS_I(inode)->location.objectid = args->ino; 5505 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 5506 BTRFS_I(inode)->location.offset = 0; 5507 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5508 BUG_ON(args->root && !BTRFS_I(inode)->root); 5509 5510 if (args->root && args->root == args->root->fs_info->tree_root && 5511 args->ino != BTRFS_BTREE_INODE_OBJECTID) 5512 set_bit(BTRFS_INODE_FREE_SPACE_INODE, 5513 &BTRFS_I(inode)->runtime_flags); 5514 return 0; 5515 } 5516 5517 static int btrfs_find_actor(struct inode *inode, void *opaque) 5518 { 5519 struct btrfs_iget_args *args = opaque; 5520 5521 return args->ino == BTRFS_I(inode)->location.objectid && 5522 args->root == BTRFS_I(inode)->root; 5523 } 5524 5525 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino, 5526 struct btrfs_root *root) 5527 { 5528 struct inode *inode; 5529 struct btrfs_iget_args args; 5530 unsigned long hashval = btrfs_inode_hash(ino, root); 5531 5532 args.ino = ino; 5533 args.root = root; 5534 5535 inode = iget5_locked(s, hashval, btrfs_find_actor, 5536 btrfs_init_locked_inode, 5537 (void *)&args); 5538 return inode; 5539 } 5540 5541 /* 5542 * Get an inode object given its inode number and corresponding root. 5543 * Path can be preallocated to prevent recursing back to iget through 5544 * allocator. NULL is also valid but may require an additional allocation 5545 * later. 5546 */ 5547 struct inode *btrfs_iget_path(struct super_block *s, u64 ino, 5548 struct btrfs_root *root, struct btrfs_path *path) 5549 { 5550 struct inode *inode; 5551 5552 inode = btrfs_iget_locked(s, ino, root); 5553 if (!inode) 5554 return ERR_PTR(-ENOMEM); 5555 5556 if (inode->i_state & I_NEW) { 5557 int ret; 5558 5559 ret = btrfs_read_locked_inode(inode, path); 5560 if (!ret) { 5561 inode_tree_add(BTRFS_I(inode)); 5562 unlock_new_inode(inode); 5563 } else { 5564 iget_failed(inode); 5565 /* 5566 * ret > 0 can come from btrfs_search_slot called by 5567 * btrfs_read_locked_inode, this means the inode item 5568 * was not found. 
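 *
 * A minimal sketch of the contract callers rely on (nothing beyond
 * what this function implements): positive search results never
 * escape, they are folded into -ENOENT just below, so a caller only
 * has to deal with an ERR_PTR:
 *
 *	inode = btrfs_iget(sb, ino, root);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);	// e.g. -ENOENT, -ENOMEM, -EIO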
5569 */ 5570 if (ret > 0) 5571 ret = -ENOENT; 5572 inode = ERR_PTR(ret); 5573 } 5574 } 5575 5576 return inode; 5577 } 5578 5579 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root) 5580 { 5581 return btrfs_iget_path(s, ino, root, NULL); 5582 } 5583 5584 static struct inode *new_simple_dir(struct inode *dir, 5585 struct btrfs_key *key, 5586 struct btrfs_root *root) 5587 { 5588 struct inode *inode = new_inode(dir->i_sb); 5589 5590 if (!inode) 5591 return ERR_PTR(-ENOMEM); 5592 5593 BTRFS_I(inode)->root = btrfs_grab_root(root); 5594 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5595 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5596 5597 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5598 /* 5599 * We only need lookup, the rest is read-only and there's no inode 5600 * associated with the dentry 5601 */ 5602 inode->i_op = &simple_dir_inode_operations; 5603 inode->i_opflags &= ~IOP_XATTR; 5604 inode->i_fop = &simple_dir_operations; 5605 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5606 inode->i_mtime = current_time(inode); 5607 inode->i_atime = dir->i_atime; 5608 inode->i_ctime = dir->i_ctime; 5609 BTRFS_I(inode)->i_otime = inode->i_mtime; 5610 inode->i_uid = dir->i_uid; 5611 inode->i_gid = dir->i_gid; 5612 5613 return inode; 5614 } 5615 5616 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN); 5617 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE); 5618 static_assert(BTRFS_FT_DIR == FT_DIR); 5619 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV); 5620 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV); 5621 static_assert(BTRFS_FT_FIFO == FT_FIFO); 5622 static_assert(BTRFS_FT_SOCK == FT_SOCK); 5623 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK); 5624 5625 static inline u8 btrfs_inode_type(struct inode *inode) 5626 { 5627 return fs_umode_to_ftype(inode->i_mode); 5628 } 5629 5630 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5631 { 5632 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 5633 struct inode *inode; 5634 struct btrfs_root *root = BTRFS_I(dir)->root; 5635 struct btrfs_root *sub_root = root; 5636 struct btrfs_key location; 5637 u8 di_type = 0; 5638 int ret = 0; 5639 5640 if (dentry->d_name.len > BTRFS_NAME_LEN) 5641 return ERR_PTR(-ENAMETOOLONG); 5642 5643 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type); 5644 if (ret < 0) 5645 return ERR_PTR(ret); 5646 5647 if (location.type == BTRFS_INODE_ITEM_KEY) { 5648 inode = btrfs_iget(dir->i_sb, location.objectid, root); 5649 if (IS_ERR(inode)) 5650 return inode; 5651 5652 /* Do extra check against inode mode with di_type */ 5653 if (btrfs_inode_type(inode) != di_type) { 5654 btrfs_crit(fs_info, 5655 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5656 inode->i_mode, btrfs_inode_type(inode), 5657 di_type); 5658 iput(inode); 5659 return ERR_PTR(-EUCLEAN); 5660 } 5661 return inode; 5662 } 5663 5664 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry, 5665 &location, &sub_root); 5666 if (ret < 0) { 5667 if (ret != -ENOENT) 5668 inode = ERR_PTR(ret); 5669 else 5670 inode = new_simple_dir(dir, &location, root); 5671 } else { 5672 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); 5673 btrfs_put_root(sub_root); 5674 5675 if (IS_ERR(inode)) 5676 return inode; 5677 5678 down_read(&fs_info->cleanup_work_sem); 5679 if (!sb_rdonly(inode->i_sb)) 5680 ret = btrfs_orphan_cleanup(sub_root); 5681 up_read(&fs_info->cleanup_work_sem); 5682 if (ret) { 5683 iput(inode); 5684 inode = ERR_PTR(ret); 5685 } 5686 } 5687 5688 return 
inode; 5689 } 5690 5691 static int btrfs_dentry_delete(const struct dentry *dentry) 5692 { 5693 struct btrfs_root *root; 5694 struct inode *inode = d_inode(dentry); 5695 5696 if (!inode && !IS_ROOT(dentry)) 5697 inode = d_inode(dentry->d_parent); 5698 5699 if (inode) { 5700 root = BTRFS_I(inode)->root; 5701 if (btrfs_root_refs(&root->root_item) == 0) 5702 return 1; 5703 5704 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5705 return 1; 5706 } 5707 return 0; 5708 } 5709 5710 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5711 unsigned int flags) 5712 { 5713 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5714 5715 if (inode == ERR_PTR(-ENOENT)) 5716 inode = NULL; 5717 return d_splice_alias(inode, dentry); 5718 } 5719 5720 /* 5721 * Find the highest existing sequence number in a directory and then set the 5722 * in-memory index_cnt variable to the first free sequence number. 5723 */ 5724 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 5725 { 5726 struct btrfs_root *root = inode->root; 5727 struct btrfs_key key, found_key; 5728 struct btrfs_path *path; 5729 struct extent_buffer *leaf; 5730 int ret; 5731 5732 key.objectid = btrfs_ino(inode); 5733 key.type = BTRFS_DIR_INDEX_KEY; 5734 key.offset = (u64)-1; 5735 5736 path = btrfs_alloc_path(); 5737 if (!path) 5738 return -ENOMEM; 5739 5740 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5741 if (ret < 0) 5742 goto out; 5743 /* FIXME: we should be able to handle this */ 5744 if (ret == 0) 5745 goto out; 5746 ret = 0; 5747 5748 if (path->slots[0] == 0) { 5749 inode->index_cnt = BTRFS_DIR_START_INDEX; 5750 goto out; 5751 } 5752 5753 path->slots[0]--; 5754 5755 leaf = path->nodes[0]; 5756 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5757 5758 if (found_key.objectid != btrfs_ino(inode) || 5759 found_key.type != BTRFS_DIR_INDEX_KEY) { 5760 inode->index_cnt = BTRFS_DIR_START_INDEX; 5761 goto out; 5762 } 5763 5764 inode->index_cnt = found_key.offset + 1; 5765 out: 5766 btrfs_free_path(path); 5767 return ret; 5768 } 5769 5770 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) 5771 { 5772 if (dir->index_cnt == (u64)-1) { 5773 int ret; 5774 5775 ret = btrfs_inode_delayed_dir_index_count(dir); 5776 if (ret) { 5777 ret = btrfs_set_inode_index_count(dir); 5778 if (ret) 5779 return ret; 5780 } 5781 } 5782 5783 /* index_cnt is the index number of next new entry, so decrement it. */ 5784 *index = dir->index_cnt - 1; 5785 5786 return 0; 5787 } 5788 5789 /* 5790 * All this infrastructure exists because dir_emit can fault, and we are holding 5791 * the tree lock when doing readdir. For now just allocate a buffer and copy 5792 * our information into that, and then dir_emit from the buffer. This is 5793 * similar to what NFS does, only we don't keep the buffer around in pagecache 5794 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5795 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5796 * tree lock. 
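 *
 * For reference, the copy buffer filled below is a single page of
 * entries packed back to back, each a struct dir_entry immediately
 * followed by its (unterminated) name bytes:
 *
 *	+------------------+---------+------------------+---------+--
 *	| struct dir_entry | name... | struct dir_entry | name... |
 *	+------------------+---------+------------------+---------+--
 *
 * btrfs_filldir() then walks the buffer and calls dir_emit() for each
 * packed entry with no tree locks held.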
5797 */ 5798 static int btrfs_opendir(struct inode *inode, struct file *file) 5799 { 5800 struct btrfs_file_private *private; 5801 u64 last_index; 5802 int ret; 5803 5804 ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index); 5805 if (ret) 5806 return ret; 5807 5808 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5809 if (!private) 5810 return -ENOMEM; 5811 private->last_index = last_index; 5812 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5813 if (!private->filldir_buf) { 5814 kfree(private); 5815 return -ENOMEM; 5816 } 5817 file->private_data = private; 5818 return 0; 5819 } 5820 5821 struct dir_entry { 5822 u64 ino; 5823 u64 offset; 5824 unsigned type; 5825 int name_len; 5826 }; 5827 5828 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5829 { 5830 while (entries--) { 5831 struct dir_entry *entry = addr; 5832 char *name = (char *)(entry + 1); 5833 5834 ctx->pos = get_unaligned(&entry->offset); 5835 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5836 get_unaligned(&entry->ino), 5837 get_unaligned(&entry->type))) 5838 return 1; 5839 addr += sizeof(struct dir_entry) + 5840 get_unaligned(&entry->name_len); 5841 ctx->pos++; 5842 } 5843 return 0; 5844 } 5845 5846 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5847 { 5848 struct inode *inode = file_inode(file); 5849 struct btrfs_root *root = BTRFS_I(inode)->root; 5850 struct btrfs_file_private *private = file->private_data; 5851 struct btrfs_dir_item *di; 5852 struct btrfs_key key; 5853 struct btrfs_key found_key; 5854 struct btrfs_path *path; 5855 void *addr; 5856 LIST_HEAD(ins_list); 5857 LIST_HEAD(del_list); 5858 int ret; 5859 char *name_ptr; 5860 int name_len; 5861 int entries = 0; 5862 int total_len = 0; 5863 bool put = false; 5864 struct btrfs_key location; 5865 5866 if (!dir_emit_dots(file, ctx)) 5867 return 0; 5868 5869 path = btrfs_alloc_path(); 5870 if (!path) 5871 return -ENOMEM; 5872 5873 addr = private->filldir_buf; 5874 path->reada = READA_FORWARD; 5875 5876 put = btrfs_readdir_get_delayed_items(inode, private->last_index, 5877 &ins_list, &del_list); 5878 5879 again: 5880 key.type = BTRFS_DIR_INDEX_KEY; 5881 key.offset = ctx->pos; 5882 key.objectid = btrfs_ino(BTRFS_I(inode)); 5883 5884 btrfs_for_each_slot(root, &key, &found_key, path, ret) { 5885 struct dir_entry *entry; 5886 struct extent_buffer *leaf = path->nodes[0]; 5887 u8 ftype; 5888 5889 if (found_key.objectid != key.objectid) 5890 break; 5891 if (found_key.type != BTRFS_DIR_INDEX_KEY) 5892 break; 5893 if (found_key.offset < ctx->pos) 5894 continue; 5895 if (found_key.offset > private->last_index) 5896 break; 5897 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 5898 continue; 5899 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 5900 name_len = btrfs_dir_name_len(leaf, di); 5901 if ((total_len + sizeof(struct dir_entry) + name_len) >= 5902 PAGE_SIZE) { 5903 btrfs_release_path(path); 5904 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5905 if (ret) 5906 goto nopos; 5907 addr = private->filldir_buf; 5908 entries = 0; 5909 total_len = 0; 5910 goto again; 5911 } 5912 5913 ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di)); 5914 entry = addr; 5915 name_ptr = (char *)(entry + 1); 5916 read_extent_buffer(leaf, name_ptr, 5917 (unsigned long)(di + 1), name_len); 5918 put_unaligned(name_len, &entry->name_len); 5919 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type); 5920 btrfs_dir_item_key_to_cpu(leaf, di, &location); 5921 
put_unaligned(location.objectid, &entry->ino); 5922 put_unaligned(found_key.offset, &entry->offset); 5923 entries++; 5924 addr += sizeof(struct dir_entry) + name_len; 5925 total_len += sizeof(struct dir_entry) + name_len; 5926 } 5927 /* Catch error encountered during iteration */ 5928 if (ret < 0) 5929 goto err; 5930 5931 btrfs_release_path(path); 5932 5933 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5934 if (ret) 5935 goto nopos; 5936 5937 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 5938 if (ret) 5939 goto nopos; 5940 5941 /* 5942 * Stop new entries from being returned after we return the last 5943 * entry. 5944 * 5945 * New directory entries are assigned a strictly increasing 5946 * offset. This means that new entries created during readdir 5947 * are *guaranteed* to be seen in the future by that readdir. 5948 * This has broken buggy programs which operate on names as 5949 * they're returned by readdir. Until we re-use freed offsets 5950 * we have this hack to stop new entries from being returned 5951 * under the assumption that they'll never reach this huge 5952 * offset. 5953 * 5954 * This is being careful not to overflow 32bit loff_t unless the 5955 * last entry requires it because doing so has broken 32bit apps 5956 * in the past. 5957 */ 5958 if (ctx->pos >= INT_MAX) 5959 ctx->pos = LLONG_MAX; 5960 else 5961 ctx->pos = INT_MAX; 5962 nopos: 5963 ret = 0; 5964 err: 5965 if (put) 5966 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); 5967 btrfs_free_path(path); 5968 return ret; 5969 } 5970 5971 /* 5972 * This is somewhat expensive, updating the tree every time the 5973 * inode changes. But, it is most likely to find the inode in cache. 5974 * FIXME, needs more benchmarking...there are no reasons other than performance 5975 * to keep or drop this code. 5976 */ 5977 static int btrfs_dirty_inode(struct btrfs_inode *inode) 5978 { 5979 struct btrfs_root *root = inode->root; 5980 struct btrfs_fs_info *fs_info = root->fs_info; 5981 struct btrfs_trans_handle *trans; 5982 int ret; 5983 5984 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) 5985 return 0; 5986 5987 trans = btrfs_join_transaction(root); 5988 if (IS_ERR(trans)) 5989 return PTR_ERR(trans); 5990 5991 ret = btrfs_update_inode(trans, root, inode); 5992 if (ret && (ret == -ENOSPC || ret == -EDQUOT)) { 5993 /* whoops, lets try again with the full transaction */ 5994 btrfs_end_transaction(trans); 5995 trans = btrfs_start_transaction(root, 1); 5996 if (IS_ERR(trans)) 5997 return PTR_ERR(trans); 5998 5999 ret = btrfs_update_inode(trans, root, inode); 6000 } 6001 btrfs_end_transaction(trans); 6002 if (inode->delayed_node) 6003 btrfs_balance_delayed_items(fs_info); 6004 6005 return ret; 6006 } 6007 6008 /* 6009 * This is a copy of file_update_time. We need this so we can return error on 6010 * ENOSPC for updating the inode in the case of file write and mmap writes. 6011 */ 6012 static int btrfs_update_time(struct inode *inode, struct timespec64 *now, 6013 int flags) 6014 { 6015 struct btrfs_root *root = BTRFS_I(inode)->root; 6016 bool dirty = flags & ~S_VERSION; 6017 6018 if (btrfs_root_readonly(root)) 6019 return -EROFS; 6020 6021 if (flags & S_VERSION) 6022 dirty |= inode_maybe_inc_iversion(inode, dirty); 6023 if (flags & S_CTIME) 6024 inode->i_ctime = *now; 6025 if (flags & S_MTIME) 6026 inode->i_mtime = *now; 6027 if (flags & S_ATIME) 6028 inode->i_atime = *now; 6029 return dirty ? 
btrfs_dirty_inode(BTRFS_I(inode)) : 0; 6030 } 6031 6032 /* 6033 * Helper to find a free sequence number in a given directory. The current 6034 * code is very simple; later versions will do smarter things in the btree. 6035 */ 6036 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6037 { 6038 int ret = 0; 6039 6040 if (dir->index_cnt == (u64)-1) { 6041 ret = btrfs_inode_delayed_dir_index_count(dir); 6042 if (ret) { 6043 ret = btrfs_set_inode_index_count(dir); 6044 if (ret) 6045 return ret; 6046 } 6047 } 6048 6049 *index = dir->index_cnt; 6050 dir->index_cnt++; 6051 6052 return ret; 6053 } 6054 6055 static int btrfs_insert_inode_locked(struct inode *inode) 6056 { 6057 struct btrfs_iget_args args; 6058 6059 args.ino = BTRFS_I(inode)->location.objectid; 6060 args.root = BTRFS_I(inode)->root; 6061 6062 return insert_inode_locked4(inode, 6063 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6064 btrfs_find_actor, &args); 6065 } 6066 6067 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, 6068 unsigned int *trans_num_items) 6069 { 6070 struct inode *dir = args->dir; 6071 struct inode *inode = args->inode; 6072 int ret; 6073 6074 if (!args->orphan) { 6075 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, 6076 &args->fname); 6077 if (ret) 6078 return ret; 6079 } 6080 6081 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); 6082 if (ret) { 6083 fscrypt_free_filename(&args->fname); 6084 return ret; 6085 } 6086 6087 /* 1 to add inode item */ 6088 *trans_num_items = 1; 6089 /* 1 to add compression property */ 6090 if (BTRFS_I(dir)->prop_compress) 6091 (*trans_num_items)++; 6092 /* 1 to add default ACL xattr */ 6093 if (args->default_acl) 6094 (*trans_num_items)++; 6095 /* 1 to add access ACL xattr */ 6096 if (args->acl) 6097 (*trans_num_items)++; 6098 #ifdef CONFIG_SECURITY 6099 /* 1 to add LSM xattr */ 6100 if (dir->i_security) 6101 (*trans_num_items)++; 6102 #endif 6103 if (args->orphan) { 6104 /* 1 to add orphan item */ 6105 (*trans_num_items)++; 6106 } else { 6107 /* 6108 * 1 to add dir item 6109 * 1 to add dir index 6110 * 1 to update parent inode item 6111 * 6112 * No need for 1 unit for the inode ref item because it is 6113 * inserted in a batch together with the inode item at 6114 * btrfs_create_new_inode(). 6115 */ 6116 *trans_num_items += 3; 6117 } 6118 return 0; 6119 } 6120 6121 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) 6122 { 6123 posix_acl_release(args->acl); 6124 posix_acl_release(args->default_acl); 6125 fscrypt_free_filename(&args->fname); 6126 } 6127 6128 /* 6129 * Inherit flags from the parent inode. 6130 * 6131 * Currently only the compression flags and the cow flags are inherited.
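 *
 * A compact restatement of the rules implemented below (no new
 * behaviour, just a summary):
 *
 *	parent NOCOMPRESS -> child NOCOMPRESS, COMPRESS cleared
 *	parent COMPRESS   -> child COMPRESS, NOCOMPRESS cleared
 *	parent NODATACOW  -> child NODATACOW, plus NODATASUM for
 *			     regular files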
6132 */ 6133 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir) 6134 { 6135 unsigned int flags; 6136 6137 flags = dir->flags; 6138 6139 if (flags & BTRFS_INODE_NOCOMPRESS) { 6140 inode->flags &= ~BTRFS_INODE_COMPRESS; 6141 inode->flags |= BTRFS_INODE_NOCOMPRESS; 6142 } else if (flags & BTRFS_INODE_COMPRESS) { 6143 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; 6144 inode->flags |= BTRFS_INODE_COMPRESS; 6145 } 6146 6147 if (flags & BTRFS_INODE_NODATACOW) { 6148 inode->flags |= BTRFS_INODE_NODATACOW; 6149 if (S_ISREG(inode->vfs_inode.i_mode)) 6150 inode->flags |= BTRFS_INODE_NODATASUM; 6151 } 6152 6153 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode); 6154 } 6155 6156 int btrfs_create_new_inode(struct btrfs_trans_handle *trans, 6157 struct btrfs_new_inode_args *args) 6158 { 6159 struct inode *dir = args->dir; 6160 struct inode *inode = args->inode; 6161 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name; 6162 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6163 struct btrfs_root *root; 6164 struct btrfs_inode_item *inode_item; 6165 struct btrfs_key *location; 6166 struct btrfs_path *path; 6167 u64 objectid; 6168 struct btrfs_inode_ref *ref; 6169 struct btrfs_key key[2]; 6170 u32 sizes[2]; 6171 struct btrfs_item_batch batch; 6172 unsigned long ptr; 6173 int ret; 6174 6175 path = btrfs_alloc_path(); 6176 if (!path) 6177 return -ENOMEM; 6178 6179 if (!args->subvol) 6180 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); 6181 root = BTRFS_I(inode)->root; 6182 6183 ret = btrfs_get_free_objectid(root, &objectid); 6184 if (ret) 6185 goto out; 6186 inode->i_ino = objectid; 6187 6188 if (args->orphan) { 6189 /* 6190 * O_TMPFILE, set link count to 0, so that after this point, we 6191 * fill in an inode item with the correct link count. 6192 */ 6193 set_nlink(inode, 0); 6194 } else { 6195 trace_btrfs_inode_request(dir); 6196 6197 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); 6198 if (ret) 6199 goto out; 6200 } 6201 /* index_cnt is ignored for everything but a dir. */ 6202 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; 6203 BTRFS_I(inode)->generation = trans->transid; 6204 inode->i_generation = BTRFS_I(inode)->generation; 6205 6206 /* 6207 * Subvolumes don't inherit flags from their parent directory. 6208 * Originally this was probably by accident, but we probably can't 6209 * change it now without compatibility issues. 6210 */ 6211 if (!args->subvol) 6212 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir)); 6213 6214 if (S_ISREG(inode->i_mode)) { 6215 if (btrfs_test_opt(fs_info, NODATASUM)) 6216 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6217 if (btrfs_test_opt(fs_info, NODATACOW)) 6218 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6219 BTRFS_INODE_NODATASUM; 6220 } 6221 6222 location = &BTRFS_I(inode)->location; 6223 location->objectid = objectid; 6224 location->offset = 0; 6225 location->type = BTRFS_INODE_ITEM_KEY; 6226 6227 ret = btrfs_insert_inode_locked(inode); 6228 if (ret < 0) { 6229 if (!args->orphan) 6230 BTRFS_I(dir)->index_cnt--; 6231 goto out; 6232 } 6233 6234 /* 6235 * We could have gotten an inode number from somebody who was fsynced 6236 * and then removed in this same transaction, so let's just set full 6237 * sync since it will be a full sync anyway and this will blow away the 6238 * old info in the log. 
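 *
 * A hypothetical sequence this guards against (file names purely
 * illustrative):
 *
 *	create "foo"	-> allocates inode number N
 *	fsync "foo"	-> items for N end up in the log tree
 *	unlink "foo"	-> N is freed within the same transaction
 *	create "bar"	-> reuses inode number N
 *
 * A later fsync of "bar" must not mix the stale logged items for the
 * old N with the new inode's items, hence the full sync flag.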
6239 */ 6240 btrfs_set_inode_full_sync(BTRFS_I(inode)); 6241 6242 key[0].objectid = objectid; 6243 key[0].type = BTRFS_INODE_ITEM_KEY; 6244 key[0].offset = 0; 6245 6246 sizes[0] = sizeof(struct btrfs_inode_item); 6247 6248 if (!args->orphan) { 6249 /* 6250 * Start new inodes with an inode_ref. This is slightly more 6251 * efficient for small numbers of hard links since they will 6252 * be packed into one item. Extended refs will kick in if we 6253 * add more hard links than can fit in the ref item. 6254 */ 6255 key[1].objectid = objectid; 6256 key[1].type = BTRFS_INODE_REF_KEY; 6257 if (args->subvol) { 6258 key[1].offset = objectid; 6259 sizes[1] = 2 + sizeof(*ref); 6260 } else { 6261 key[1].offset = btrfs_ino(BTRFS_I(dir)); 6262 sizes[1] = name->len + sizeof(*ref); 6263 } 6264 } 6265 6266 batch.keys = &key[0]; 6267 batch.data_sizes = &sizes[0]; 6268 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); 6269 batch.nr = args->orphan ? 1 : 2; 6270 ret = btrfs_insert_empty_items(trans, root, path, &batch); 6271 if (ret != 0) { 6272 btrfs_abort_transaction(trans, ret); 6273 goto discard; 6274 } 6275 6276 inode->i_mtime = current_time(inode); 6277 inode->i_atime = inode->i_mtime; 6278 inode->i_ctime = inode->i_mtime; 6279 BTRFS_I(inode)->i_otime = inode->i_mtime; 6280 6281 /* 6282 * We're going to fill the inode item now, so at this point the inode 6283 * must be fully initialized. 6284 */ 6285 6286 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6287 struct btrfs_inode_item); 6288 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6289 sizeof(*inode_item)); 6290 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6291 6292 if (!args->orphan) { 6293 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6294 struct btrfs_inode_ref); 6295 ptr = (unsigned long)(ref + 1); 6296 if (args->subvol) { 6297 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); 6298 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); 6299 write_extent_buffer(path->nodes[0], "..", ptr, 2); 6300 } else { 6301 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 6302 name->len); 6303 btrfs_set_inode_ref_index(path->nodes[0], ref, 6304 BTRFS_I(inode)->dir_index); 6305 write_extent_buffer(path->nodes[0], name->name, ptr, 6306 name->len); 6307 } 6308 } 6309 6310 btrfs_mark_buffer_dirty(path->nodes[0]); 6311 /* 6312 * We don't need the path anymore, plus inheriting properties, adding 6313 * ACLs, security xattrs, orphan item or adding the link, will result in 6314 * allocating yet another path. So just free our path. 6315 */ 6316 btrfs_free_path(path); 6317 path = NULL; 6318 6319 if (args->subvol) { 6320 struct inode *parent; 6321 6322 /* 6323 * Subvolumes inherit properties from their parent subvolume, 6324 * not the directory they were created in. 6325 */ 6326 parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, 6327 BTRFS_I(dir)->root); 6328 if (IS_ERR(parent)) { 6329 ret = PTR_ERR(parent); 6330 } else { 6331 ret = btrfs_inode_inherit_props(trans, inode, parent); 6332 iput(parent); 6333 } 6334 } else { 6335 ret = btrfs_inode_inherit_props(trans, inode, dir); 6336 } 6337 if (ret) { 6338 btrfs_err(fs_info, 6339 "error inheriting props for ino %llu (root %llu): %d", 6340 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, 6341 ret); 6342 } 6343 6344 /* 6345 * Subvolumes don't inherit ACLs or get passed to the LSM. This is 6346 * probably a bug. 
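 *
 * For the non-subvolume case, the btrfs_init_inode_security() call
 * below boils down to roughly this (a simplified sketch, see acl.c
 * and xattr.c for the real call chain):
 *
 *	btrfs_init_acl(trans, inode, dir);	// default + access ACLs
 *	btrfs_xattr_security_init(...);		// LSM xattr, e.g. an
 *						// SELinux label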
6347 */ 6348 if (!args->subvol) { 6349 ret = btrfs_init_inode_security(trans, args); 6350 if (ret) { 6351 btrfs_abort_transaction(trans, ret); 6352 goto discard; 6353 } 6354 } 6355 6356 inode_tree_add(BTRFS_I(inode)); 6357 6358 trace_btrfs_inode_new(inode); 6359 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6360 6361 btrfs_update_root_times(trans, root); 6362 6363 if (args->orphan) { 6364 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 6365 } else { 6366 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 6367 0, BTRFS_I(inode)->dir_index); 6368 } 6369 if (ret) { 6370 btrfs_abort_transaction(trans, ret); 6371 goto discard; 6372 } 6373 6374 return 0; 6375 6376 discard: 6377 /* 6378 * discard_new_inode() calls iput(), but the caller owns the reference 6379 * to the inode. 6380 */ 6381 ihold(inode); 6382 discard_new_inode(inode); 6383 out: 6384 btrfs_free_path(path); 6385 return ret; 6386 } 6387 6388 /* 6389 * Utility function to add 'inode' into 'parent_inode' with 6390 * a given name and a given sequence number. 6391 * If 'add_backref' is true, also insert a backref from the 6392 * inode to the parent directory. 6393 */ 6394 int btrfs_add_link(struct btrfs_trans_handle *trans, 6395 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6396 const struct fscrypt_str *name, int add_backref, u64 index) 6397 { 6398 int ret = 0; 6399 struct btrfs_key key; 6400 struct btrfs_root *root = parent_inode->root; 6401 u64 ino = btrfs_ino(inode); 6402 u64 parent_ino = btrfs_ino(parent_inode); 6403 6404 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6405 memcpy(&key, &inode->root->root_key, sizeof(key)); 6406 } else { 6407 key.objectid = ino; 6408 key.type = BTRFS_INODE_ITEM_KEY; 6409 key.offset = 0; 6410 } 6411 6412 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6413 ret = btrfs_add_root_ref(trans, key.objectid, 6414 root->root_key.objectid, parent_ino, 6415 index, name); 6416 } else if (add_backref) { 6417 ret = btrfs_insert_inode_ref(trans, root, name, 6418 ino, parent_ino, index); 6419 } 6420 6421 /* Nothing to clean up yet */ 6422 if (ret) 6423 return ret; 6424 6425 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key, 6426 btrfs_inode_type(&inode->vfs_inode), index); 6427 if (ret == -EEXIST || ret == -EOVERFLOW) 6428 goto fail_dir_item; 6429 else if (ret) { 6430 btrfs_abort_transaction(trans, ret); 6431 return ret; 6432 } 6433 6434 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6435 name->len * 2); 6436 inode_inc_iversion(&parent_inode->vfs_inode); 6437 /* 6438 * If we are replaying a log tree, we do not want to update the mtime 6439 * and ctime of the parent directory with the current time, since the 6440 * log replay procedure is responsible for setting them to their correct 6441 * values (the ones it had when the fsync was done).
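 *
 * Illustrative example: if the directory had mtime T when it was
 * fsynced and we crash, log replay re-adds its entries through this
 * very path; stamping the current time here would leave the directory
 * with a post-crash timestamp instead of the logged value T.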
6442 */ 6443 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) { 6444 struct timespec64 now = current_time(&parent_inode->vfs_inode); 6445 6446 parent_inode->vfs_inode.i_mtime = now; 6447 parent_inode->vfs_inode.i_ctime = now; 6448 } 6449 ret = btrfs_update_inode(trans, root, parent_inode); 6450 if (ret) 6451 btrfs_abort_transaction(trans, ret); 6452 return ret; 6453 6454 fail_dir_item: 6455 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6456 u64 local_index; 6457 int err; 6458 err = btrfs_del_root_ref(trans, key.objectid, 6459 root->root_key.objectid, parent_ino, 6460 &local_index, name); 6461 if (err) 6462 btrfs_abort_transaction(trans, err); 6463 } else if (add_backref) { 6464 u64 local_index; 6465 int err; 6466 6467 err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, 6468 &local_index); 6469 if (err) 6470 btrfs_abort_transaction(trans, err); 6471 } 6472 6473 /* Return the original error code */ 6474 return ret; 6475 } 6476 6477 static int btrfs_create_common(struct inode *dir, struct dentry *dentry, 6478 struct inode *inode) 6479 { 6480 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6481 struct btrfs_root *root = BTRFS_I(dir)->root; 6482 struct btrfs_new_inode_args new_inode_args = { 6483 .dir = dir, 6484 .dentry = dentry, 6485 .inode = inode, 6486 }; 6487 unsigned int trans_num_items; 6488 struct btrfs_trans_handle *trans; 6489 int err; 6490 6491 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 6492 if (err) 6493 goto out_inode; 6494 6495 trans = btrfs_start_transaction(root, trans_num_items); 6496 if (IS_ERR(trans)) { 6497 err = PTR_ERR(trans); 6498 goto out_new_inode_args; 6499 } 6500 6501 err = btrfs_create_new_inode(trans, &new_inode_args); 6502 if (!err) 6503 d_instantiate_new(dentry, inode); 6504 6505 btrfs_end_transaction(trans); 6506 btrfs_btree_balance_dirty(fs_info); 6507 out_new_inode_args: 6508 btrfs_new_inode_args_destroy(&new_inode_args); 6509 out_inode: 6510 if (err) 6511 iput(inode); 6512 return err; 6513 } 6514 6515 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir, 6516 struct dentry *dentry, umode_t mode, dev_t rdev) 6517 { 6518 struct inode *inode; 6519 6520 inode = new_inode(dir->i_sb); 6521 if (!inode) 6522 return -ENOMEM; 6523 inode_init_owner(idmap, inode, dir, mode); 6524 inode->i_op = &btrfs_special_inode_operations; 6525 init_special_inode(inode, inode->i_mode, rdev); 6526 return btrfs_create_common(dir, dentry, inode); 6527 } 6528 6529 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir, 6530 struct dentry *dentry, umode_t mode, bool excl) 6531 { 6532 struct inode *inode; 6533 6534 inode = new_inode(dir->i_sb); 6535 if (!inode) 6536 return -ENOMEM; 6537 inode_init_owner(idmap, inode, dir, mode); 6538 inode->i_fop = &btrfs_file_operations; 6539 inode->i_op = &btrfs_file_inode_operations; 6540 inode->i_mapping->a_ops = &btrfs_aops; 6541 return btrfs_create_common(dir, dentry, inode); 6542 } 6543 6544 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6545 struct dentry *dentry) 6546 { 6547 struct btrfs_trans_handle *trans = NULL; 6548 struct btrfs_root *root = BTRFS_I(dir)->root; 6549 struct inode *inode = d_inode(old_dentry); 6550 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 6551 struct fscrypt_name fname; 6552 u64 index; 6553 int err; 6554 int drop_inode = 0; 6555 6556 /* do not allow sys_link's with other subvols of the same device */ 6557 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) 6558 return -EXDEV; 6559 6560 if 
(inode->i_nlink >= BTRFS_LINK_MAX) 6561 return -EMLINK; 6562 6563 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); 6564 if (err) 6565 goto fail; 6566 6567 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6568 if (err) 6569 goto fail; 6570 6571 /* 6572 * 2 items for inode and inode ref 6573 * 2 items for dir items 6574 * 1 item for parent inode 6575 * 1 item for orphan item deletion if O_TMPFILE 6576 */ 6577 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6578 if (IS_ERR(trans)) { 6579 err = PTR_ERR(trans); 6580 trans = NULL; 6581 goto fail; 6582 } 6583 6584 /* There are several dir indexes for this inode, clear the cache. */ 6585 BTRFS_I(inode)->dir_index = 0ULL; 6586 inc_nlink(inode); 6587 inode_inc_iversion(inode); 6588 inode->i_ctime = current_time(inode); 6589 ihold(inode); 6590 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6591 6592 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6593 &fname.disk_name, 1, index); 6594 6595 if (err) { 6596 drop_inode = 1; 6597 } else { 6598 struct dentry *parent = dentry->d_parent; 6599 6600 err = btrfs_update_inode(trans, root, BTRFS_I(inode)); 6601 if (err) 6602 goto fail; 6603 if (inode->i_nlink == 1) { 6604 /* 6605 * If new hard link count is 1, it's a file created 6606 * with open(2) O_TMPFILE flag. 6607 */ 6608 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6609 if (err) 6610 goto fail; 6611 } 6612 d_instantiate(dentry, inode); 6613 btrfs_log_new_name(trans, old_dentry, NULL, 0, parent); 6614 } 6615 6616 fail: 6617 fscrypt_free_filename(&fname); 6618 if (trans) 6619 btrfs_end_transaction(trans); 6620 if (drop_inode) { 6621 inode_dec_link_count(inode); 6622 iput(inode); 6623 } 6624 btrfs_btree_balance_dirty(fs_info); 6625 return err; 6626 } 6627 6628 static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, 6629 struct dentry *dentry, umode_t mode) 6630 { 6631 struct inode *inode; 6632 6633 inode = new_inode(dir->i_sb); 6634 if (!inode) 6635 return -ENOMEM; 6636 inode_init_owner(idmap, inode, dir, S_IFDIR | mode); 6637 inode->i_op = &btrfs_dir_inode_operations; 6638 inode->i_fop = &btrfs_dir_file_operations; 6639 return btrfs_create_common(dir, dentry, inode); 6640 } 6641 6642 static noinline int uncompress_inline(struct btrfs_path *path, 6643 struct page *page, 6644 struct btrfs_file_extent_item *item) 6645 { 6646 int ret; 6647 struct extent_buffer *leaf = path->nodes[0]; 6648 char *tmp; 6649 size_t max_size; 6650 unsigned long inline_size; 6651 unsigned long ptr; 6652 int compress_type; 6653 6654 compress_type = btrfs_file_extent_compression(leaf, item); 6655 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6656 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 6657 tmp = kmalloc(inline_size, GFP_NOFS); 6658 if (!tmp) 6659 return -ENOMEM; 6660 ptr = btrfs_file_extent_inline_start(item); 6661 6662 read_extent_buffer(leaf, tmp, ptr, inline_size); 6663 6664 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6665 ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size); 6666 6667 /* 6668 * decompression code contains a memset to fill in any space between the end 6669 * of the uncompressed data and the end of max_size in case the decompressed 6670 * data ends up shorter than ram_bytes. That doesn't cover the hole between 6671 * the end of an inline extent and the beginning of the next block, so we 6672 * cover that region here. 
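 *
 * For a 4K page the regions end up as follows (a sketch of the cases
 * handled here, with data_len being the decompressed payload size):
 *
 *	[0, data_len)		filled by btrfs_decompress()
 *	[data_len, max_size)	zeroed inside the decompression code
 *	[max_size, PAGE_SIZE)	zeroed by the memzero_page() below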
6673 */ 6674 6675 if (max_size < PAGE_SIZE) 6676 memzero_page(page, max_size, PAGE_SIZE - max_size); 6677 kfree(tmp); 6678 return ret; 6679 } 6680 6681 static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path, 6682 struct page *page) 6683 { 6684 struct btrfs_file_extent_item *fi; 6685 void *kaddr; 6686 size_t copy_size; 6687 6688 if (!page || PageUptodate(page)) 6689 return 0; 6690 6691 ASSERT(page_offset(page) == 0); 6692 6693 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 6694 struct btrfs_file_extent_item); 6695 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) 6696 return uncompress_inline(path, page, fi); 6697 6698 copy_size = min_t(u64, PAGE_SIZE, 6699 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); 6700 kaddr = kmap_local_page(page); 6701 read_extent_buffer(path->nodes[0], kaddr, 6702 btrfs_file_extent_inline_start(fi), copy_size); 6703 kunmap_local(kaddr); 6704 if (copy_size < PAGE_SIZE) 6705 memzero_page(page, copy_size, PAGE_SIZE - copy_size); 6706 return 0; 6707 } 6708 6709 /* 6710 * Lookup the first extent overlapping a range in a file. 6711 * 6712 * @inode: file to search in 6713 * @page: page to read extent data into if the extent is inline 6714 * @pg_offset: offset into @page to copy to 6715 * @start: file offset 6716 * @len: length of range starting at @start 6717 * 6718 * Return the first &struct extent_map which overlaps the given range, reading 6719 * it from the B-tree and caching it if necessary. Note that there may be more 6720 * extents which overlap the given range after the returned extent_map. 6721 * 6722 * If @page is not NULL and the extent is inline, this also reads the extent 6723 * data directly into the page and marks the extent up to date in the io_tree. 6724 * 6725 * Return: ERR_PTR on error, non-NULL extent_map on success. 6726 */ 6727 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6728 struct page *page, size_t pg_offset, 6729 u64 start, u64 len) 6730 { 6731 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6732 int ret = 0; 6733 u64 extent_start = 0; 6734 u64 extent_end = 0; 6735 u64 objectid = btrfs_ino(inode); 6736 int extent_type = -1; 6737 struct btrfs_path *path = NULL; 6738 struct btrfs_root *root = inode->root; 6739 struct btrfs_file_extent_item *item; 6740 struct extent_buffer *leaf; 6741 struct btrfs_key found_key; 6742 struct extent_map *em = NULL; 6743 struct extent_map_tree *em_tree = &inode->extent_tree; 6744 6745 read_lock(&em_tree->lock); 6746 em = lookup_extent_mapping(em_tree, start, len); 6747 read_unlock(&em_tree->lock); 6748 6749 if (em) { 6750 if (em->start > start || em->start + em->len <= start) 6751 free_extent_map(em); 6752 else if (em->block_start == EXTENT_MAP_INLINE && page) 6753 free_extent_map(em); 6754 else 6755 goto out; 6756 } 6757 em = alloc_extent_map(); 6758 if (!em) { 6759 ret = -ENOMEM; 6760 goto out; 6761 } 6762 em->start = EXTENT_MAP_HOLE; 6763 em->orig_start = EXTENT_MAP_HOLE; 6764 em->len = (u64)-1; 6765 em->block_len = (u64)-1; 6766 6767 path = btrfs_alloc_path(); 6768 if (!path) { 6769 ret = -ENOMEM; 6770 goto out; 6771 } 6772 6773 /* Chances are we'll be called again, so go ahead and do readahead */ 6774 path->reada = READA_FORWARD; 6775 6776 /* 6777 * The same explanation in load_free_space_cache applies here as well, 6778 * we only read when we're loading the free space cache, and at that 6779 * point the commit_root has everything we need. 
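 *
 * Concretely, the two path tweaks below are the usual recipe for a
 * lockless read of the last committed state:
 *
 *	path->search_commit_root = 1;	// read the commit root
 *	path->skip_locking = 1;		// safe: commit roots are not
 *					// modified in place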
6780 */ 6781 if (btrfs_is_free_space_inode(inode)) { 6782 path->search_commit_root = 1; 6783 path->skip_locking = 1; 6784 } 6785 6786 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6787 if (ret < 0) { 6788 goto out; 6789 } else if (ret > 0) { 6790 if (path->slots[0] == 0) 6791 goto not_found; 6792 path->slots[0]--; 6793 ret = 0; 6794 } 6795 6796 leaf = path->nodes[0]; 6797 item = btrfs_item_ptr(leaf, path->slots[0], 6798 struct btrfs_file_extent_item); 6799 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6800 if (found_key.objectid != objectid || 6801 found_key.type != BTRFS_EXTENT_DATA_KEY) { 6802 /* 6803 * If we backup past the first extent we want to move forward 6804 * and see if there is an extent in front of us, otherwise we'll 6805 * say there is a hole for our whole search range which can 6806 * cause problems. 6807 */ 6808 extent_end = start; 6809 goto next; 6810 } 6811 6812 extent_type = btrfs_file_extent_type(leaf, item); 6813 extent_start = found_key.offset; 6814 extent_end = btrfs_file_extent_end(path); 6815 if (extent_type == BTRFS_FILE_EXTENT_REG || 6816 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6817 /* Only regular file could have regular/prealloc extent */ 6818 if (!S_ISREG(inode->vfs_inode.i_mode)) { 6819 ret = -EUCLEAN; 6820 btrfs_crit(fs_info, 6821 "regular/prealloc extent found for non-regular inode %llu", 6822 btrfs_ino(inode)); 6823 goto out; 6824 } 6825 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 6826 extent_start); 6827 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6828 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 6829 path->slots[0], 6830 extent_start); 6831 } 6832 next: 6833 if (start >= extent_end) { 6834 path->slots[0]++; 6835 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6836 ret = btrfs_next_leaf(root, path); 6837 if (ret < 0) 6838 goto out; 6839 else if (ret > 0) 6840 goto not_found; 6841 6842 leaf = path->nodes[0]; 6843 } 6844 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6845 if (found_key.objectid != objectid || 6846 found_key.type != BTRFS_EXTENT_DATA_KEY) 6847 goto not_found; 6848 if (start + len <= found_key.offset) 6849 goto not_found; 6850 if (start > found_key.offset) 6851 goto next; 6852 6853 /* New extent overlaps with existing one */ 6854 em->start = start; 6855 em->orig_start = start; 6856 em->len = found_key.offset - start; 6857 em->block_start = EXTENT_MAP_HOLE; 6858 goto insert; 6859 } 6860 6861 btrfs_extent_item_to_extent_map(inode, path, item, em); 6862 6863 if (extent_type == BTRFS_FILE_EXTENT_REG || 6864 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6865 goto insert; 6866 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6867 /* 6868 * Inline extent can only exist at file offset 0. This is 6869 * ensured by tree-checker and inline extent creation path. 6870 * Thus all members representing file offsets should be zero. 6871 */ 6872 ASSERT(pg_offset == 0); 6873 ASSERT(extent_start == 0); 6874 ASSERT(em->start == 0); 6875 6876 /* 6877 * btrfs_extent_item_to_extent_map() should have properly 6878 * initialized em members already. 6879 * 6880 * Other members are not utilized for inline extents. 
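 *
 * Restated as a checklist, the invariants asserted below are:
 *
 *	pg_offset == 0 and extent_start == 0	(inline data lives at
 *						 file offset 0)
 *	em->start == 0
 *	em->block_start == EXTENT_MAP_INLINE
 *	em->len == fs_info->sectorsize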
6881 */ 6882 ASSERT(em->block_start == EXTENT_MAP_INLINE); 6883 ASSERT(em->len == fs_info->sectorsize); 6884 6885 ret = read_inline_extent(inode, path, page); 6886 if (ret < 0) 6887 goto out; 6888 goto insert; 6889 } 6890 not_found: 6891 em->start = start; 6892 em->orig_start = start; 6893 em->len = len; 6894 em->block_start = EXTENT_MAP_HOLE; 6895 insert: 6896 ret = 0; 6897 btrfs_release_path(path); 6898 if (em->start > start || extent_map_end(em) <= start) { 6899 btrfs_err(fs_info, 6900 "bad extent! em: [%llu %llu] passed [%llu %llu]", 6901 em->start, em->len, start, len); 6902 ret = -EIO; 6903 goto out; 6904 } 6905 6906 write_lock(&em_tree->lock); 6907 ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); 6908 write_unlock(&em_tree->lock); 6909 out: 6910 btrfs_free_path(path); 6911 6912 trace_btrfs_get_extent(root, inode, em); 6913 6914 if (ret) { 6915 free_extent_map(em); 6916 return ERR_PTR(ret); 6917 } 6918 return em; 6919 } 6920 6921 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, 6922 struct btrfs_dio_data *dio_data, 6923 const u64 start, 6924 const u64 len, 6925 const u64 orig_start, 6926 const u64 block_start, 6927 const u64 block_len, 6928 const u64 orig_block_len, 6929 const u64 ram_bytes, 6930 const int type) 6931 { 6932 struct extent_map *em = NULL; 6933 struct btrfs_ordered_extent *ordered; 6934 6935 if (type != BTRFS_ORDERED_NOCOW) { 6936 em = create_io_em(inode, start, len, orig_start, block_start, 6937 block_len, orig_block_len, ram_bytes, 6938 BTRFS_COMPRESS_NONE, /* compress_type */ 6939 type); 6940 if (IS_ERR(em)) 6941 goto out; 6942 } 6943 ordered = btrfs_alloc_ordered_extent(inode, start, len, len, 6944 block_start, block_len, 0, 6945 (1 << type) | 6946 (1 << BTRFS_ORDERED_DIRECT), 6947 BTRFS_COMPRESS_NONE); 6948 if (IS_ERR(ordered)) { 6949 if (em) { 6950 free_extent_map(em); 6951 btrfs_drop_extent_map_range(inode, start, 6952 start + len - 1, false); 6953 } 6954 em = ERR_CAST(ordered); 6955 } else { 6956 ASSERT(!dio_data->ordered); 6957 dio_data->ordered = ordered; 6958 } 6959 out: 6960 6961 return em; 6962 } 6963 6964 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, 6965 struct btrfs_dio_data *dio_data, 6966 u64 start, u64 len) 6967 { 6968 struct btrfs_root *root = inode->root; 6969 struct btrfs_fs_info *fs_info = root->fs_info; 6970 struct extent_map *em; 6971 struct btrfs_key ins; 6972 u64 alloc_hint; 6973 int ret; 6974 6975 alloc_hint = get_extent_allocation_hint(inode, start, len); 6976 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 6977 0, alloc_hint, &ins, 1, 1); 6978 if (ret) 6979 return ERR_PTR(ret); 6980 6981 em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start, 6982 ins.objectid, ins.offset, ins.offset, 6983 ins.offset, BTRFS_ORDERED_REGULAR); 6984 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 6985 if (IS_ERR(em)) 6986 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 6987 1); 6988 6989 return em; 6990 } 6991 6992 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) 6993 { 6994 struct btrfs_block_group *block_group; 6995 bool readonly = false; 6996 6997 block_group = btrfs_lookup_block_group(fs_info, bytenr); 6998 if (!block_group || block_group->ro) 6999 readonly = true; 7000 if (block_group) 7001 btrfs_put_block_group(block_group); 7002 return readonly; 7003 } 7004 7005 /* 7006 * Check if we can do nocow write into the range [@offset, @offset + @len) 7007 * 7008 * @offset: File offset 7009 * @len: The 
length to write, will be updated to the nocow writeable 7010 * range 7011 * @orig_start: (optional) Return the original file offset of the file extent 7012 * @orig_block_len: (optional) Return the original on-disk length of the file extent 7013 * @ram_bytes: (optional) Return the ram_bytes of the file extent 7014 * @strict: If true, omit optimizations that might force us into unnecessary 7015 * cow, e.g. don't trust the generation number. 7016 * 7017 * Return: 7018 * >0 and update @len if we can do nocow write 7019 * 0 if we can't do nocow write 7020 * <0 if error happened 7021 * 7022 * NOTE: This only checks the file extents; the caller is responsible for 7023 * waiting for any ordered extents. 7024 */ 7025 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7026 u64 *orig_start, u64 *orig_block_len, 7027 u64 *ram_bytes, bool nowait, bool strict) 7028 { 7029 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7030 struct can_nocow_file_extent_args nocow_args = { 0 }; 7031 struct btrfs_path *path; 7032 int ret; 7033 struct extent_buffer *leaf; 7034 struct btrfs_root *root = BTRFS_I(inode)->root; 7035 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7036 struct btrfs_file_extent_item *fi; 7037 struct btrfs_key key; 7038 int found_type; 7039 7040 path = btrfs_alloc_path(); 7041 if (!path) 7042 return -ENOMEM; 7043 path->nowait = nowait; 7044 7045 ret = btrfs_lookup_file_extent(NULL, root, path, 7046 btrfs_ino(BTRFS_I(inode)), offset, 0); 7047 if (ret < 0) 7048 goto out; 7049 7050 if (ret == 1) { 7051 if (path->slots[0] == 0) { 7052 /* can't find the item, must cow */ 7053 ret = 0; 7054 goto out; 7055 } 7056 path->slots[0]--; 7057 } 7058 ret = 0; 7059 leaf = path->nodes[0]; 7060 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 7061 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7062 key.type != BTRFS_EXTENT_DATA_KEY) { 7063 /* not our file or wrong item type, must cow */ 7064 goto out; 7065 } 7066 7067 if (key.offset > offset) { 7068 /* Wrong offset, must cow */ 7069 goto out; 7070 } 7071 7072 if (btrfs_file_extent_end(path) <= offset) 7073 goto out; 7074 7075 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 7076 found_type = btrfs_file_extent_type(leaf, fi); 7077 if (ram_bytes) 7078 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7079 7080 nocow_args.start = offset; 7081 nocow_args.end = offset + *len - 1; 7082 nocow_args.strict = strict; 7083 nocow_args.free_path = true; 7084 7085 ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args); 7086 /* can_nocow_file_extent() has freed the path. */ 7087 path = NULL; 7088 7089 if (ret != 1) { 7090 /* Treat errors as not being able to NOCOW.
*/ 7091 ret = 0; 7092 goto out; 7093 } 7094 7095 ret = 0; 7096 if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr)) 7097 goto out; 7098 7099 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7100 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7101 u64 range_end; 7102 7103 range_end = round_up(offset + nocow_args.num_bytes, 7104 root->fs_info->sectorsize) - 1; 7105 ret = test_range_bit(io_tree, offset, range_end, 7106 EXTENT_DELALLOC, 0, NULL); 7107 if (ret) { 7108 ret = -EAGAIN; 7109 goto out; 7110 } 7111 } 7112 7113 if (orig_start) 7114 *orig_start = key.offset - nocow_args.extent_offset; 7115 if (orig_block_len) 7116 *orig_block_len = nocow_args.disk_num_bytes; 7117 7118 *len = nocow_args.num_bytes; 7119 ret = 1; 7120 out: 7121 btrfs_free_path(path); 7122 return ret; 7123 } 7124 7125 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7126 struct extent_state **cached_state, 7127 unsigned int iomap_flags) 7128 { 7129 const bool writing = (iomap_flags & IOMAP_WRITE); 7130 const bool nowait = (iomap_flags & IOMAP_NOWAIT); 7131 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7132 struct btrfs_ordered_extent *ordered; 7133 int ret = 0; 7134 7135 while (1) { 7136 if (nowait) { 7137 if (!try_lock_extent(io_tree, lockstart, lockend, 7138 cached_state)) 7139 return -EAGAIN; 7140 } else { 7141 lock_extent(io_tree, lockstart, lockend, cached_state); 7142 } 7143 /* 7144 * We're concerned with the entire range that we're going to be 7145 * doing DIO to, so we need to make sure there are no ordered 7146 * extents in this range. 7147 */ 7148 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, 7149 lockend - lockstart + 1); 7150 7151 /* 7152 * We need to make sure there are no buffered pages in this 7153 * range either, as we could have raced between the invalidate in 7154 * generic_file_direct_write and locking the extent. The 7155 * invalidate needs to happen so that reads after a write do not 7156 * get stale data. 7157 */ 7158 if (!ordered && 7159 (!writing || !filemap_range_has_page(inode->i_mapping, 7160 lockstart, lockend))) 7161 break; 7162 7163 unlock_extent(io_tree, lockstart, lockend, cached_state); 7164 7165 if (ordered) { 7166 if (nowait) { 7167 btrfs_put_ordered_extent(ordered); 7168 ret = -EAGAIN; 7169 break; 7170 } 7171 /* 7172 * If we are doing a DIO read and the ordered extent we 7173 * found is for a buffered write, we can not wait for it 7174 * to complete and retry, because if we do so we can 7175 * deadlock with concurrent buffered writes on page 7176 * locks. This happens only if our DIO read covers more 7177 * than one extent map, if at this point it has already 7178 * created an ordered extent for a previous extent map 7179 * and locked its range in the inode's io tree, and a 7180 * concurrent write against that previous extent map's 7181 * range and this range has started (we unlock the ranges 7182 * in the io tree only when the bios complete, and 7183 * buffered writes always lock pages before attempting 7184 * to lock the range in the io tree). 7185 */ 7186 if (writing || 7187 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) 7188 btrfs_start_ordered_extent(ordered); 7189 else 7190 ret = nowait ?
-EAGAIN : -ENOTBLK; 7191 btrfs_put_ordered_extent(ordered); 7192 } else { 7193 /* 7194 * We could trigger writeback for this range (and wait 7195 * for it to complete) and then invalidate the pages for 7196 * this range (through invalidate_inode_pages2_range()), 7197 * but that can lead us to a deadlock with a concurrent 7198 * call to readahead (a buffered read or a defrag call 7199 * triggered a readahead) on a page lock due to an 7200 * ordered dio extent we created before but did not have 7201 * yet a corresponding bio submitted (whence it can not 7202 * complete), which makes readahead wait for that 7203 * ordered extent to complete while holding a lock on 7204 * that page. 7205 */ 7206 ret = nowait ? -EAGAIN : -ENOTBLK; 7207 } 7208 7209 if (ret) 7210 break; 7211 7212 cond_resched(); 7213 } 7214 7215 return ret; 7216 } 7217 7218 /* The callers of this must take lock_extent() */ 7219 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start, 7220 u64 len, u64 orig_start, u64 block_start, 7221 u64 block_len, u64 orig_block_len, 7222 u64 ram_bytes, int compress_type, 7223 int type) 7224 { 7225 struct extent_map *em; 7226 int ret; 7227 7228 ASSERT(type == BTRFS_ORDERED_PREALLOC || 7229 type == BTRFS_ORDERED_COMPRESSED || 7230 type == BTRFS_ORDERED_NOCOW || 7231 type == BTRFS_ORDERED_REGULAR); 7232 7233 em = alloc_extent_map(); 7234 if (!em) 7235 return ERR_PTR(-ENOMEM); 7236 7237 em->start = start; 7238 em->orig_start = orig_start; 7239 em->len = len; 7240 em->block_len = block_len; 7241 em->block_start = block_start; 7242 em->orig_block_len = orig_block_len; 7243 em->ram_bytes = ram_bytes; 7244 em->generation = -1; 7245 set_bit(EXTENT_FLAG_PINNED, &em->flags); 7246 if (type == BTRFS_ORDERED_PREALLOC) { 7247 set_bit(EXTENT_FLAG_FILLING, &em->flags); 7248 } else if (type == BTRFS_ORDERED_COMPRESSED) { 7249 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 7250 em->compress_type = compress_type; 7251 } 7252 7253 ret = btrfs_replace_extent_map_range(inode, em, true); 7254 if (ret) { 7255 free_extent_map(em); 7256 return ERR_PTR(ret); 7257 } 7258 7259 /* em got 2 refs now, callers needs to do free_extent_map once. */ 7260 return em; 7261 } 7262 7263 7264 static int btrfs_get_blocks_direct_write(struct extent_map **map, 7265 struct inode *inode, 7266 struct btrfs_dio_data *dio_data, 7267 u64 start, u64 *lenp, 7268 unsigned int iomap_flags) 7269 { 7270 const bool nowait = (iomap_flags & IOMAP_NOWAIT); 7271 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7272 struct extent_map *em = *map; 7273 int type; 7274 u64 block_start, orig_start, orig_block_len, ram_bytes; 7275 struct btrfs_block_group *bg; 7276 bool can_nocow = false; 7277 bool space_reserved = false; 7278 u64 len = *lenp; 7279 u64 prev_len; 7280 int ret = 0; 7281 7282 /* 7283 * We don't allocate a new extent in the following cases 7284 * 7285 * 1) The inode is marked as NODATACOW. In this case we'll just use the 7286 * existing extent. 7287 * 2) The extent is marked as PREALLOC. We're good to go here and can 7288 * just use the extent. 
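 *
 * Even in those two cases the decision is re-validated; a sketch of
 * the flow implemented below:
 *
 *	can_nocow_extent() == 1	-> NOCOW/PREALLOC write, reserve
 *				   metadata only
 *	otherwise		-> fall back to a regular COW
 *				   allocation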
static int btrfs_get_blocks_direct_write(struct extent_map **map,
					 struct inode *inode,
					 struct btrfs_dio_data *dio_data,
					 u64 start, u64 *lenp,
					 unsigned int iomap_flags)
{
	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = *map;
	int type;
	u64 block_start, orig_start, orig_block_len, ram_bytes;
	struct btrfs_block_group *bg;
	bool can_nocow = false;
	bool space_reserved = false;
	u64 len = *lenp;
	u64 prev_len;
	int ret = 0;

	/*
	 * We don't allocate a new extent in the following cases:
	 *
	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC. We're good to go here and can
	 * just use the extent.
	 */
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		if (can_nocow_extent(inode, start, &len, &orig_start,
				     &orig_block_len, &ram_bytes, false, false) == 1) {
			bg = btrfs_inc_nocow_writers(fs_info, block_start);
			if (bg)
				can_nocow = true;
		}
	}

	prev_len = len;
	if (can_nocow) {
		struct extent_map *em2;

		/* We can NOCOW, so only need to reserve metadata space. */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      nowait);
		if (ret < 0) {
			/* Our caller expects us to free the input extent map. */
			free_extent_map(em);
			*map = NULL;
			btrfs_dec_nocow_writers(bg);
			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
				ret = -EAGAIN;
			goto out;
		}
		space_reserved = true;

		em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
					      orig_start, block_start,
					      len, orig_block_len,
					      ram_bytes, type);
		btrfs_dec_nocow_writers(bg);
		if (type == BTRFS_ORDERED_PREALLOC) {
			free_extent_map(em);
			*map = em2;
			em = em2;
		}

		if (IS_ERR(em2)) {
			ret = PTR_ERR(em2);
			goto out;
		}

		dio_data->nocow_done = true;
	} else {
		/* Our caller expects us to free the input extent map. */
		free_extent_map(em);
		*map = NULL;

		if (nowait) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * If we could not allocate data space before locking the file
		 * range and we can't do a NOCOW write, then we have to fail.
		 */
		if (!dio_data->data_space_reserved) {
			ret = -ENOSPC;
			goto out;
		}

		/*
		 * We have to COW and we have already reserved data space before,
		 * so now we reserve only metadata.
		 */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
						      false);
		if (ret < 0)
			goto out;
		space_reserved = true;

		em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		*map = em;
		len = min(len, em->len - (start - em->start));
		if (len < prev_len)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							prev_len - len, true);
	}

	/*
	 * We have created our ordered extent, so we can now release our
	 * reservation for an outstanding extent.
	 */
	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);

	/*
	 * Need to update the i_size under the extent lock so buffered
	 * readers will get the updated i_size when we unlock.
	 */
	if (start + len > i_size_read(inode))
		i_size_write(inode, start + len);
out:
	if (ret && space_reserved) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
	}
	*lenp = len;
	return ret;
}
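
/*
 * iomap_begin callback for direct I/O: map [start, start + length) to an
 * extent, locking the range in the io tree and, for writes, reserving space
 * and creating the ordered extent. The matching unlock happens in
 * btrfs_dio_iomap_end() or in the bio completion path.
 */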
static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
		loff_t length, unsigned int flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct btrfs_dio_data *dio_data = iter->private;
	u64 lockstart, lockend;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;
	u64 len = length;
	const u64 data_alloc_len = length;
	bool unlock_extents = false;

	/*
	 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
	 * we're NOWAIT we may submit a bio for a partial range and return
	 * EIOCBQUEUED, which would result in an errant short read.
	 *
	 * The best way to handle this would be to allow for partial completions
	 * of iocb's, so we could submit the partial bio, return and fault in
	 * the rest of the pages, and then submit the io for the rest of the
	 * range. However we don't have that currently, so simply return
	 * -EAGAIN at this point so that the normal path is used.
	 */
	if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
		return -EAGAIN;

	/*
	 * Cap the size of reads to that usually seen in buffered I/O as we need
	 * to allocate a contiguous array for the checksums.
	 */
	if (!write)
		len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);

	lockstart = start;
	lockend = start + len - 1;

	/*
	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
	 * enough if we've written compressed pages to this area, so we need to
	 * flush the dirty pages again to make absolutely sure that any
	 * outstanding dirty pages are on disk - the first flush only starts
	 * compression on the data, while keeping the pages locked, so by the
	 * time the second flush returns we know bios for the compressed pages
	 * were submitted and finished, and the pages are no longer under
	 * writeback.
	 *
	 * If we have a NOWAIT request and we have any pages in the range that
	 * are locked, likely due to compression still in progress, we don't want
	 * to block on page locks. We also don't want to block on pages marked as
	 * dirty or under writeback (same as for the non-compression case).
	 * iomap_dio_rw() did the same check, but after that and before we got
	 * here, mmap'ed writes may have happened or buffered reads started
	 * (readpage() and readahead(), which lock pages), as we haven't locked
	 * the file range yet.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		if (flags & IOMAP_NOWAIT) {
			if (filemap_range_needs_writeback(inode->i_mapping,
							  lockstart, lockend))
				return -EAGAIN;
		} else {
			ret = filemap_fdatawrite_range(inode->i_mapping, start,
						       start + length - 1);
			if (ret)
				return ret;
		}
	}

	memset(dio_data, 0, sizeof(*dio_data));

	/*
	 * We always try to allocate data space and must do it before locking
	 * the file range, to avoid deadlocks with concurrent writes to the same
	 * range if the range has several extents and the writes don't expand the
	 * current i_size (the inode lock is taken in shared mode). If we fail to
	 * allocate data space here we continue and later, after locking the
	 * file range, we fail with ENOSPC only if we figure out we can not do a
	 * NOCOW write.
	 */
	if (write && !(flags & IOMAP_NOWAIT)) {
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &dio_data->data_reserved,
						  start, data_alloc_len, false);
		if (!ret)
			dio_data->data_space_reserved = true;
		else if (ret && !(BTRFS_I(inode)->flags &
				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
			goto err;
	}

	/*
	 * If this errors out it's because we couldn't invalidate the page cache
	 * for this range and we need to fall back to buffered IO, or we are
	 * doing a NOWAIT read/write and we need to block.
	 */
	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
	if (ret < 0)
		goto err;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fall back on buffered
	 * io. INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety let's just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fall back to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO. Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		/*
		 * If we are in a NOWAIT context, return -EAGAIN in order to
		 * fall back to buffered IO. This is not only because we can
		 * block with buffered IO (no support for NOWAIT semantics at
		 * the moment) but also to avoid returning short reads to user
		 * space - this happens if we were able to read some data from
		 * previous non-compressed extents and then when we fall back to
		 * buffered IO, at btrfs_file_read_iter() by calling
		 * filemap_read(), we fail to fault in pages for the read buffer,
		 * in which case filemap_read() returns a short read (the number
		 * of bytes previously read is > 0, so it does not return -EFAULT).
		 */
		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
		goto unlock_err;
	}

	len = min(len, em->len - (start - em->start));

	/*
	 * If we have a NOWAIT request and the range contains multiple extents
	 * (or a mix of extents and holes), then we return -EAGAIN to make the
	 * caller fall back to a context where it can do a blocking (without
	 * NOWAIT) request. This way we avoid doing partial IO and returning
	 * success to the caller, which is not optimal for writes and for reads
	 * it can result in unexpected behaviour for an application.
	 *
	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
	 * iomap_dio_rw(), we can end up returning less data than what the
	 * caller asked for, resulting in an unexpected, and incorrect, short
	 * read. That is, the caller asked to read N bytes and we return less
	 * than that, which is wrong unless we are crossing EOF. This happens if
	 * we get a page fault error when trying to fault in pages for the
	 * buffer that is associated to the struct iov_iter passed to
	 * iomap_dio_rw(), and we have previously submitted bios for other
	 * extents in the range, in which case iomap_dio_rw() may return us
	 * EIOCBQUEUED if not all of those bios have completed by the time we
	 * get the page fault error, which we return back to our caller - we
	 * should only return EIOCBQUEUED after we have submitted bios for all
	 * the extents in the range.
	 */
	if ((flags & IOMAP_NOWAIT) && len < length) {
		free_extent_map(em);
		ret = -EAGAIN;
		goto unlock_err;
	}

	if (write) {
		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
						    start, &len, flags);
		if (ret < 0)
			goto unlock_err;
		unlock_extents = true;
		/* Recalc len in case the new em is smaller than requested */
		len = min(len, em->len - (start - em->start));
		if (dio_data->data_space_reserved) {
			u64 release_offset;
			u64 release_len = 0;

			if (dio_data->nocow_done) {
				release_offset = start;
				release_len = data_alloc_len;
			} else if (len < data_alloc_len) {
				release_offset = start + len;
				release_len = data_alloc_len - len;
			}

			if (release_len > 0)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
							       dio_data->data_reserved,
							       release_offset,
							       release_len);
		}
	} else {
		/*
		 * We need to unlock only the end area that we aren't using.
		 * The rest is going to be unlocked by the endio routine.
		 */
		lockstart = start + len;
		if (lockstart < lockend)
			unlock_extents = true;
	}

	if (unlock_extents)
		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			      &cached_state);
	else
		free_extent_state(cached_state);

	/*
	 * Translate extent map information to iomap.
	 * We trim the extents (and move the addr) even though the iomap code
	 * does that, since we have locked only the parts we are performing I/O
	 * in.
	 */
	if ((em->block_start == EXTENT_MAP_HOLE) ||
	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else {
		iomap->addr = em->block_start + (start - em->start);
		iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = start;
	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
	iomap->length = len;
	free_extent_map(em);

	return 0;

unlock_err:
	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
		      &cached_state);
err:
	if (dio_data->data_space_reserved) {
		btrfs_free_reserved_data_space(BTRFS_I(inode),
					       dio_data->data_reserved,
					       start, data_alloc_len);
		extent_changeset_free(dio_data->data_reserved);
	}

	return ret;
}
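
/*
 * iomap_end callback: undo what btrfs_dio_iomap_begin() set up for the part
 * of the range that was not submitted. For reads this unlocks the remaining
 * io tree range; for writes it finishes (with an error) the part of the
 * ordered extent that never got a bio.
 */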
static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			       ssize_t written, unsigned int flags,
			       struct iomap *iomap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct btrfs_dio_data *dio_data = iter->private;
	size_t submitted = dio_data->submitted;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;

	if (!write && (iomap->type == IOMAP_HOLE)) {
		/* If reading from a hole, unlock and return */
		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
			      NULL);
		return 0;
	}

	if (submitted < length) {
		pos += submitted;
		length -= submitted;
		if (write)
			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
						    pos, length, false);
		else
			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
				      pos + length - 1, NULL);
		ret = -ENOTBLK;
	}
	if (write) {
		btrfs_put_ordered_extent(dio_data->ordered);
		dio_data->ordered = NULL;
	}

	if (write)
		extent_changeset_free(dio_data->data_reserved);
	return ret;
}

static void btrfs_dio_end_io(struct btrfs_bio *bbio)
{
	struct btrfs_dio_private *dip =
		container_of(bbio, struct btrfs_dio_private, bbio);
	struct btrfs_inode *inode = bbio->inode;
	struct bio *bio = &bbio->bio;

	if (bio->bi_status) {
		btrfs_warn(inode->root->fs_info,
			   "direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
			   btrfs_ino(inode), bio->bi_opf,
			   dip->file_offset, dip->bytes, bio->bi_status);
	}

	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
		btrfs_finish_ordered_extent(bbio->ordered, NULL,
					    dip->file_offset, dip->bytes,
					    !bio->bi_status);
	} else {
		unlock_extent(&inode->io_tree, dip->file_offset,
			      dip->file_offset + dip->bytes - 1, NULL);
	}

	bbio->bio.bi_private = bbio->private;
	iomap_dio_bio_end_io(bio);
}

static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
				loff_t file_offset)
{
	struct btrfs_bio *bbio = btrfs_bio(bio);
	struct btrfs_dio_private *dip =
		container_of(bbio, struct btrfs_dio_private, bbio);
	struct btrfs_dio_data *dio_data = iter->private;

	btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
		       btrfs_dio_end_io, bio->bi_private);
	bbio->inode = BTRFS_I(iter->inode);
	bbio->file_offset = file_offset;

	dip->file_offset = file_offset;
	dip->bytes = bio->bi_iter.bi_size;

	dio_data->submitted += bio->bi_iter.bi_size;

	/*
	 * Check if we are doing a partial write. If we are, we need to split
	 * the ordered extent to match the submitted bio. Hang on to the
	 * remaining unfinishable ordered_extent in dio_data so that it can be
	 * cancelled in iomap_end to avoid a deadlock wherein faulting the
	 * remaining pages is blocked on the outstanding ordered extent.
	 */
	if (iter->flags & IOMAP_WRITE) {
		int ret;

		ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
		if (ret) {
			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
						    file_offset, dip->bytes,
						    !ret);
			bio->bi_status = errno_to_blk_status(ret);
			iomap_dio_bio_end_io(bio);
			return;
		}
	}

	btrfs_submit_bio(bbio, 0);
}
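
/*
 * Glue for iomap_dio_rw(): btrfs_dio_iomap_ops maps and unmaps file ranges,
 * while btrfs_dio_ops routes bio submission through btrfs_dio_submit_io()
 * and allocates bios (with the btrfs_dio_private in front) from
 * btrfs_dio_bioset. The btrfs_dio_read()/btrfs_dio_write() wrappers below
 * are expected to be driven by the read/write paths in file.c, which pass
 * the number of bytes already done so that IOMAP_DIO_PARTIAL retries can
 * continue where the previous attempt stopped.
 */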
static const struct iomap_ops btrfs_dio_iomap_ops = {
	.iomap_begin = btrfs_dio_iomap_begin,
	.iomap_end = btrfs_dio_iomap_end,
};

static const struct iomap_dio_ops btrfs_dio_ops = {
	.submit_io = btrfs_dio_submit_io,
	.bio_set = &btrfs_dio_bioset,
};

ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
{
	struct btrfs_dio_data data = { 0 };

	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			    IOMAP_DIO_PARTIAL, &data, done_before);
}

struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
				  size_t done_before)
{
	struct btrfs_dio_data data = { 0 };

	return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			      IOMAP_DIO_PARTIAL, &data, done_before);
}

static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len)
{
	int ret;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	/*
	 * fiemap_prep() called filemap_write_and_wait() for the whole possible
	 * file range (0 to LLONG_MAX), but that is not enough if we have
	 * compression enabled. The first filemap_fdatawrite_range() only kicks
	 * in the compression of data (in an async thread) and will return
	 * before the compression is done and writeback is started. A second
	 * filemap_fdatawrite_range() is needed to wait for the compression to
	 * complete and writeback to start. We also need to wait for ordered
	 * extents to complete, because our fiemap implementation uses mainly
	 * file extent items to list the extents, searching for extent maps
	 * only for file ranges with holes or prealloc extents to figure out
	 * if we have delalloc in those ranges.
	 */
	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
		if (ret)
			return ret;
	}

	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
}

static int btrfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	return extent_writepages(mapping, wbc);
}

static void btrfs_readahead(struct readahead_control *rac)
{
	extent_readahead(rac);
}

/*
 * For release_folio() and invalidate_folio() we have a race window where
 * folio_end_writeback() is called but the subpage spinlock is not yet
 * released. If we continue to release/invalidate the page, we could cause
 * use-after-free for the subpage spinlock. So this function is used to spin
 * and wait for the subpage spinlock to be released.
 */
static void wait_subpage_spinlock(struct page *page)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;

	/*
	 * This may look insane as we just acquire the spinlock and release it,
	 * without doing anything. But we just want to make sure no one is
	 * still holding the subpage spinlock.
	 * And since the page is not dirty nor under writeback, and we have the
	 * page locked, the only possible way to hold a spinlock is from the
	 * endio function to clear page writeback.
	 *
	 * Here we just acquire the spinlock so that all existing callers
	 * should exit and we're safe to release/invalidate the page.
	 */
	spin_lock_irq(&subpage->lock);
	spin_unlock_irq(&subpage->lock);
}
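
/*
 * Try to drop the extent maps for a folio that is being released. Returns
 * true only when try_release_extent_mapping() could free everything, in
 * which case the folio is also detached from the btrfs private state.
 */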
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	int ret = try_release_extent_mapping(&folio->page, gfp_flags);

	if (ret == 1) {
		wait_subpage_spinlock(&folio->page);
		clear_page_extent_mapped(&folio->page);
	}
	return ret;
}

static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;
	return __btrfs_release_folio(folio, gfp_flags);
}

#ifdef CONFIG_MIGRATION
static int btrfs_migrate_folio(struct address_space *mapping,
			       struct folio *dst, struct folio *src,
			       enum migrate_mode mode)
{
	int ret = filemap_migrate_folio(mapping, dst, src, mode);

	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_ordered(src)) {
		folio_clear_ordered(src);
		folio_set_ordered(dst);
	}

	return MIGRATEPAGE_SUCCESS;
}
#else
#define btrfs_migrate_folio NULL
#endif
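
/*
 * Invalidate part or all of a folio. Any ordered extent that still covers
 * the invalidated range is accounted for here (endio will never run for
 * this folio anymore), and the corresponding extent states and qgroup
 * reservations are released.
 */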
static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 page_start = folio_pos(folio);
	u64 page_end = page_start + folio_size(folio) - 1;
	u64 cur;
	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;

	/*
	 * We have the folio locked so no new ordered extent can be created on
	 * this page, nor can a bio be submitted for this folio.
	 *
	 * But already submitted bios can still be finished on this folio.
	 * Furthermore, the endio function won't skip a folio that has Ordered
	 * (Private2) already cleared, so it's possible for endio and
	 * invalidate_folio to do the same ordered extent accounting twice
	 * on one folio.
	 *
	 * So here we wait for any submitted bios to finish, so that we won't
	 * do double ordered extent accounting on the same folio.
	 */
	folio_wait_writeback(folio);
	wait_subpage_spinlock(&folio->page);

	/*
	 * For the subpage case, we have call sites like
	 * btrfs_punch_hole_lock_range() which pass a range not aligned to
	 * sectorsize.
	 * If the range doesn't cover the full folio, we don't need to and
	 * shouldn't clear page extent mapped, as folio->private can still
	 * record subpage dirty bits for other parts of the range.
	 *
	 * For cases that invalidate the full folio even when the range doesn't
	 * cover the full folio, like invalidating the last folio, we're still
	 * safe to wait for the ordered extent to finish.
	 */
	if (!(offset == 0 && length == folio_size(folio))) {
		btrfs_release_folio(folio, GFP_NOFS);
		return;
	}

	if (!inode_evicting)
		lock_extent(tree, page_start, page_end, &cached_state);

	cur = page_start;
	while (cur < page_end) {
		struct btrfs_ordered_extent *ordered;
		u64 range_end;
		u32 range_len;
		u32 extra_flags = 0;

		ordered = btrfs_lookup_first_ordered_range(inode, cur,
							   page_end + 1 - cur);
		if (!ordered) {
			range_end = page_end;
			/*
			 * No ordered extent covering this range, we are safe
			 * to delete all extent states in the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}
		if (ordered->file_offset > cur) {
			/*
			 * There is a range between [cur, oe->file_offset) not
			 * covered by any ordered extent.
			 * We are safe to delete all extent states, and handle
			 * the ordered extent in the next iteration.
			 */
			range_end = ordered->file_offset - 1;
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}

		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
				page_end);
		ASSERT(range_end + 1 - cur < U32_MAX);
		range_len = range_end + 1 - cur;
		if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
			/*
			 * If Ordered (Private2) is cleared, it means endio has
			 * already been executed for the range.
			 * We can't delete the extent states as
			 * btrfs_finish_ordered_io() may still use some of them.
			 */
			goto next;
		}
		btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);

		/*
		 * IO on this page will never be started, so we need to account
		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
		 * here, must leave that up for the ordered extent completion.
		 *
		 * This will also unlock the range for incoming
		 * btrfs_finish_ordered_io().
		 */
		if (!inode_evicting)
			clear_extent_bit(tree, cur, range_end,
					 EXTENT_DELALLOC |
					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
					 EXTENT_DEFRAG, &cached_state);

		spin_lock_irq(&inode->ordered_tree.lock);
		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
		ordered->truncated_len = min(ordered->truncated_len,
					     cur - ordered->file_offset);
		spin_unlock_irq(&inode->ordered_tree.lock);

		/*
		 * If the ordered extent has finished, we're safe to delete all
		 * the extent states of the range, otherwise
		 * btrfs_finish_ordered_io() will get executed by endio for
		 * other pages, so we can't delete extent states.
		 */
		if (btrfs_dec_test_ordered_pending(inode, &ordered,
						   cur, range_end + 1 - cur)) {
			btrfs_finish_ordered_io(ordered);
			/*
			 * The ordered extent has finished, now we're again
			 * safe to delete all extent states of the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
		}
next:
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		/*
		 * Qgroup reserved space handler
		 * Sector(s) here will be either:
		 *
		 * 1) Already written to disk or bio already finished
		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
		 *    Qgroup will be handled by its qgroup_record then.
		 *    btrfs_qgroup_free_data() call will do nothing here.
		 *
		 * 2) Not written to disk yet
		 *    Then btrfs_qgroup_free_data() call will clear the
		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
		 *    reserved data space.
		 *    Since the IO will never happen for this page.
		 */
		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
		if (!inode_evicting) {
			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
					 EXTENT_DELALLOC | EXTENT_UPTODATE |
					 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
					 extra_flags, &cached_state);
		}
		cur = range_end + 1;
	}
	/*
	 * We have iterated through all ordered extents of the page, the page
	 * should not have Ordered (Private2) anymore, or the above iteration
	 * did something wrong.
	 */
	ASSERT(!folio_test_ordered(folio));
	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
	if (!inode_evicting)
		__btrfs_release_folio(folio, GFP_NOFS);
	clear_page_extent_mapped(&folio->page);
}
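
/* Expected to be installed by btrfs_file_mmap() in file.c as vm_ops->page_mkwrite. */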
/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF. Because
 * truncate_setsize() writes the inode size before removing pages, once we have
 * the page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	unsigned long zero_start;
	loff_t size;
	vm_fault_t ret;
	int ret2;
	int reserved = 0;
	u64 reserved_space;
	u64 page_start;
	u64 page_end;
	u64 end;

	reserved_space = PAGE_SIZE;

	sb_start_pagefault(inode->i_sb);
	page_start = page_offset(page);
	page_end = page_start + PAGE_SIZE - 1;
	end = page_end;

	/*
	 * Reserving delalloc space after obtaining the page lock can lead to
	 * deadlock. For example, if a dirty page is locked by this function
	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
	 * dirty page write out, then the btrfs_writepages() function could
	 * end up waiting indefinitely to get a lock on the page currently
	 * being processed by the btrfs_page_mkwrite() function.
	 */
	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
					    page_start, reserved_space);
	if (!ret2) {
		ret2 = file_update_time(vmf->vma->vm_file);
		reserved = 1;
	}
	if (ret2) {
		ret = vmf_error(ret2);
		if (reserved)
			goto out;
		goto out_noreserve;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	down_read(&BTRFS_I(inode)->i_mmap_lock);
	lock_page(page);
	size = i_size_read(inode);

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* Page got truncated out from underneath us. */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent(io_tree, page_start, page_end, &cached_state);
	ret2 = set_page_extent_mapped(page);
	if (ret2 < 0) {
		ret = vmf_error(ret2);
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		goto out_unlock;
	}

	/*
	 * We can't set the delalloc bits if there are pending ordered
	 * extents. Drop our locks and wait for them to finish.
	 */
	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
					     PAGE_SIZE);
	if (ordered) {
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		unlock_page(page);
		up_read(&BTRFS_I(inode)->i_mmap_lock);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
		reserved_space = round_up(size - page_start,
					  fs_info->sectorsize);
		if (reserved_space < PAGE_SIZE) {
			end = page_start + reserved_space - 1;
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved, page_start,
					PAGE_SIZE - reserved_space, true);
		}
	}

	/*
	 * page_mkwrite gets called when the page is first dirtied after it's
	 * faulted in, but write(2) could also dirty a page and set delalloc
	 * bits, thus in this case for space accounting reasons, we still need
	 * to clear any delalloc bits within this page range since we have to
	 * reserve data & metadata space before lock_page() (see above comments).
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, &cached_state);

	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
					 &cached_state);
	if (ret2) {
		unlock_extent(io_tree, page_start, page_end, &cached_state);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* The page is wholly or partially inside EOF. */
	if (page_start + PAGE_SIZE > size)
		zero_start = offset_in_page(size);
	else
		zero_start = PAGE_SIZE;

	if (zero_start != PAGE_SIZE)
		memzero_page(page, zero_start, PAGE_SIZE - zero_start);

	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
	btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
	btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);

	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));

	unlock_extent(io_tree, page_start, page_end, &cached_state);
	up_read(&BTRFS_I(inode)->i_mmap_lock);

	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return VM_FAULT_LOCKED;

out_unlock:
	unlock_page(page);
	up_read(&BTRFS_I(inode)->i_mmap_lock);
out:
	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
				     reserved_space, (ret != 0));
out_noreserve:
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return ret;
}
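
/*
 * Truncate the inode's items on disk to match the current i_size. This can
 * drop a lot of metadata, so it runs in its own loop of transactions, with
 * a dedicated block reservation refilled from the transaction reservation
 * on every iteration. Expected to be called once the new (smaller) i_size
 * is already set on the inode (see btrfs_setsize()).
 */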
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
{
	struct btrfs_truncate_control control = {
		.inode = inode,
		.ino = btrfs_ino(inode),
		.min_type = BTRFS_EXTENT_DATA_KEY,
		.clear_extent_range = true,
	};
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *rsv;
	int ret;
	struct btrfs_trans_handle *trans;
	u64 mask = fs_info->sectorsize - 1;
	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);

	if (!skip_writeback) {
		ret = btrfs_wait_ordered_range(&inode->vfs_inode,
					       inode->vfs_inode.i_size & (~mask),
					       (u64)-1);
		if (ret)
			return ret;
	}

	/*
	 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
	 * things going on here:
	 *
	 * 1) We need to reserve space to update our inode.
	 *
	 * 2) We need to have something to cache all the space that is going to
	 * be free'd up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to be separate. The fact is we can use a lot of
	 * space doing the truncate, and we have no earthly idea how much space
	 * we will use, so we need the truncate reservation to be separate so it
	 * doesn't end up using space reserved for updating the inode. We also
	 * need to be able to stop the transaction and start a new one, which
	 * means we need to be able to update the inode several times, and we
	 * have no idea of knowing how many times that will be, so we can't just
	 * reserve 1 item for the entirety of the operation, so that has to be
	 * done separately as well.
	 *
	 * So that leaves us with:
	 *
	 * 1) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;
	rsv->failfast = true;

	/*
	 * 1 for the truncate slack space
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, false);
	/*
	 * We have reserved 2 metadata units when we started the transaction and
	 * min_size matches 1 unit, so this should never fail, but if it does,
	 * it's not critical and we just fail truncation.
	 */
	if (WARN_ON(ret)) {
		btrfs_end_transaction(trans);
		goto out;
	}

	trans->block_rsv = rsv;

	while (1) {
		struct extent_state *cached_state = NULL;
		const u64 new_size = inode->vfs_inode.i_size;
		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);

		control.new_size = new_size;
		lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
		/*
		 * We want to drop from the next block forward in case this new
		 * size is not block aligned since we will be keeping the last
		 * block of the extent just the way it is.
		 */
		btrfs_drop_extent_map_range(inode,
					    ALIGN(new_size, fs_info->sectorsize),
					    (u64)-1, false);

		ret = btrfs_truncate_inode_items(trans, root, &control);

		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);

		unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);

		trans->block_rsv = &fs_info->trans_block_rsv;
		if (ret != -ENOSPC && ret != -EAGAIN)
			break;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret)
			break;

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, false);
		/*
		 * We have reserved 2 metadata units when we started the
		 * transaction and min_size matches 1 unit, so this should never
		 * fail, but if it does, it's not critical and we just fail
		 * truncation.
		 */
		if (WARN_ON(ret))
			break;

		trans->block_rsv = rsv;
	}

	/*
	 * We can't call btrfs_truncate_block() inside a trans handle as we
	 * could deadlock with freeze. If we got BTRFS_NEED_TRUNCATE_BLOCK then
	 * we know we've truncated everything except the last little bit, and
	 * can do btrfs_truncate_block() and then update the disk_i_size.
	 */
	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}
		btrfs_inode_safe_disk_i_size_write(inode, 0);
	}

	if (trans) {
		int ret2;

		trans->block_rsv = &fs_info->trans_block_rsv;
		ret2 = btrfs_update_inode(trans, root, inode);
		if (ret2 && !ret)
			ret = ret2;

		ret2 = btrfs_end_transaction(trans);
		if (ret2 && !ret)
			ret = ret2;
		btrfs_btree_balance_dirty(fs_info);
	}
out:
	btrfs_free_block_rsv(fs_info, rsv);
	/*
	 * So if we truncate and then write and fsync we normally would just
	 * write the extents that changed, which is a problem if we need to
	 * first truncate that entire inode. So set this flag so we write out
	 * all of the extents in the inode to the sync log so we're completely
	 * safe.
	 *
	 * If no extents were dropped or trimmed we don't need to force the next
	 * fsync to truncate all the inode's items from the log and re-log them
	 * all. This means the truncate operation did not change the file size,
	 * or changed it to a smaller size but there was only an implicit hole
	 * between the old i_size and the new i_size, and there were no prealloc
	 * extents beyond i_size to drop.
	 */
	if (control.extents_found > 0)
		btrfs_set_inode_full_sync(inode);

	return ret;
}

struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
				     struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		/*
		 * Subvolumes don't inherit the sgid bit or the parent's gid if
		 * the parent's sgid bit is set. This is probably a bug.
		 */
		inode_init_owner(idmap, inode, NULL,
				 S_IFDIR | (~current_umask() & S_IRWXUGO));
		inode->i_op = &btrfs_dir_inode_operations;
		inode->i_fop = &btrfs_dir_file_operations;
	}
	return inode;
}
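
/*
 * Allocate a btrfs_inode from the inode slab cache and initialize all of the
 * btrfs specific state that must be valid for a freshly allocated inode,
 * then hand the embedded VFS inode back to the VFS.
 */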
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->generation = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->new_delalloc_bytes = 0;
	ei->defrag_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->ro_flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;
	ei->dir_index = 0;
	ei->last_unlink_trans = 0;
	ei->last_reflink_trans = 0;
	ei->last_log_commit = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	if (sb->s_magic != BTRFS_TEST_MAGIC)
		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
					      BTRFS_BLOCK_RSV_DELALLOC);
	ei->runtime_flags = 0;
	ei->prop_compress = BTRFS_COMPRESS_NONE;
	ei->defrag_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	ei->i_otime.tv_sec = 0;
	ei->i_otime.tv_nsec = 0;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
	ei->io_tree.inode = ei;
	extent_io_tree_init(fs_info, &ei->file_extent_tree,
			    IO_TREE_INODE_FILE_EXTENT);
	mutex_init(&ei->log_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->delayed_iput);
	RB_CLEAR_NODE(&ei->rb_node);
	init_rwsem(&ei->i_mmap_lock);

	return inode;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif

void btrfs_free_inode(struct inode *inode)
{
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
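
/*
 * Called when the inode is evicted. The WARN_ONs below catch reservation and
 * accounting leaks: by this point all pages are gone and no delalloc, csum
 * or outstanding extent accounting may remain. Any ordered extent still
 * attached to the inode at this point is a bug and is reported and cleaned
 * up.
 */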
void btrfs_destroy_inode(struct inode *vfs_inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_root *root = inode->root;
	bool freespace_inode;

	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
	WARN_ON(vfs_inode->i_data.nrpages);
	WARN_ON(inode->block_rsv.reserved);
	WARN_ON(inode->block_rsv.size);
	WARN_ON(inode->outstanding_extents);
	if (!S_ISDIR(vfs_inode->i_mode)) {
		WARN_ON(inode->delalloc_bytes);
		WARN_ON(inode->new_delalloc_bytes);
	}
	WARN_ON(inode->csum_bytes);
	WARN_ON(inode->defrag_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		return;

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			btrfs_err(root->fs_info,
				  "found ordered extent %llu %llu on inode cleanup",
				  ordered->file_offset, ordered->num_bytes);

			if (!freespace_inode)
				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);

			btrfs_remove_ordered_extent(inode, ordered);
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	btrfs_qgroup_check_reserved_leak(inode);
	inode_tree_del(inode);
	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
	btrfs_put_root(inode->root);
}

int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (root == NULL)
		return 1;

	/* The snapshot/subvolume tree is being deleted. */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 1;
	else
		return generic_drop_inode(inode);
}

static void init_once(void *foo)
{
	struct btrfs_inode *ei = foo;

	inode_init_once(&ei->vfs_inode);
}

void __cold btrfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	bioset_exit(&btrfs_dio_bioset);
	kmem_cache_destroy(btrfs_inode_cachep);
}

int __init btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
					       sizeof(struct btrfs_inode), 0,
					       SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
					       init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_dio_private, bbio.bio),
			BIOSET_NEED_BVECS))
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
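
/*
 * getattr for btrfs: on top of the generic attributes we report the birth
 * time from i_otime via STATX_BTIME, map the btrfs inode flags to statx
 * attribute bits, and account in-flight delalloc in the block count so that
 * a freshly written, not yet flushed file does not report zero blocks.
 */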
static int btrfs_getattr(struct mnt_idmap *idmap,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int flags)
{
	u64 delalloc_bytes;
	u64 inode_bytes;
	struct inode *inode = d_inode(path->dentry);
	u32 blocksize = inode->i_sb->s_blocksize;
	u32 bi_flags = BTRFS_I(inode)->flags;
	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;

	stat->result_mask |= STATX_BTIME;
	stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
	if (bi_flags & BTRFS_INODE_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (bi_flags & BTRFS_INODE_COMPRESS)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (bi_flags & BTRFS_INODE_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (bi_flags & BTRFS_INODE_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(idmap, inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;

	spin_lock(&BTRFS_I(inode)->lock);
	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
	inode_bytes = inode_get_bytes(inode);
	spin_unlock(&BTRFS_I(inode)->lock);
	stat->blocks = (ALIGN(inode_bytes, blocksize) +
			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
	return 0;
}
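
/*
 * Implement RENAME_EXCHANGE: atomically swap two directory entries. Both
 * sides are unlinked and re-added under the other name within one
 * transaction, and the log trees are pinned while the new names are logged
 * so that a concurrent fsync cannot observe a half-done exchange.
 */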
static int btrfs_rename_exchange(struct inode *old_dir,
				 struct dentry *old_dentry,
				 struct inode *new_dir,
				 struct dentry *new_dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec64 ctime = current_time(old_inode);
	struct btrfs_rename_ctx old_rename_ctx;
	struct btrfs_rename_ctx new_rename_ctx;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
	u64 old_idx = 0;
	u64 new_idx = 0;
	int ret;
	int ret2;
	bool need_abort = false;
	struct fscrypt_name old_fname, new_fname;
	struct fscrypt_str *old_name, *new_name;

	/*
	 * For non-subvolumes allow exchange only within one subvolume, in the
	 * same inode namespace. Two subvolumes (represented as directories)
	 * can be exchanged as they're a logical link and have a fixed inode
	 * number.
	 */
	if (root != dest &&
	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
		return -EXDEV;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	old_name = &old_fname.disk_name;
	new_name = &new_fname.disk_name;

	/* Close the race window with snapshot create/destroy ioctls. */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&fs_info->subvol_sem);

	/*
	 * For each inode:
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 * 1 to update parent inode
	 *
	 * If the parents are the same, we only need to account for one.
	 */
	trans_num_items = (old_dir == new_dir ? 9 : 10);
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode item
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
		trans_num_items += 4;
	else
		trans_num_items += 3;
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	/*
	 * We need to find a free sequence number both in the source and
	 * in the destination directory for the exchange.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
	if (ret)
		goto out_fail;
	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	BTRFS_I(new_inode)->dir_index = 0ULL;

	/* Reference for the source. */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* Force full log commit if a subvolume is involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
					     btrfs_ino(BTRFS_I(new_dir)),
					     old_idx);
		if (ret)
			goto out_fail;
		need_abort = true;
	}

	/* And now for the dest. */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* Force full log commit if a subvolume is involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
					     btrfs_ino(BTRFS_I(old_dir)),
					     new_idx);
		if (ret) {
			if (need_abort)
				btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	/* Update inode version and ctime/mtime. */
	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	inode_inc_iversion(new_inode);
	old_dir->i_mtime = ctime;
	old_dir->i_ctime = ctime;
	new_dir->i_mtime = ctime;
	new_dir->i_ctime = ctime;
	old_inode->i_ctime = ctime;
	new_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent) {
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), true);
		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
					BTRFS_I(new_inode), true);
	}

	/* src is a subvolume */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
	} else { /* src is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(old_dentry->d_inode),
					   old_name, &old_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	/* dest is a subvolume */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
	} else { /* dest is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
					   BTRFS_I(new_dentry->d_inode),
					   new_name, &new_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     new_name, 0, old_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
			     old_name, 0, new_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = old_idx;
	if (new_inode->i_nlink == 1)
		BTRFS_I(new_inode)->dir_index = new_idx;

	/*
	 * Now pin the logs of the roots. We do it to ensure that no other task
	 * can sync the logs while we are in progress with the rename, because
	 * that could result in an inconsistency in case any of the inodes that
	 * are part of this rename operation were logged before.
	 */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(dest);

	/* Do the log updates for all inodes. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   old_rename_ctx.index, new_dentry->d_parent);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
				   new_rename_ctx.index, old_dentry->d_parent);

	/* Now unpin the logs. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(dest);
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

	fscrypt_free_filename(&new_fname);
	fscrypt_free_filename(&old_fname);
	return ret;
}

static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
					struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		inode_init_owner(idmap, inode, dir,
				 S_IFCHR | WHITEOUT_MODE);
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
	}
	return inode;
}
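
/*
 * Plain rename (and RENAME_WHITEOUT). Cross-subvolume renames are rejected
 * with -EXDEV except when the source is a subvolume root, and a whiteout
 * inode is created in the old location when RENAME_WHITEOUT is requested.
 */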
Start IO on it 8990 * now so we don't add too much work to the end of the transaction 8991 */ 8992 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 8993 filemap_flush(old_inode->i_mapping); 8994 8995 if (flags & RENAME_WHITEOUT) { 8996 whiteout_args.inode = new_whiteout_inode(idmap, old_dir); 8997 if (!whiteout_args.inode) { 8998 ret = -ENOMEM; 8999 goto out_fscrypt_names; 9000 } 9001 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); 9002 if (ret) 9003 goto out_whiteout_inode; 9004 } else { 9005 /* 1 to update the old parent inode. */ 9006 trans_num_items = 1; 9007 } 9008 9009 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9010 /* Close the race window with snapshot create/destroy ioctl */ 9011 down_read(&fs_info->subvol_sem); 9012 /* 9013 * 1 to remove old root ref 9014 * 1 to remove old root backref 9015 * 1 to add new root ref 9016 * 1 to add new root backref 9017 */ 9018 trans_num_items += 4; 9019 } else { 9020 /* 9021 * 1 to update inode 9022 * 1 to remove old inode ref 9023 * 1 to add new inode ref 9024 */ 9025 trans_num_items += 3; 9026 } 9027 /* 9028 * 1 to remove old dir item 9029 * 1 to remove old dir index 9030 * 1 to add new dir item 9031 * 1 to add new dir index 9032 */ 9033 trans_num_items += 4; 9034 /* 1 to update new parent inode if it's not the same as the old parent */ 9035 if (new_dir != old_dir) 9036 trans_num_items++; 9037 if (new_inode) { 9038 /* 9039 * 1 to update inode 9040 * 1 to remove inode ref 9041 * 1 to remove dir item 9042 * 1 to remove dir index 9043 * 1 to possibly add orphan item 9044 */ 9045 trans_num_items += 5; 9046 } 9047 trans = btrfs_start_transaction(root, trans_num_items); 9048 if (IS_ERR(trans)) { 9049 ret = PTR_ERR(trans); 9050 goto out_notrans; 9051 } 9052 9053 if (dest != root) { 9054 ret = btrfs_record_root_in_trans(trans, dest); 9055 if (ret) 9056 goto out_fail; 9057 } 9058 9059 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 9060 if (ret) 9061 goto out_fail; 9062 9063 BTRFS_I(old_inode)->dir_index = 0ULL; 9064 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9065 /* force full log commit if subvolume involved. 
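 * A subvolume rename changes root references in the root tree, which
 * the log tree cannot represent, so fsyncs in this transaction have to
 * fall back to a full transaction commit.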
*/ 9066 btrfs_set_log_full_commit(trans); 9067 } else { 9068 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name, 9069 old_ino, btrfs_ino(BTRFS_I(new_dir)), 9070 index); 9071 if (ret) 9072 goto out_fail; 9073 } 9074 9075 inode_inc_iversion(old_dir); 9076 inode_inc_iversion(new_dir); 9077 inode_inc_iversion(old_inode); 9078 old_dir->i_mtime = current_time(old_dir); 9079 old_dir->i_ctime = old_dir->i_mtime; 9080 new_dir->i_mtime = old_dir->i_mtime; 9081 new_dir->i_ctime = old_dir->i_mtime; 9082 old_inode->i_ctime = old_dir->i_mtime; 9083 9084 if (old_dentry->d_parent != new_dentry->d_parent) 9085 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9086 BTRFS_I(old_inode), true); 9087 9088 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9089 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 9090 } else { 9091 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 9092 BTRFS_I(d_inode(old_dentry)), 9093 &old_fname.disk_name, &rename_ctx); 9094 if (!ret) 9095 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 9096 } 9097 if (ret) { 9098 btrfs_abort_transaction(trans, ret); 9099 goto out_fail; 9100 } 9101 9102 if (new_inode) { 9103 inode_inc_iversion(new_inode); 9104 new_inode->i_ctime = current_time(new_inode); 9105 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 9106 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9107 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 9108 BUG_ON(new_inode->i_nlink == 0); 9109 } else { 9110 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), 9111 BTRFS_I(d_inode(new_dentry)), 9112 &new_fname.disk_name); 9113 } 9114 if (!ret && new_inode->i_nlink == 0) 9115 ret = btrfs_orphan_add(trans, 9116 BTRFS_I(d_inode(new_dentry))); 9117 if (ret) { 9118 btrfs_abort_transaction(trans, ret); 9119 goto out_fail; 9120 } 9121 } 9122 9123 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9124 &new_fname.disk_name, 0, index); 9125 if (ret) { 9126 btrfs_abort_transaction(trans, ret); 9127 goto out_fail; 9128 } 9129 9130 if (old_inode->i_nlink == 1) 9131 BTRFS_I(old_inode)->dir_index = index; 9132 9133 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9134 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 9135 rename_ctx.index, new_dentry->d_parent); 9136 9137 if (flags & RENAME_WHITEOUT) { 9138 ret = btrfs_create_new_inode(trans, &whiteout_args); 9139 if (ret) { 9140 btrfs_abort_transaction(trans, ret); 9141 goto out_fail; 9142 } else { 9143 unlock_new_inode(whiteout_args.inode); 9144 iput(whiteout_args.inode); 9145 whiteout_args.inode = NULL; 9146 } 9147 } 9148 out_fail: 9149 ret2 = btrfs_end_transaction(trans); 9150 ret = ret ? 
ret : ret2; 9151 out_notrans: 9152 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9153 up_read(&fs_info->subvol_sem); 9154 if (flags & RENAME_WHITEOUT) 9155 btrfs_new_inode_args_destroy(&whiteout_args); 9156 out_whiteout_inode: 9157 if (flags & RENAME_WHITEOUT) 9158 iput(whiteout_args.inode); 9159 out_fscrypt_names: 9160 fscrypt_free_filename(&old_fname); 9161 fscrypt_free_filename(&new_fname); 9162 return ret; 9163 } 9164 9165 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir, 9166 struct dentry *old_dentry, struct inode *new_dir, 9167 struct dentry *new_dentry, unsigned int flags) 9168 { 9169 int ret; 9170 9171 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 9172 return -EINVAL; 9173 9174 if (flags & RENAME_EXCHANGE) 9175 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir, 9176 new_dentry); 9177 else 9178 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir, 9179 new_dentry, flags); 9180 9181 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); 9182 9183 return ret; 9184 } 9185 9186 struct btrfs_delalloc_work { 9187 struct inode *inode; 9188 struct completion completion; 9189 struct list_head list; 9190 struct btrfs_work work; 9191 }; 9192 9193 static void btrfs_run_delalloc_work(struct btrfs_work *work) 9194 { 9195 struct btrfs_delalloc_work *delalloc_work; 9196 struct inode *inode; 9197 9198 delalloc_work = container_of(work, struct btrfs_delalloc_work, 9199 work); 9200 inode = delalloc_work->inode; 9201 filemap_flush(inode->i_mapping); 9202 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9203 &BTRFS_I(inode)->runtime_flags)) 9204 filemap_flush(inode->i_mapping); 9205 9206 iput(inode); 9207 complete(&delalloc_work->completion); 9208 } 9209 9210 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 9211 { 9212 struct btrfs_delalloc_work *work; 9213 9214 work = kmalloc(sizeof(*work), GFP_NOFS); 9215 if (!work) 9216 return NULL; 9217 9218 init_completion(&work->completion); 9219 INIT_LIST_HEAD(&work->list); 9220 work->inode = inode; 9221 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); 9222 9223 return work; 9224 } 9225 9226 /* 9227 * some fairly slow code that needs optimization. This walks the list 9228 * of all the inodes with pending delalloc and forces them to disk. 
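 * The inodes are spliced onto a local list under the lock, pinned with
 * igrab(), and then either handed to flush workers (full flush) or
 * written directly via filemap_fdatawrite_wbc() against a shared wbc
 * whose remaining page budget bounds the total writeback.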
9229 */ 9230 static int start_delalloc_inodes(struct btrfs_root *root, 9231 struct writeback_control *wbc, bool snapshot, 9232 bool in_reclaim_context) 9233 { 9234 struct btrfs_inode *binode; 9235 struct inode *inode; 9236 struct btrfs_delalloc_work *work, *next; 9237 LIST_HEAD(works); 9238 LIST_HEAD(splice); 9239 int ret = 0; 9240 bool full_flush = wbc->nr_to_write == LONG_MAX; 9241 9242 mutex_lock(&root->delalloc_mutex); 9243 spin_lock(&root->delalloc_lock); 9244 list_splice_init(&root->delalloc_inodes, &splice); 9245 while (!list_empty(&splice)) { 9246 binode = list_entry(splice.next, struct btrfs_inode, 9247 delalloc_inodes); 9248 9249 list_move_tail(&binode->delalloc_inodes, 9250 &root->delalloc_inodes); 9251 9252 if (in_reclaim_context && 9253 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) 9254 continue; 9255 9256 inode = igrab(&binode->vfs_inode); 9257 if (!inode) { 9258 cond_resched_lock(&root->delalloc_lock); 9259 continue; 9260 } 9261 spin_unlock(&root->delalloc_lock); 9262 9263 if (snapshot) 9264 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 9265 &binode->runtime_flags); 9266 if (full_flush) { 9267 work = btrfs_alloc_delalloc_work(inode); 9268 if (!work) { 9269 iput(inode); 9270 ret = -ENOMEM; 9271 goto out; 9272 } 9273 list_add_tail(&work->list, &works); 9274 btrfs_queue_work(root->fs_info->flush_workers, 9275 &work->work); 9276 } else { 9277 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); 9278 btrfs_add_delayed_iput(BTRFS_I(inode)); 9279 if (ret || wbc->nr_to_write <= 0) 9280 goto out; 9281 } 9282 cond_resched(); 9283 spin_lock(&root->delalloc_lock); 9284 } 9285 spin_unlock(&root->delalloc_lock); 9286 9287 out: 9288 list_for_each_entry_safe(work, next, &works, list) { 9289 list_del_init(&work->list); 9290 wait_for_completion(&work->completion); 9291 kfree(work); 9292 } 9293 9294 if (!list_empty(&splice)) { 9295 spin_lock(&root->delalloc_lock); 9296 list_splice_tail(&splice, &root->delalloc_inodes); 9297 spin_unlock(&root->delalloc_lock); 9298 } 9299 mutex_unlock(&root->delalloc_mutex); 9300 return ret; 9301 } 9302 9303 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context) 9304 { 9305 struct writeback_control wbc = { 9306 .nr_to_write = LONG_MAX, 9307 .sync_mode = WB_SYNC_NONE, 9308 .range_start = 0, 9309 .range_end = LLONG_MAX, 9310 }; 9311 struct btrfs_fs_info *fs_info = root->fs_info; 9312 9313 if (BTRFS_FS_ERROR(fs_info)) 9314 return -EROFS; 9315 9316 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context); 9317 } 9318 9319 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, 9320 bool in_reclaim_context) 9321 { 9322 struct writeback_control wbc = { 9323 .nr_to_write = nr, 9324 .sync_mode = WB_SYNC_NONE, 9325 .range_start = 0, 9326 .range_end = LLONG_MAX, 9327 }; 9328 struct btrfs_root *root; 9329 LIST_HEAD(splice); 9330 int ret; 9331 9332 if (BTRFS_FS_ERROR(fs_info)) 9333 return -EROFS; 9334 9335 mutex_lock(&fs_info->delalloc_root_mutex); 9336 spin_lock(&fs_info->delalloc_root_lock); 9337 list_splice_init(&fs_info->delalloc_roots, &splice); 9338 while (!list_empty(&splice)) { 9339 /* 9340 * Reset nr_to_write here so we know that we're doing a full 9341 * flush. 
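 * The same wbc is shared by every root, so on a bounded flush the
 * remaining budget carries over: e.g. with nr == 1024, a root that
 * writes 600 pages leaves 424 for the remaining roots, and we stop
 * once it reaches 0.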
9342 */ 9343 if (nr == LONG_MAX) 9344 wbc.nr_to_write = LONG_MAX; 9345 9346 root = list_first_entry(&splice, struct btrfs_root, 9347 delalloc_root); 9348 root = btrfs_grab_root(root); 9349 BUG_ON(!root); 9350 list_move_tail(&root->delalloc_root, 9351 &fs_info->delalloc_roots); 9352 spin_unlock(&fs_info->delalloc_root_lock); 9353 9354 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context); 9355 btrfs_put_root(root); 9356 if (ret < 0 || wbc.nr_to_write <= 0) 9357 goto out; 9358 spin_lock(&fs_info->delalloc_root_lock); 9359 } 9360 spin_unlock(&fs_info->delalloc_root_lock); 9361 9362 ret = 0; 9363 out: 9364 if (!list_empty(&splice)) { 9365 spin_lock(&fs_info->delalloc_root_lock); 9366 list_splice_tail(&splice, &fs_info->delalloc_roots); 9367 spin_unlock(&fs_info->delalloc_root_lock); 9368 } 9369 mutex_unlock(&fs_info->delalloc_root_mutex); 9370 return ret; 9371 } 9372 9373 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, 9374 struct dentry *dentry, const char *symname) 9375 { 9376 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 9377 struct btrfs_trans_handle *trans; 9378 struct btrfs_root *root = BTRFS_I(dir)->root; 9379 struct btrfs_path *path; 9380 struct btrfs_key key; 9381 struct inode *inode; 9382 struct btrfs_new_inode_args new_inode_args = { 9383 .dir = dir, 9384 .dentry = dentry, 9385 }; 9386 unsigned int trans_num_items; 9387 int err; 9388 int name_len; 9389 int datasize; 9390 unsigned long ptr; 9391 struct btrfs_file_extent_item *ei; 9392 struct extent_buffer *leaf; 9393 9394 name_len = strlen(symname); 9395 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 9396 return -ENAMETOOLONG; 9397 9398 inode = new_inode(dir->i_sb); 9399 if (!inode) 9400 return -ENOMEM; 9401 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO); 9402 inode->i_op = &btrfs_symlink_inode_operations; 9403 inode_nohighmem(inode); 9404 inode->i_mapping->a_ops = &btrfs_aops; 9405 btrfs_i_size_write(BTRFS_I(inode), name_len); 9406 inode_set_bytes(inode, name_len); 9407 9408 new_inode_args.inode = inode; 9409 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9410 if (err) 9411 goto out_inode; 9412 /* 1 additional item for the inline extent */ 9413 trans_num_items++; 9414 9415 trans = btrfs_start_transaction(root, trans_num_items); 9416 if (IS_ERR(trans)) { 9417 err = PTR_ERR(trans); 9418 goto out_new_inode_args; 9419 } 9420 9421 err = btrfs_create_new_inode(trans, &new_inode_args); 9422 if (err) 9423 goto out; 9424 9425 path = btrfs_alloc_path(); 9426 if (!path) { 9427 err = -ENOMEM; 9428 btrfs_abort_transaction(trans, err); 9429 discard_new_inode(inode); 9430 inode = NULL; 9431 goto out; 9432 } 9433 key.objectid = btrfs_ino(BTRFS_I(inode)); 9434 key.offset = 0; 9435 key.type = BTRFS_EXTENT_DATA_KEY; 9436 datasize = btrfs_file_extent_calc_inline_size(name_len); 9437 err = btrfs_insert_empty_item(trans, root, path, &key, 9438 datasize); 9439 if (err) { 9440 btrfs_abort_transaction(trans, err); 9441 btrfs_free_path(path); 9442 discard_new_inode(inode); 9443 inode = NULL; 9444 goto out; 9445 } 9446 leaf = path->nodes[0]; 9447 ei = btrfs_item_ptr(leaf, path->slots[0], 9448 struct btrfs_file_extent_item); 9449 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 9450 btrfs_set_file_extent_type(leaf, ei, 9451 BTRFS_FILE_EXTENT_INLINE); 9452 btrfs_set_file_extent_encryption(leaf, ei, 0); 9453 btrfs_set_file_extent_compression(leaf, ei, 0); 9454 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 9455 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 9456 9457 
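/*
 * The symlink target is stored as an inline file extent: the item is
 * btrfs_file_extent_calc_inline_size(name_len) bytes, i.e. the 21 byte
 * header up to disk_bytenr plus the unterminated target string, which
 * is copied in just below.
 */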
ptr = btrfs_file_extent_inline_start(ei); 9458 write_extent_buffer(leaf, symname, ptr, name_len); 9459 btrfs_mark_buffer_dirty(leaf); 9460 btrfs_free_path(path); 9461 9462 d_instantiate_new(dentry, inode); 9463 err = 0; 9464 out: 9465 btrfs_end_transaction(trans); 9466 btrfs_btree_balance_dirty(fs_info); 9467 out_new_inode_args: 9468 btrfs_new_inode_args_destroy(&new_inode_args); 9469 out_inode: 9470 if (err) 9471 iput(inode); 9472 return err; 9473 } 9474 9475 static struct btrfs_trans_handle *insert_prealloc_file_extent( 9476 struct btrfs_trans_handle *trans_in, 9477 struct btrfs_inode *inode, 9478 struct btrfs_key *ins, 9479 u64 file_offset) 9480 { 9481 struct btrfs_file_extent_item stack_fi; 9482 struct btrfs_replace_extent_info extent_info; 9483 struct btrfs_trans_handle *trans = trans_in; 9484 struct btrfs_path *path; 9485 u64 start = ins->objectid; 9486 u64 len = ins->offset; 9487 int qgroup_released; 9488 int ret; 9489 9490 memset(&stack_fi, 0, sizeof(stack_fi)); 9491 9492 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 9493 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 9494 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 9495 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 9496 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 9497 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 9498 /* Encryption and other encoding is reserved and all 0 */ 9499 9500 qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len); 9501 if (qgroup_released < 0) 9502 return ERR_PTR(qgroup_released); 9503 9504 if (trans) { 9505 ret = insert_reserved_file_extent(trans, inode, 9506 file_offset, &stack_fi, 9507 true, qgroup_released); 9508 if (ret) 9509 goto free_qgroup; 9510 return trans; 9511 } 9512 9513 extent_info.disk_offset = start; 9514 extent_info.disk_len = len; 9515 extent_info.data_offset = 0; 9516 extent_info.data_len = len; 9517 extent_info.file_offset = file_offset; 9518 extent_info.extent_buf = (char *)&stack_fi; 9519 extent_info.is_new_extent = true; 9520 extent_info.update_times = true; 9521 extent_info.qgroup_reserved = qgroup_released; 9522 extent_info.insertions = 0; 9523 9524 path = btrfs_alloc_path(); 9525 if (!path) { 9526 ret = -ENOMEM; 9527 goto free_qgroup; 9528 } 9529 9530 ret = btrfs_replace_file_extents(inode, path, file_offset, 9531 file_offset + len - 1, &extent_info, 9532 &trans); 9533 btrfs_free_path(path); 9534 if (ret) 9535 goto free_qgroup; 9536 return trans; 9537 9538 free_qgroup: 9539 /* 9540 * We have released qgroup data range at the beginning of the function, 9541 * and normally qgroup_released bytes will be freed when committing 9542 * transaction. 9543 * But if we error out early, we have to free what we have released 9544 * or we leak qgroup data reservation. 
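 * btrfs_qgroup_free_refroot() below drops the qgroup_released bytes
 * that btrfs_qgroup_release_data() accounted at the start of the
 * function.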
9545 */
9546 btrfs_qgroup_free_refroot(inode->root->fs_info,
9547 inode->root->root_key.objectid, qgroup_released,
9548 BTRFS_QGROUP_RSV_DATA);
9549 return ERR_PTR(ret);
9550 }
9551
9552 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9553 u64 start, u64 num_bytes, u64 min_size,
9554 loff_t actual_len, u64 *alloc_hint,
9555 struct btrfs_trans_handle *trans)
9556 {
9557 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9558 struct extent_map *em;
9559 struct btrfs_root *root = BTRFS_I(inode)->root;
9560 struct btrfs_key ins;
9561 u64 cur_offset = start;
9562 u64 clear_offset = start;
9563 u64 i_size;
9564 u64 cur_bytes;
9565 u64 last_alloc = (u64)-1;
9566 int ret = 0;
9567 bool own_trans = true;
9568 u64 end = start + num_bytes - 1;
9569
9570 if (trans)
9571 own_trans = false;
9572 while (num_bytes > 0) {
9573 cur_bytes = min_t(u64, num_bytes, SZ_256M);
9574 cur_bytes = max(cur_bytes, min_size);
9575 /*
9576 * If we are severely fragmented we could end up with really
9577 * small allocations, so if the allocator is returning small
9578 * chunks let's make its job easier by only searching for those
9579 * sized chunks.
9580 */
9581 cur_bytes = min(cur_bytes, last_alloc);
9582 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9583 min_size, 0, *alloc_hint, &ins, 1, 0);
9584 if (ret)
9585 break;
9586
9587 /*
9588 * We've reserved this space, and thus converted it from
9589 * ->bytes_may_use to ->bytes_reserved. For any error that happens
9590 * from here on out we will only need to clear our reservation
9591 * for the remaining unreserved area, so advance our
9592 * clear_offset by our extent size.
9593 */
9594 clear_offset += ins.offset;
9595
9596 last_alloc = ins.offset;
9597 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9598 &ins, cur_offset);
9599 /*
9600 * Now that we inserted the prealloc extent we can finally
9601 * decrement the number of reservations in the block group.
9602 * If we did it before, we could race with relocation and have
9603 * relocation miss the reserved extent, making it fail later.
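 * Relocation waits for a block group's reservation count to drop to
 * zero before processing it, so by decrementing only after the insert
 * the new extent is guaranteed to be visible to it.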
9604 */ 9605 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9606 if (IS_ERR(trans)) { 9607 ret = PTR_ERR(trans); 9608 btrfs_free_reserved_extent(fs_info, ins.objectid, 9609 ins.offset, 0); 9610 break; 9611 } 9612 9613 em = alloc_extent_map(); 9614 if (!em) { 9615 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset, 9616 cur_offset + ins.offset - 1, false); 9617 btrfs_set_inode_full_sync(BTRFS_I(inode)); 9618 goto next; 9619 } 9620 9621 em->start = cur_offset; 9622 em->orig_start = cur_offset; 9623 em->len = ins.offset; 9624 em->block_start = ins.objectid; 9625 em->block_len = ins.offset; 9626 em->orig_block_len = ins.offset; 9627 em->ram_bytes = ins.offset; 9628 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 9629 em->generation = trans->transid; 9630 9631 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true); 9632 free_extent_map(em); 9633 next: 9634 num_bytes -= ins.offset; 9635 cur_offset += ins.offset; 9636 *alloc_hint = ins.objectid + ins.offset; 9637 9638 inode_inc_iversion(inode); 9639 inode->i_ctime = current_time(inode); 9640 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 9641 if (!(mode & FALLOC_FL_KEEP_SIZE) && 9642 (actual_len > inode->i_size) && 9643 (cur_offset > inode->i_size)) { 9644 if (cur_offset > actual_len) 9645 i_size = actual_len; 9646 else 9647 i_size = cur_offset; 9648 i_size_write(inode, i_size); 9649 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 9650 } 9651 9652 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 9653 9654 if (ret) { 9655 btrfs_abort_transaction(trans, ret); 9656 if (own_trans) 9657 btrfs_end_transaction(trans); 9658 break; 9659 } 9660 9661 if (own_trans) { 9662 btrfs_end_transaction(trans); 9663 trans = NULL; 9664 } 9665 } 9666 if (clear_offset < end) 9667 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 9668 end - clear_offset + 1); 9669 return ret; 9670 } 9671 9672 int btrfs_prealloc_file_range(struct inode *inode, int mode, 9673 u64 start, u64 num_bytes, u64 min_size, 9674 loff_t actual_len, u64 *alloc_hint) 9675 { 9676 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9677 min_size, actual_len, alloc_hint, 9678 NULL); 9679 } 9680 9681 int btrfs_prealloc_file_range_trans(struct inode *inode, 9682 struct btrfs_trans_handle *trans, int mode, 9683 u64 start, u64 num_bytes, u64 min_size, 9684 loff_t actual_len, u64 *alloc_hint) 9685 { 9686 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9687 min_size, actual_len, alloc_hint, trans); 9688 } 9689 9690 static int btrfs_permission(struct mnt_idmap *idmap, 9691 struct inode *inode, int mask) 9692 { 9693 struct btrfs_root *root = BTRFS_I(inode)->root; 9694 umode_t mode = inode->i_mode; 9695 9696 if (mask & MAY_WRITE && 9697 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 9698 if (btrfs_root_readonly(root)) 9699 return -EROFS; 9700 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 9701 return -EACCES; 9702 } 9703 return generic_permission(idmap, inode, mask); 9704 } 9705 9706 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 9707 struct file *file, umode_t mode) 9708 { 9709 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 9710 struct btrfs_trans_handle *trans; 9711 struct btrfs_root *root = BTRFS_I(dir)->root; 9712 struct inode *inode; 9713 struct btrfs_new_inode_args new_inode_args = { 9714 .dir = dir, 9715 .dentry = file->f_path.dentry, 9716 .orphan = true, 9717 }; 9718 unsigned int trans_num_items; 9719 int ret; 9720 9721 inode = new_inode(dir->i_sb); 9722 if (!inode) 9723 return -ENOMEM; 
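/*
 * For O_TMPFILE the inode is created without a directory entry and is
 * put on the orphan list (new_inode_args.orphan above), so it is
 * cleaned up after a crash if it was never linked in.
 */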
9724 inode_init_owner(idmap, inode, dir, mode); 9725 inode->i_fop = &btrfs_file_operations; 9726 inode->i_op = &btrfs_file_inode_operations; 9727 inode->i_mapping->a_ops = &btrfs_aops; 9728 9729 new_inode_args.inode = inode; 9730 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9731 if (ret) 9732 goto out_inode; 9733 9734 trans = btrfs_start_transaction(root, trans_num_items); 9735 if (IS_ERR(trans)) { 9736 ret = PTR_ERR(trans); 9737 goto out_new_inode_args; 9738 } 9739 9740 ret = btrfs_create_new_inode(trans, &new_inode_args); 9741 9742 /* 9743 * We set number of links to 0 in btrfs_create_new_inode(), and here we 9744 * set it to 1 because d_tmpfile() will issue a warning if the count is 9745 * 0, through: 9746 * 9747 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 9748 */ 9749 set_nlink(inode, 1); 9750 9751 if (!ret) { 9752 d_tmpfile(file, inode); 9753 unlock_new_inode(inode); 9754 mark_inode_dirty(inode); 9755 } 9756 9757 btrfs_end_transaction(trans); 9758 btrfs_btree_balance_dirty(fs_info); 9759 out_new_inode_args: 9760 btrfs_new_inode_args_destroy(&new_inode_args); 9761 out_inode: 9762 if (ret) 9763 iput(inode); 9764 return finish_open_simple(file, ret); 9765 } 9766 9767 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end) 9768 { 9769 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9770 unsigned long index = start >> PAGE_SHIFT; 9771 unsigned long end_index = end >> PAGE_SHIFT; 9772 struct page *page; 9773 u32 len; 9774 9775 ASSERT(end + 1 - start <= U32_MAX); 9776 len = end + 1 - start; 9777 while (index <= end_index) { 9778 page = find_get_page(inode->vfs_inode.i_mapping, index); 9779 ASSERT(page); /* Pages should be in the extent_io_tree */ 9780 9781 btrfs_page_set_writeback(fs_info, page, start, len); 9782 put_page(page); 9783 index++; 9784 } 9785 } 9786 9787 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info, 9788 int compress_type) 9789 { 9790 switch (compress_type) { 9791 case BTRFS_COMPRESS_NONE: 9792 return BTRFS_ENCODED_IO_COMPRESSION_NONE; 9793 case BTRFS_COMPRESS_ZLIB: 9794 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB; 9795 case BTRFS_COMPRESS_LZO: 9796 /* 9797 * The LZO format depends on the sector size. 64K is the maximum 9798 * sector size that we support. 
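 * The returned value encodes the sector size as
 * BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + (sectorsize_bits - 12): e.g. a
 * 16K sector size (sectorsize_bits == 14) yields ..._LZO_16K, and
 * anything outside the supported 4K..64K range is rejected.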
9799 */ 9800 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) 9801 return -EINVAL; 9802 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 9803 (fs_info->sectorsize_bits - 12); 9804 case BTRFS_COMPRESS_ZSTD: 9805 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD; 9806 default: 9807 return -EUCLEAN; 9808 } 9809 } 9810 9811 static ssize_t btrfs_encoded_read_inline( 9812 struct kiocb *iocb, 9813 struct iov_iter *iter, u64 start, 9814 u64 lockend, 9815 struct extent_state **cached_state, 9816 u64 extent_start, size_t count, 9817 struct btrfs_ioctl_encoded_io_args *encoded, 9818 bool *unlocked) 9819 { 9820 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9821 struct btrfs_root *root = inode->root; 9822 struct btrfs_fs_info *fs_info = root->fs_info; 9823 struct extent_io_tree *io_tree = &inode->io_tree; 9824 struct btrfs_path *path; 9825 struct extent_buffer *leaf; 9826 struct btrfs_file_extent_item *item; 9827 u64 ram_bytes; 9828 unsigned long ptr; 9829 void *tmp; 9830 ssize_t ret; 9831 9832 path = btrfs_alloc_path(); 9833 if (!path) { 9834 ret = -ENOMEM; 9835 goto out; 9836 } 9837 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 9838 extent_start, 0); 9839 if (ret) { 9840 if (ret > 0) { 9841 /* The extent item disappeared? */ 9842 ret = -EIO; 9843 } 9844 goto out; 9845 } 9846 leaf = path->nodes[0]; 9847 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 9848 9849 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); 9850 ptr = btrfs_file_extent_inline_start(item); 9851 9852 encoded->len = min_t(u64, extent_start + ram_bytes, 9853 inode->vfs_inode.i_size) - iocb->ki_pos; 9854 ret = btrfs_encoded_io_compression_from_extent(fs_info, 9855 btrfs_file_extent_compression(leaf, item)); 9856 if (ret < 0) 9857 goto out; 9858 encoded->compression = ret; 9859 if (encoded->compression) { 9860 size_t inline_size; 9861 9862 inline_size = btrfs_file_extent_inline_item_len(leaf, 9863 path->slots[0]); 9864 if (inline_size > count) { 9865 ret = -ENOBUFS; 9866 goto out; 9867 } 9868 count = inline_size; 9869 encoded->unencoded_len = ram_bytes; 9870 encoded->unencoded_offset = iocb->ki_pos - extent_start; 9871 } else { 9872 count = min_t(u64, count, encoded->len); 9873 encoded->len = count; 9874 encoded->unencoded_len = count; 9875 ptr += iocb->ki_pos - extent_start; 9876 } 9877 9878 tmp = kmalloc(count, GFP_NOFS); 9879 if (!tmp) { 9880 ret = -ENOMEM; 9881 goto out; 9882 } 9883 read_extent_buffer(leaf, tmp, ptr, count); 9884 btrfs_release_path(path); 9885 unlock_extent(io_tree, start, lockend, cached_state); 9886 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9887 *unlocked = true; 9888 9889 ret = copy_to_iter(tmp, count, iter); 9890 if (ret != count) 9891 ret = -EFAULT; 9892 kfree(tmp); 9893 out: 9894 btrfs_free_path(path); 9895 return ret; 9896 } 9897 9898 struct btrfs_encoded_read_private { 9899 wait_queue_head_t wait; 9900 atomic_t pending; 9901 blk_status_t status; 9902 }; 9903 9904 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio) 9905 { 9906 struct btrfs_encoded_read_private *priv = bbio->private; 9907 9908 if (bbio->bio.bi_status) { 9909 /* 9910 * The memory barrier implied by the atomic_dec_return() here 9911 * pairs with the memory barrier implied by the 9912 * atomic_dec_return() or io_wait_event() in 9913 * btrfs_encoded_read_regular_fill_pages() to ensure that this 9914 * write is observed before the load of status in 9915 * btrfs_encoded_read_regular_fill_pages(). 
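 * The status itself is stored with WRITE_ONCE() and read with
 * READ_ONCE(), since those accesses are not otherwise synchronized.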
9916 */ 9917 WRITE_ONCE(priv->status, bbio->bio.bi_status); 9918 } 9919 if (!atomic_dec_return(&priv->pending)) 9920 wake_up(&priv->wait); 9921 bio_put(&bbio->bio); 9922 } 9923 9924 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, 9925 u64 file_offset, u64 disk_bytenr, 9926 u64 disk_io_size, struct page **pages) 9927 { 9928 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9929 struct btrfs_encoded_read_private priv = { 9930 .pending = ATOMIC_INIT(1), 9931 }; 9932 unsigned long i = 0; 9933 struct btrfs_bio *bbio; 9934 9935 init_waitqueue_head(&priv.wait); 9936 9937 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 9938 btrfs_encoded_read_endio, &priv); 9939 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 9940 bbio->inode = inode; 9941 9942 do { 9943 size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE); 9944 9945 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) { 9946 atomic_inc(&priv.pending); 9947 btrfs_submit_bio(bbio, 0); 9948 9949 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 9950 btrfs_encoded_read_endio, &priv); 9951 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 9952 bbio->inode = inode; 9953 continue; 9954 } 9955 9956 i++; 9957 disk_bytenr += bytes; 9958 disk_io_size -= bytes; 9959 } while (disk_io_size); 9960 9961 atomic_inc(&priv.pending); 9962 btrfs_submit_bio(bbio, 0); 9963 9964 if (atomic_dec_return(&priv.pending)) 9965 io_wait_event(priv.wait, !atomic_read(&priv.pending)); 9966 /* See btrfs_encoded_read_endio() for ordering. */ 9967 return blk_status_to_errno(READ_ONCE(priv.status)); 9968 } 9969 9970 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, 9971 struct iov_iter *iter, 9972 u64 start, u64 lockend, 9973 struct extent_state **cached_state, 9974 u64 disk_bytenr, u64 disk_io_size, 9975 size_t count, bool compressed, 9976 bool *unlocked) 9977 { 9978 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9979 struct extent_io_tree *io_tree = &inode->io_tree; 9980 struct page **pages; 9981 unsigned long nr_pages, i; 9982 u64 cur; 9983 size_t page_offset; 9984 ssize_t ret; 9985 9986 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); 9987 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 9988 if (!pages) 9989 return -ENOMEM; 9990 ret = btrfs_alloc_page_array(nr_pages, pages); 9991 if (ret) { 9992 ret = -ENOMEM; 9993 goto out; 9994 } 9995 9996 ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr, 9997 disk_io_size, pages); 9998 if (ret) 9999 goto out; 10000 10001 unlock_extent(io_tree, start, lockend, cached_state); 10002 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10003 *unlocked = true; 10004 10005 if (compressed) { 10006 i = 0; 10007 page_offset = 0; 10008 } else { 10009 i = (iocb->ki_pos - start) >> PAGE_SHIFT; 10010 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); 10011 } 10012 cur = 0; 10013 while (cur < count) { 10014 size_t bytes = min_t(size_t, count - cur, 10015 PAGE_SIZE - page_offset); 10016 10017 if (copy_page_to_iter(pages[i], page_offset, bytes, 10018 iter) != bytes) { 10019 ret = -EFAULT; 10020 goto out; 10021 } 10022 i++; 10023 cur += bytes; 10024 page_offset = 0; 10025 } 10026 ret = count; 10027 out: 10028 for (i = 0; i < nr_pages; i++) { 10029 if (pages[i]) 10030 __free_page(pages[i]); 10031 } 10032 kfree(pages); 10033 return ret; 10034 } 10035 10036 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, 10037 struct btrfs_ioctl_encoded_io_args *encoded) 10038 { 10039 struct btrfs_inode *inode = 
BTRFS_I(file_inode(iocb->ki_filp)); 10040 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10041 struct extent_io_tree *io_tree = &inode->io_tree; 10042 ssize_t ret; 10043 size_t count = iov_iter_count(iter); 10044 u64 start, lockend, disk_bytenr, disk_io_size; 10045 struct extent_state *cached_state = NULL; 10046 struct extent_map *em; 10047 bool unlocked = false; 10048 10049 file_accessed(iocb->ki_filp); 10050 10051 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED); 10052 10053 if (iocb->ki_pos >= inode->vfs_inode.i_size) { 10054 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10055 return 0; 10056 } 10057 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); 10058 /* 10059 * We don't know how long the extent containing iocb->ki_pos is, but if 10060 * it's compressed we know that it won't be longer than this. 10061 */ 10062 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; 10063 10064 for (;;) { 10065 struct btrfs_ordered_extent *ordered; 10066 10067 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, 10068 lockend - start + 1); 10069 if (ret) 10070 goto out_unlock_inode; 10071 lock_extent(io_tree, start, lockend, &cached_state); 10072 ordered = btrfs_lookup_ordered_range(inode, start, 10073 lockend - start + 1); 10074 if (!ordered) 10075 break; 10076 btrfs_put_ordered_extent(ordered); 10077 unlock_extent(io_tree, start, lockend, &cached_state); 10078 cond_resched(); 10079 } 10080 10081 em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1); 10082 if (IS_ERR(em)) { 10083 ret = PTR_ERR(em); 10084 goto out_unlock_extent; 10085 } 10086 10087 if (em->block_start == EXTENT_MAP_INLINE) { 10088 u64 extent_start = em->start; 10089 10090 /* 10091 * For inline extents we get everything we need out of the 10092 * extent item. 10093 */ 10094 free_extent_map(em); 10095 em = NULL; 10096 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, 10097 &cached_state, extent_start, 10098 count, encoded, &unlocked); 10099 goto out; 10100 } 10101 10102 /* 10103 * We only want to return up to EOF even if the extent extends beyond 10104 * that. 10105 */ 10106 encoded->len = min_t(u64, extent_map_end(em), 10107 inode->vfs_inode.i_size) - iocb->ki_pos; 10108 if (em->block_start == EXTENT_MAP_HOLE || 10109 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 10110 disk_bytenr = EXTENT_MAP_HOLE; 10111 count = min_t(u64, count, encoded->len); 10112 encoded->len = count; 10113 encoded->unencoded_len = count; 10114 } else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10115 disk_bytenr = em->block_start; 10116 /* 10117 * Bail if the buffer isn't large enough to return the whole 10118 * compressed extent. 10119 */ 10120 if (em->block_len > count) { 10121 ret = -ENOBUFS; 10122 goto out_em; 10123 } 10124 disk_io_size = em->block_len; 10125 count = em->block_len; 10126 encoded->unencoded_len = em->ram_bytes; 10127 encoded->unencoded_offset = iocb->ki_pos - em->orig_start; 10128 ret = btrfs_encoded_io_compression_from_extent(fs_info, 10129 em->compress_type); 10130 if (ret < 0) 10131 goto out_em; 10132 encoded->compression = ret; 10133 } else { 10134 disk_bytenr = em->block_start + (start - em->start); 10135 if (encoded->len > count) 10136 encoded->len = count; 10137 /* 10138 * Don't read beyond what we locked. This also limits the page 10139 * allocations that we'll do. 
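 * For example, with a 4K sector size, ki_pos == 6144 (so start == 4096)
 * and encoded->len == 5000: disk_io_size starts as 11144 - 4096 = 7048,
 * count becomes 4096 + 7048 - 6144 = 5000, and disk_io_size is then
 * rounded up to 8192 for the actual read.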
10140 */ 10141 disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; 10142 count = start + disk_io_size - iocb->ki_pos; 10143 encoded->len = count; 10144 encoded->unencoded_len = count; 10145 disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize); 10146 } 10147 free_extent_map(em); 10148 em = NULL; 10149 10150 if (disk_bytenr == EXTENT_MAP_HOLE) { 10151 unlock_extent(io_tree, start, lockend, &cached_state); 10152 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10153 unlocked = true; 10154 ret = iov_iter_zero(count, iter); 10155 if (ret != count) 10156 ret = -EFAULT; 10157 } else { 10158 ret = btrfs_encoded_read_regular(iocb, iter, start, lockend, 10159 &cached_state, disk_bytenr, 10160 disk_io_size, count, 10161 encoded->compression, 10162 &unlocked); 10163 } 10164 10165 out: 10166 if (ret >= 0) 10167 iocb->ki_pos += encoded->len; 10168 out_em: 10169 free_extent_map(em); 10170 out_unlock_extent: 10171 if (!unlocked) 10172 unlock_extent(io_tree, start, lockend, &cached_state); 10173 out_unlock_inode: 10174 if (!unlocked) 10175 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10176 return ret; 10177 } 10178 10179 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 10180 const struct btrfs_ioctl_encoded_io_args *encoded) 10181 { 10182 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10183 struct btrfs_root *root = inode->root; 10184 struct btrfs_fs_info *fs_info = root->fs_info; 10185 struct extent_io_tree *io_tree = &inode->io_tree; 10186 struct extent_changeset *data_reserved = NULL; 10187 struct extent_state *cached_state = NULL; 10188 struct btrfs_ordered_extent *ordered; 10189 int compression; 10190 size_t orig_count; 10191 u64 start, end; 10192 u64 num_bytes, ram_bytes, disk_num_bytes; 10193 unsigned long nr_pages, i; 10194 struct page **pages; 10195 struct btrfs_key ins; 10196 bool extent_reserved = false; 10197 struct extent_map *em; 10198 ssize_t ret; 10199 10200 switch (encoded->compression) { 10201 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB: 10202 compression = BTRFS_COMPRESS_ZLIB; 10203 break; 10204 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD: 10205 compression = BTRFS_COMPRESS_ZSTD; 10206 break; 10207 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K: 10208 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K: 10209 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K: 10210 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K: 10211 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K: 10212 /* The sector size must match for LZO. */ 10213 if (encoded->compression - 10214 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 != 10215 fs_info->sectorsize_bits) 10216 return -EINVAL; 10217 compression = BTRFS_COMPRESS_LZO; 10218 break; 10219 default: 10220 return -EINVAL; 10221 } 10222 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 10223 return -EINVAL; 10224 10225 orig_count = iov_iter_count(from); 10226 10227 /* The extent size must be sane. */ 10228 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || 10229 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0) 10230 return -EINVAL; 10231 10232 /* 10233 * The compressed data must be smaller than the decompressed data. 10234 * 10235 * It's of course possible for data to compress to larger or the same 10236 * size, but the buffered I/O path falls back to no compression for such 10237 * data, and we don't want to break any assumptions by creating these 10238 * extents. 
10239 * 10240 * Note that this is less strict than the current check we have that the 10241 * compressed data must be at least one sector smaller than the 10242 * decompressed data. We only want to enforce the weaker requirement 10243 * from old kernels that it is at least one byte smaller. 10244 */ 10245 if (orig_count >= encoded->unencoded_len) 10246 return -EINVAL; 10247 10248 /* The extent must start on a sector boundary. */ 10249 start = iocb->ki_pos; 10250 if (!IS_ALIGNED(start, fs_info->sectorsize)) 10251 return -EINVAL; 10252 10253 /* 10254 * The extent must end on a sector boundary. However, we allow a write 10255 * which ends at or extends i_size to have an unaligned length; we round 10256 * up the extent size and set i_size to the unaligned end. 10257 */ 10258 if (start + encoded->len < inode->vfs_inode.i_size && 10259 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) 10260 return -EINVAL; 10261 10262 /* Finally, the offset in the unencoded data must be sector-aligned. */ 10263 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) 10264 return -EINVAL; 10265 10266 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); 10267 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); 10268 end = start + num_bytes - 1; 10269 10270 /* 10271 * If the extent cannot be inline, the compressed data on disk must be 10272 * sector-aligned. For convenience, we extend it with zeroes if it 10273 * isn't. 10274 */ 10275 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); 10276 nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE); 10277 pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT); 10278 if (!pages) 10279 return -ENOMEM; 10280 for (i = 0; i < nr_pages; i++) { 10281 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from)); 10282 char *kaddr; 10283 10284 pages[i] = alloc_page(GFP_KERNEL_ACCOUNT); 10285 if (!pages[i]) { 10286 ret = -ENOMEM; 10287 goto out_pages; 10288 } 10289 kaddr = kmap_local_page(pages[i]); 10290 if (copy_from_iter(kaddr, bytes, from) != bytes) { 10291 kunmap_local(kaddr); 10292 ret = -EFAULT; 10293 goto out_pages; 10294 } 10295 if (bytes < PAGE_SIZE) 10296 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); 10297 kunmap_local(kaddr); 10298 } 10299 10300 for (;;) { 10301 struct btrfs_ordered_extent *ordered; 10302 10303 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes); 10304 if (ret) 10305 goto out_pages; 10306 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, 10307 start >> PAGE_SHIFT, 10308 end >> PAGE_SHIFT); 10309 if (ret) 10310 goto out_pages; 10311 lock_extent(io_tree, start, end, &cached_state); 10312 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); 10313 if (!ordered && 10314 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) 10315 break; 10316 if (ordered) 10317 btrfs_put_ordered_extent(ordered); 10318 unlock_extent(io_tree, start, end, &cached_state); 10319 cond_resched(); 10320 } 10321 10322 /* 10323 * We don't use the higher-level delalloc space functions because our 10324 * num_bytes and disk_num_bytes are different. 10325 */ 10326 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes); 10327 if (ret) 10328 goto out_unlock; 10329 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); 10330 if (ret) 10331 goto out_free_data_space; 10332 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, 10333 false); 10334 if (ret) 10335 goto out_qgroup_free_data; 10336 10337 /* Try an inline extent first. 
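 * cow_file_range_inline() returns 0 when it created the inline extent
 * (in which case the whole write is done and we report orig_count),
 * > 0 when the data cannot be inlined, and < 0 on error.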
*/ 10338 if (start == 0 && encoded->unencoded_len == encoded->len && 10339 encoded->unencoded_offset == 0) { 10340 ret = cow_file_range_inline(inode, encoded->len, orig_count, 10341 compression, pages, true); 10342 if (ret <= 0) { 10343 if (ret == 0) 10344 ret = orig_count; 10345 goto out_delalloc_release; 10346 } 10347 } 10348 10349 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes, 10350 disk_num_bytes, 0, 0, &ins, 1, 1); 10351 if (ret) 10352 goto out_delalloc_release; 10353 extent_reserved = true; 10354 10355 em = create_io_em(inode, start, num_bytes, 10356 start - encoded->unencoded_offset, ins.objectid, 10357 ins.offset, ins.offset, ram_bytes, compression, 10358 BTRFS_ORDERED_COMPRESSED); 10359 if (IS_ERR(em)) { 10360 ret = PTR_ERR(em); 10361 goto out_free_reserved; 10362 } 10363 free_extent_map(em); 10364 10365 ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes, 10366 ins.objectid, ins.offset, 10367 encoded->unencoded_offset, 10368 (1 << BTRFS_ORDERED_ENCODED) | 10369 (1 << BTRFS_ORDERED_COMPRESSED), 10370 compression); 10371 if (IS_ERR(ordered)) { 10372 btrfs_drop_extent_map_range(inode, start, end, false); 10373 ret = PTR_ERR(ordered); 10374 goto out_free_reserved; 10375 } 10376 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10377 10378 if (start + encoded->len > inode->vfs_inode.i_size) 10379 i_size_write(&inode->vfs_inode, start + encoded->len); 10380 10381 unlock_extent(io_tree, start, end, &cached_state); 10382 10383 btrfs_delalloc_release_extents(inode, num_bytes); 10384 10385 btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false); 10386 ret = orig_count; 10387 goto out; 10388 10389 out_free_reserved: 10390 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10391 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 10392 out_delalloc_release: 10393 btrfs_delalloc_release_extents(inode, num_bytes); 10394 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); 10395 out_qgroup_free_data: 10396 if (ret < 0) 10397 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes); 10398 out_free_data_space: 10399 /* 10400 * If btrfs_reserve_extent() succeeded, then we already decremented 10401 * bytes_may_use. 10402 */ 10403 if (!extent_reserved) 10404 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes); 10405 out_unlock: 10406 unlock_extent(io_tree, start, end, &cached_state); 10407 out_pages: 10408 for (i = 0; i < nr_pages; i++) { 10409 if (pages[i]) 10410 __free_page(pages[i]); 10411 } 10412 kvfree(pages); 10413 out: 10414 if (ret >= 0) 10415 iocb->ki_pos += encoded->len; 10416 return ret; 10417 } 10418 10419 #ifdef CONFIG_SWAP 10420 /* 10421 * Add an entry indicating a block group or device which is pinned by a 10422 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 10423 * negative errno on failure. 
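 * Entries live in an rbtree keyed by (ptr, inode); re-adding an
 * existing block group pin just bumps its bg_extent_count.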
10424 */ 10425 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 10426 bool is_block_group) 10427 { 10428 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10429 struct btrfs_swapfile_pin *sp, *entry; 10430 struct rb_node **p; 10431 struct rb_node *parent = NULL; 10432 10433 sp = kmalloc(sizeof(*sp), GFP_NOFS); 10434 if (!sp) 10435 return -ENOMEM; 10436 sp->ptr = ptr; 10437 sp->inode = inode; 10438 sp->is_block_group = is_block_group; 10439 sp->bg_extent_count = 1; 10440 10441 spin_lock(&fs_info->swapfile_pins_lock); 10442 p = &fs_info->swapfile_pins.rb_node; 10443 while (*p) { 10444 parent = *p; 10445 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 10446 if (sp->ptr < entry->ptr || 10447 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 10448 p = &(*p)->rb_left; 10449 } else if (sp->ptr > entry->ptr || 10450 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 10451 p = &(*p)->rb_right; 10452 } else { 10453 if (is_block_group) 10454 entry->bg_extent_count++; 10455 spin_unlock(&fs_info->swapfile_pins_lock); 10456 kfree(sp); 10457 return 1; 10458 } 10459 } 10460 rb_link_node(&sp->node, parent, p); 10461 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 10462 spin_unlock(&fs_info->swapfile_pins_lock); 10463 return 0; 10464 } 10465 10466 /* Free all of the entries pinned by this swapfile. */ 10467 static void btrfs_free_swapfile_pins(struct inode *inode) 10468 { 10469 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10470 struct btrfs_swapfile_pin *sp; 10471 struct rb_node *node, *next; 10472 10473 spin_lock(&fs_info->swapfile_pins_lock); 10474 node = rb_first(&fs_info->swapfile_pins); 10475 while (node) { 10476 next = rb_next(node); 10477 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 10478 if (sp->inode == inode) { 10479 rb_erase(&sp->node, &fs_info->swapfile_pins); 10480 if (sp->is_block_group) { 10481 btrfs_dec_block_group_swap_extents(sp->ptr, 10482 sp->bg_extent_count); 10483 btrfs_put_block_group(sp->ptr); 10484 } 10485 kfree(sp); 10486 } 10487 node = next; 10488 } 10489 spin_unlock(&fs_info->swapfile_pins_lock); 10490 } 10491 10492 struct btrfs_swap_info { 10493 u64 start; 10494 u64 block_start; 10495 u64 block_len; 10496 u64 lowest_ppage; 10497 u64 highest_ppage; 10498 unsigned long nr_pages; 10499 int nr_extents; 10500 }; 10501 10502 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 10503 struct btrfs_swap_info *bsi) 10504 { 10505 unsigned long nr_pages; 10506 unsigned long max_pages; 10507 u64 first_ppage, first_ppage_reported, next_ppage; 10508 int ret; 10509 10510 /* 10511 * Our swapfile may have had its size extended after the swap header was 10512 * written. In that case activating the swapfile should not go beyond 10513 * the max size set in the swap header. 
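 * For example, if the file grew from 1 GiB to 2 GiB after mkswap,
 * sis->max still covers only 1 GiB, so we stop adding extents once
 * bsi->nr_pages reaches it and clamp each extent to max_pages.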
10514 */ 10515 if (bsi->nr_pages >= sis->max) 10516 return 0; 10517 10518 max_pages = sis->max - bsi->nr_pages; 10519 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT; 10520 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; 10521 10522 if (first_ppage >= next_ppage) 10523 return 0; 10524 nr_pages = next_ppage - first_ppage; 10525 nr_pages = min(nr_pages, max_pages); 10526 10527 first_ppage_reported = first_ppage; 10528 if (bsi->start == 0) 10529 first_ppage_reported++; 10530 if (bsi->lowest_ppage > first_ppage_reported) 10531 bsi->lowest_ppage = first_ppage_reported; 10532 if (bsi->highest_ppage < (next_ppage - 1)) 10533 bsi->highest_ppage = next_ppage - 1; 10534 10535 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); 10536 if (ret < 0) 10537 return ret; 10538 bsi->nr_extents += ret; 10539 bsi->nr_pages += nr_pages; 10540 return 0; 10541 } 10542 10543 static void btrfs_swap_deactivate(struct file *file) 10544 { 10545 struct inode *inode = file_inode(file); 10546 10547 btrfs_free_swapfile_pins(inode); 10548 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); 10549 } 10550 10551 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10552 sector_t *span) 10553 { 10554 struct inode *inode = file_inode(file); 10555 struct btrfs_root *root = BTRFS_I(inode)->root; 10556 struct btrfs_fs_info *fs_info = root->fs_info; 10557 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 10558 struct extent_state *cached_state = NULL; 10559 struct extent_map *em = NULL; 10560 struct btrfs_device *device = NULL; 10561 struct btrfs_swap_info bsi = { 10562 .lowest_ppage = (sector_t)-1ULL, 10563 }; 10564 int ret = 0; 10565 u64 isize; 10566 u64 start; 10567 10568 /* 10569 * If the swap file was just created, make sure delalloc is done. If the 10570 * file changes again after this, the user is doing something stupid and 10571 * we don't really care. 10572 */ 10573 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); 10574 if (ret) 10575 return ret; 10576 10577 /* 10578 * The inode is locked, so these flags won't change after we check them. 10579 */ 10580 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { 10581 btrfs_warn(fs_info, "swapfile must not be compressed"); 10582 return -EINVAL; 10583 } 10584 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { 10585 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); 10586 return -EINVAL; 10587 } 10588 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 10589 btrfs_warn(fs_info, "swapfile must not be checksummed"); 10590 return -EINVAL; 10591 } 10592 10593 /* 10594 * Balance or device remove/replace/resize can move stuff around from 10595 * under us. The exclop protection makes sure they aren't running/won't 10596 * run concurrently while we are mapping the swap extents, and 10597 * fs_info->swapfile_pins prevents them from running while the swap 10598 * file is active and moving the extents. Note that this also prevents 10599 * a concurrent device add which isn't actually necessary, but it's not 10600 * really worth the trouble to allow it. 10601 */ 10602 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) { 10603 btrfs_warn(fs_info, 10604 "cannot activate swapfile while exclusive operation is running"); 10605 return -EBUSY; 10606 } 10607 10608 /* 10609 * Prevent snapshot creation while we are activating the swap file. 10610 * We do not want to race with snapshot creation. 
If snapshot creation
10611 * already started before we bumped nr_swapfiles from 0 to 1 and
10612 * completes before the first write into the swap file after it is
10613 * activated, then that write would fall back to COW.
10614 */
10615 if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
10616 btrfs_exclop_finish(fs_info);
10617 btrfs_warn(fs_info,
10618 "cannot activate swapfile because snapshot creation is in progress");
10619 return -EINVAL;
10620 }
10621 /*
10622 * Snapshots can create extents which require COW even if NODATACOW is
10623 * set. We use this counter to prevent snapshots. We must increment it
10624 * before walking the extents because we don't want a concurrent
10625 * snapshot to run after we've already checked the extents.
10626 *
10627 * It is possible that the subvolume is marked for deletion but not
10628 * removed yet. To prevent this race, we check the root status before
10629 * activating the swapfile.
10630 */
10631 spin_lock(&root->root_item_lock);
10632 if (btrfs_root_dead(root)) {
10633 spin_unlock(&root->root_item_lock);
10634
10635 btrfs_exclop_finish(fs_info);
10636 btrfs_warn(fs_info,
10637 "cannot activate swapfile because subvolume %llu is being deleted",
10638 root->root_key.objectid);
10639 return -EPERM;
10640 }
10641 atomic_inc(&root->nr_swapfiles);
10642 spin_unlock(&root->root_item_lock);
10643
10644 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
10645
10646 lock_extent(io_tree, 0, isize - 1, &cached_state);
10647 start = 0;
10648 while (start < isize) {
10649 u64 logical_block_start, physical_block_start;
10650 struct btrfs_block_group *bg;
10651 u64 len = isize - start;
10652
10653 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
10654 if (IS_ERR(em)) {
10655 ret = PTR_ERR(em);
10656 goto out;
10657 }
10658
10659 if (em->block_start == EXTENT_MAP_HOLE) {
10660 btrfs_warn(fs_info, "swapfile must not have holes");
10661 ret = -EINVAL;
10662 goto out;
10663 }
10664 if (em->block_start == EXTENT_MAP_INLINE) {
10665 /*
10666 * It's unlikely we'll ever actually find ourselves
10667 * here, as a file small enough to fit inline won't be
10668 * big enough to store more than the swap header, but in
10669 * case something changes in the future, let's catch it
10670 * here rather than later.
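 * (Inline data lives in the btree leaf itself, so there is no block
 * address a swap extent could map to.)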
10671 */ 10672 btrfs_warn(fs_info, "swapfile must not be inline"); 10673 ret = -EINVAL; 10674 goto out; 10675 } 10676 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10677 btrfs_warn(fs_info, "swapfile must not be compressed"); 10678 ret = -EINVAL; 10679 goto out; 10680 } 10681 10682 logical_block_start = em->block_start + (start - em->start); 10683 len = min(len, em->len - (start - em->start)); 10684 free_extent_map(em); 10685 em = NULL; 10686 10687 ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true); 10688 if (ret < 0) { 10689 goto out; 10690 } else if (ret) { 10691 ret = 0; 10692 } else { 10693 btrfs_warn(fs_info, 10694 "swapfile must not be copy-on-write"); 10695 ret = -EINVAL; 10696 goto out; 10697 } 10698 10699 em = btrfs_get_chunk_map(fs_info, logical_block_start, len); 10700 if (IS_ERR(em)) { 10701 ret = PTR_ERR(em); 10702 goto out; 10703 } 10704 10705 if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 10706 btrfs_warn(fs_info, 10707 "swapfile must have single data profile"); 10708 ret = -EINVAL; 10709 goto out; 10710 } 10711 10712 if (device == NULL) { 10713 device = em->map_lookup->stripes[0].dev; 10714 ret = btrfs_add_swapfile_pin(inode, device, false); 10715 if (ret == 1) 10716 ret = 0; 10717 else if (ret) 10718 goto out; 10719 } else if (device != em->map_lookup->stripes[0].dev) { 10720 btrfs_warn(fs_info, "swapfile must be on one device"); 10721 ret = -EINVAL; 10722 goto out; 10723 } 10724 10725 physical_block_start = (em->map_lookup->stripes[0].physical + 10726 (logical_block_start - em->start)); 10727 len = min(len, em->len - (logical_block_start - em->start)); 10728 free_extent_map(em); 10729 em = NULL; 10730 10731 bg = btrfs_lookup_block_group(fs_info, logical_block_start); 10732 if (!bg) { 10733 btrfs_warn(fs_info, 10734 "could not find block group containing swapfile"); 10735 ret = -EINVAL; 10736 goto out; 10737 } 10738 10739 if (!btrfs_inc_block_group_swap_extents(bg)) { 10740 btrfs_warn(fs_info, 10741 "block group for swapfile at %llu is read-only%s", 10742 bg->start, 10743 atomic_read(&fs_info->scrubs_running) ? 
10744 " (scrub running)" : ""); 10745 btrfs_put_block_group(bg); 10746 ret = -EINVAL; 10747 goto out; 10748 } 10749 10750 ret = btrfs_add_swapfile_pin(inode, bg, true); 10751 if (ret) { 10752 btrfs_put_block_group(bg); 10753 if (ret == 1) 10754 ret = 0; 10755 else 10756 goto out; 10757 } 10758 10759 if (bsi.block_len && 10760 bsi.block_start + bsi.block_len == physical_block_start) { 10761 bsi.block_len += len; 10762 } else { 10763 if (bsi.block_len) { 10764 ret = btrfs_add_swap_extent(sis, &bsi); 10765 if (ret) 10766 goto out; 10767 } 10768 bsi.start = start; 10769 bsi.block_start = physical_block_start; 10770 bsi.block_len = len; 10771 } 10772 10773 start += len; 10774 } 10775 10776 if (bsi.block_len) 10777 ret = btrfs_add_swap_extent(sis, &bsi); 10778 10779 out: 10780 if (!IS_ERR_OR_NULL(em)) 10781 free_extent_map(em); 10782 10783 unlock_extent(io_tree, 0, isize - 1, &cached_state); 10784 10785 if (ret) 10786 btrfs_swap_deactivate(file); 10787 10788 btrfs_drew_write_unlock(&root->snapshot_lock); 10789 10790 btrfs_exclop_finish(fs_info); 10791 10792 if (ret) 10793 return ret; 10794 10795 if (device) 10796 sis->bdev = device->bdev; 10797 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; 10798 sis->max = bsi.nr_pages; 10799 sis->pages = bsi.nr_pages - 1; 10800 sis->highest_bit = bsi.nr_pages - 1; 10801 return bsi.nr_extents; 10802 } 10803 #else 10804 static void btrfs_swap_deactivate(struct file *file) 10805 { 10806 } 10807 10808 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10809 sector_t *span) 10810 { 10811 return -EOPNOTSUPP; 10812 } 10813 #endif 10814 10815 /* 10816 * Update the number of bytes used in the VFS' inode. When we replace extents in 10817 * a range (clone, dedupe, fallocate's zero range), we must update the number of 10818 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls 10819 * always get a correct value. 10820 */ 10821 void btrfs_update_inode_bytes(struct btrfs_inode *inode, 10822 const u64 add_bytes, 10823 const u64 del_bytes) 10824 { 10825 if (add_bytes == del_bytes) 10826 return; 10827 10828 spin_lock(&inode->lock); 10829 if (del_bytes > 0) 10830 inode_sub_bytes(&inode->vfs_inode, del_bytes); 10831 if (add_bytes > 0) 10832 inode_add_bytes(&inode->vfs_inode, add_bytes); 10833 spin_unlock(&inode->lock); 10834 } 10835 10836 /* 10837 * Verify that there are no ordered extents for a given file range. 10838 * 10839 * @inode: The target inode. 10840 * @start: Start offset of the file range, should be sector size aligned. 10841 * @end: End offset (inclusive) of the file range, its value +1 should be 10842 * sector size aligned. 10843 * 10844 * This should typically be used for cases where we locked an inode's VFS lock in 10845 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode, 10846 * we have flushed all delalloc in the range, we have waited for all ordered 10847 * extents in the range to complete and finally we have locked the file range in 10848 * the inode's io_tree. 
/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode: The target inode.
 * @start: Start offset of the file range, should be sector size aligned.
 * @end:   End offset (inclusive) of the file range, its value + 1 should be
 *         sector size aligned.
 *
 * This should typically be used in cases where we have taken the inode's VFS
 * lock in exclusive mode, have also taken the inode's i_mmap_lock in exclusive
 * mode, have flushed all delalloc in the range, have waited for all ordered
 * extents in the range to complete, and have finally locked the file range in
 * the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), root->root_key.objectid,
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}

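/*
 * A minimal sketch of the calling pattern described above (illustrative only,
 * the exact helpers and flags vary by call site):
 *
 *	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
 *	btrfs_wait_ordered_range(&inode->vfs_inode, start, end + 1 - start);
 *	lock_extent(&inode->io_tree, start, end, &cached_state);
 *	btrfs_assert_inode_range_clean(inode, start, end);
 */
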
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles use bmap to make
 * a mapping of extents in the file. They assume these extents won't change
 * over the life of the file and they use the bmap result to do IO directly to
 * the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't suitable for
 * IO and they will also change frequently as COW operations happen. So,
 * swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.invalidate_folio = btrfs_invalidate_folio,
	.release_folio	= btrfs_release_folio,
	.migrate_folio	= btrfs_migrate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_page = generic_error_remove_page,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};