// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"
#include "backref.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	struct btrfs_ordered_extent *ordered;
	bool data_space_reserved;
	bool nocow_done;
};

struct btrfs_dio_private {
	/* Range of I/O */
	u64 file_offset;
	u32 bytes;

	/* This must be last */
	struct btrfs_bio bbio;
};

static struct bio_set btrfs_dio_bioset;

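/*
 * Descriptive note on the two declarations above: direct I/O bios are
 * allocated out of btrfs_dio_bioset with room for btrfs_dio_private in front
 * of the embedded bio, which is why the bbio member must stay the last field
 * of the struct.
 */
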
struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock,
				   u64 *done_offset);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * without the filename.
		 */
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			   warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			   warn->logical, warn->mirror_num, root, inum, offset,
			   fs_info->sectorsize, nlink,
			   (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		   warn->logical, warn->mirror_num, root, inum, offset, ret);

	free_ipath(ipath);
	return ret;
}

/*
 * Do extra user-friendly error output (e.g. lookup all the affected files).
 *
 * If the backref lookup fails, we fall back to the old, filename-less error
 * message.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid, btrfs_ino(inode), file_off,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid,
			btrfs_ino(inode), file_off, logical,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				logical, mirror_num,
				(ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}

tree %llu", 294 logical, mirror_num, 295 (ref_level ? "node" : "leaf"), 296 ref_level, ref_root); 297 } 298 btrfs_release_path(&path); 299 } else { 300 struct btrfs_backref_walk_ctx ctx = { 0 }; 301 struct data_reloc_warn reloc_warn = { 0 }; 302 303 btrfs_release_path(&path); 304 305 ctx.bytenr = found_key.objectid; 306 ctx.extent_item_pos = logical - found_key.objectid; 307 ctx.fs_info = fs_info; 308 309 reloc_warn.logical = logical; 310 reloc_warn.extent_item_size = found_key.offset; 311 reloc_warn.mirror_num = mirror_num; 312 reloc_warn.fs_info = fs_info; 313 314 iterate_extent_inodes(&ctx, true, 315 data_reloc_print_warning_inode, &reloc_warn); 316 } 317 } 318 319 static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode, 320 u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num) 321 { 322 struct btrfs_root *root = inode->root; 323 const u32 csum_size = root->fs_info->csum_size; 324 325 /* For data reloc tree, it's better to do a backref lookup instead. */ 326 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 327 return print_data_reloc_error(inode, logical_start, csum, 328 csum_expected, mirror_num); 329 330 /* Output without objectid, which is more meaningful */ 331 if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) { 332 btrfs_warn_rl(root->fs_info, 333 "csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 334 root->root_key.objectid, btrfs_ino(inode), 335 logical_start, 336 CSUM_FMT_VALUE(csum_size, csum), 337 CSUM_FMT_VALUE(csum_size, csum_expected), 338 mirror_num); 339 } else { 340 btrfs_warn_rl(root->fs_info, 341 "csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 342 root->root_key.objectid, btrfs_ino(inode), 343 logical_start, 344 CSUM_FMT_VALUE(csum_size, csum), 345 CSUM_FMT_VALUE(csum_size, csum_expected), 346 mirror_num); 347 } 348 } 349 350 /* 351 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed 352 * 353 * ilock_flags can have the following bit set: 354 * 355 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode 356 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt 357 * return -EAGAIN 358 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock 359 */ 360 int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags) 361 { 362 if (ilock_flags & BTRFS_ILOCK_SHARED) { 363 if (ilock_flags & BTRFS_ILOCK_TRY) { 364 if (!inode_trylock_shared(&inode->vfs_inode)) 365 return -EAGAIN; 366 else 367 return 0; 368 } 369 inode_lock_shared(&inode->vfs_inode); 370 } else { 371 if (ilock_flags & BTRFS_ILOCK_TRY) { 372 if (!inode_trylock(&inode->vfs_inode)) 373 return -EAGAIN; 374 else 375 return 0; 376 } 377 inode_lock(&inode->vfs_inode); 378 } 379 if (ilock_flags & BTRFS_ILOCK_MMAP) 380 down_write(&inode->i_mmap_lock); 381 return 0; 382 } 383 384 /* 385 * btrfs_inode_unlock - unock inode i_rwsem 386 * 387 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock() 388 * to decide whether the lock acquired is shared or exclusive. 389 */ 390 void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags) 391 { 392 if (ilock_flags & BTRFS_ILOCK_MMAP) 393 up_write(&inode->i_mmap_lock); 394 if (ilock_flags & BTRFS_ILOCK_SHARED) 395 inode_unlock_shared(&inode->vfs_inode); 396 else 397 inode_unlock(&inode->vfs_inode); 398 } 399 400 /* 401 * Cleanup all submitted ordered extents in specified range to handle errors 402 * from the btrfs_run_delalloc_range() callback. 
/*
 * Cleanup all submitted ordered extents in the specified range to handle
 * errors from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it does not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = 0, page_end = 0;
	struct page *page;

	if (locked_page) {
		page_start = page_offset(locked_page);
		page_end = page_start + PAGE_SIZE - 1;
	}

	while (index <= end_index) {
		/*
		 * For the locked page, we will call end_extent_writepage() on
		 * it in run_delalloc_range() for the error handling. That
		 * end_extent_writepage() function will call
		 * btrfs_mark_ordered_io_finished() to clear page Ordered and
		 * run the ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
					       offset, bytes);
		put_page(page);
	}

	if (locked_page) {
		/* The locked page covers the full range, nothing needs to be done */
		if (bytes + offset <= page_start + PAGE_SIZE)
			return;
		/*
		 * In case this page belongs to the delalloc range being
		 * instantiated then skip it, since the first page of a range is
		 * going to be properly cleaned up by the caller of
		 * run_delalloc_range().
		 */
		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
			offset = page_offset(locked_page) + PAGE_SIZE;
		}
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}

static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work for inserting an inline extent into the btree.
 * The caller should have done a btrfs_drop_extents() so that no overlapping
 * inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_local_page(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_local(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_page(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity's
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}

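/*
 * A worked example for the eligibility checks in cow_file_range_inline()
 * below (illustrative numbers, assuming a 4K sector size): a 3000 byte file
 * written from offset 0 is a candidate because the data ends at or beyond
 * i_size and is at most one sector long; its (possibly compressed) payload
 * must additionally fit within both the leaf limit
 * (BTRFS_MAX_INLINE_DATA_SIZE) and the max_inline mount option.
 */
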
/*
 * Conditionally insert an inline extent into the file. This does the checks
 * required to make sure the data is small enough to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space, as an inline extent does
	 * not count as a data extent, so free the qgroup reservation directly
	 * here. At reserve time the space was always aligned to the page
	 * size, so just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

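/*
 * Lifetime note for the three structures above: one async_cow covers a whole
 * delalloc range and owns an array of async_chunk works, one per 512K slice
 * (see run_delalloc_compressed()). Each chunk collects its compressed results
 * as async_extents on its ->extents list. num_chunks counts the outstanding
 * chunk works; the last one to finish frees the whole async_cow in
 * async_cow_free().
 */
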
static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In the above case, both range A and range B will try to unlock the
	 * full page [0, 64K), causing the one finished later to find the page
	 * unlocked already, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * happen if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the trailing partial page will be locked until the full compression
	 * finishes, delaying the write of other ranges.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock the whole delalloc range
	 * first to prevent any submitted async extent from unlocking the full
	 * page. By this, we can ensure for the subpage case that only the last
	 * async_cow will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}

/*
 * We create compressed extents in two phases. The first phase compresses a
 * range of pages that have already been locked (both pages and state bits are
 * locked).
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus. The actual IO submission is step two, and the ordered
 * work queue takes care of making sure that happens in the same order things
 * were put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes. This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order
 * that the flusher thread sent them down.
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int compressed_extents = 0;
	int redirty = 0;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it. This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);

	/*
	 * We don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time. So, if the end of the
	 * file is before the start of our current requested range of bytes,
	 * we bail out to the uncompressed cleanup code that can deal with all
	 * of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a very
	 * uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that isn't
	 * an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For the subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
				 BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS. This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (inode_need_compress(inode, start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			nr_pages = 0;
			goto cont;
		}

		if (inode->defrag_compress)
			compress_type = inode->defrag_compress;
		else if (inode->prop_compress)
			compress_type = inode->prop_compress;

		/*
		 * We need to call clear_page_dirty_for_io on each page in the
		 * range. Otherwise applications with the file mmap'd can
		 * wander in and change the page contents while we are
		 * compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
			redirty = 1;
		}

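		/*
		 * The call below packs the algorithm and its level into one
		 * type_level value: the compression type in the low 4 bits,
		 * the level in the bits above it, unpacked again inside
		 * btrfs_compress_pages().
		 */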
		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
					   mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = offset_in_page(total_compressed);
			struct page *page = pages[nr_pages - 1];

			/*
			 * Zero the tail end of the last page, as we might be
			 * sending it down to disk.
			 */
			if (offset)
				memzero_page(page, offset, PAGE_SIZE - offset);
			will_compress = 1;
		}
	}
cont:
	/*
	 * Check cow_file_range() for why we don't even try to create an
	 * inline extent for the subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		/* let's try to make an inline extent */
		if (ret || total_in < actual_end) {
			/*
			 * We didn't compress the entire range, try to make an
			 * uncompressed inline extent.
			 */
			ret = cow_file_range_inline(inode, actual_end,
						    0, BTRFS_COMPRESS_NONE,
						    NULL, false);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(inode, actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;

			if (ret < 0)
				mapping_set_error(mapping, -EIO);

			/*
			 * Inline extent creation worked or returned an error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     PAGE_END_WRITEBACK);

			/*
			 * Ensure we only free the compressed pages if we have
			 * them allocated, as we can still reach here with
			 * inode_need_compress() == false.
			 */
			if (pages) {
				for (i = 0; i < nr_pages; i++) {
					WARN_ON(pages[i]->mapping);
					put_page(pages[i]);
				}
				kfree(pages);
			}
			return 0;
		}
	}

	if (will_compress) {
		/*
		 * We aren't doing an inline extent. Round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things.
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * One last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk,
		 * compression must free at least one sector size.
		 */
		total_in = round_up(total_in, fs_info->sectorsize);
		if (total_compressed + blocksize <= total_in) {
			compressed_extents++;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_chunk, start, total_in,
					 total_compressed, pages, nr_pages,
					 compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return compressed_extents;
		}
	}
	if (pages) {
		/*
		 * The compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array.
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(inode->prop_compress)) {
			inode->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far. Redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (async_chunk->locked_page &&
	    (page_offset(async_chunk->locked_page) >= start &&
	     page_offset(async_chunk->locked_page) <= end)) {
		__set_page_dirty_nobuffers(async_chunk->locked_page);
		/* unlocked later on in the async handlers */
	}

	if (redirty)
		extent_range_redirty_for_io(&inode->vfs_inode, start, end);
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	compressed_extents++;

	return compressed_extents;
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

static int submit_uncompressed_range(struct btrfs_inode *inode,
				     struct async_extent *async_extent,
				     struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	unsigned long nr_written = 0;
	int page_started = 0;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	/*
	 * Call cow_file_range() to run the delalloc range directly, since we
	 * won't go to the NOCOW or async path again.
	 *
	 * Also we call cow_file_range() with @unlock_page == 0, so that we
	 * can directly submit them without interruption.
	 */
	ret = cow_file_range(inode, locked_page, start, end, &page_started,
			     &nr_written, 0, NULL);
	/* Inline extent inserted, page gets unlocked and everything is done */
	if (page_started)
		return 0;

	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);
			const u64 page_end = page_start + PAGE_SIZE - 1;

			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			end_extent_writepage(locked_page, ret, page_start, page_end);
			unlock_page(locked_page);
		}
		return ret;
	}

	/* All pages will be unlocked, including @locked_page */
	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = extent_write_locked_range(&inode->vfs_inode, start, end, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static int submit_one_async_extent(struct btrfs_inode *inode,
				   struct async_chunk *async_chunk,
				   struct async_extent *async_extent,
				   u64 *alloc_hint)
{
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end, NULL);

	/* We have fallen back to uncompressed write */
	if (!async_extent->pages) {
		ret = submit_uncompressed_range(inode, async_extent, locked_page);
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		free_async_extent_pages(async_extent);
		/*
		 * We used to try again by going back to the non-compressed
		 * path for ENOSPC. But if we can't reserve space even for the
		 * compressed size, it cannot work for the uncompressed size,
		 * which is larger. So go directly to the error path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start,	/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_compressed_write(ordered,
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return ret;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	free_async_extent_pages(async_extent);
	goto done;
}

/*
 * Phase two of compressed writeback. This is the ordered portion of the code,
 * which only gets called in the order the work was queued. We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	int ret = 0;

	while (!list_empty(&async_chunk->extents)) {
		u64 extent_start;
		u64 ram_size;

		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		extent_start = async_extent->start;
		ram_size = async_extent->ram_size;

		ret = submit_one_async_extent(inode, async_chunk, async_extent,
					      &alloc_hint);
		if (ret)
			btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
				    inode->root->root_key.objectid,
				    btrfs_ino(inode), extent_start, ram_size, ret);
	}
}

static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * If the block start isn't an actual block number then find
		 * the first block in this inode and use that as a hint. If
		 * that block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

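/*
 * The hint returned above is purely advisory: it is fed to
 * btrfs_reserve_extent() as the hint byte so new allocations land near the
 * file's existing extents when possible. A missing or bogus hint only
 * affects locality, never correctness.
 */
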
/*
 * When extent_io.c finds a delayed allocation range in the file, the
 * callbacks end up in this code. The basic idea is to allocate extents on
 * disk for the range, and create ordered data structs in ram to track those
 * extents.
 *
 * locked_page is the page that writepage had locked already. We use it to
 * make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it. It may be clean and already done with IO when
 * we return.
 *
 * When unlock == 1, we unlock the pages in successfully allocated regions.
 * When unlock == 0, we leave them locked for writing them out.
 *
 * However, we unlock all the pages except @locked_page in case of failure.
 *
 * In summary, page locking state will be as follows:
 *
 * - page_started == 1 (return value)
 *	- All the pages are unlocked. IO is started.
 *	- Note that this can happen only on success
 * - unlock == 1
 *	- All the pages except @locked_page are unlocked in any case
 * - unlock == 0
 *	- On success, all the pages are locked for writing them out
 *	- On failure, all the pages except @locked_page are unlocked
 *
 * When a failure happens in the second or later iteration of the while-loop,
 * the ordered extents created in previous iterations are kept intact. So, the
 * caller must clean them up by calling btrfs_cleanup_ordered_extents(). See
 * btrfs_run_delalloc_range() for an example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock,
				   u64 *done_offset)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of the page, which means data
	 * writeback is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* let's try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() unlock it, as
			 * it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't use page_started to determine if it's an
			 * inline extent or a compressed extent.
			 */
			unlock_page(locked_page);
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fall back to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and
	 * can not be split into smaller extents, otherwise relocation breaks
	 * and fails during the stage where it updates the bytenr of file
	 * extent items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		struct btrfs_ordered_extent *ordered;

		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ordered = btrfs_alloc_ordered_extent(inode, start, ram_size,
					ram_size, ins.objectid, cur_alloc_size,
					0, 1 << BTRFS_ORDERED_REGULAR,
					BTRFS_COMPRESS_NONE);
		if (IS_ERR(ordered)) {
			ret = PTR_ERR(ordered);
			goto out_drop_extent_cache;
		}

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(ordered);

			/*
			 * Only drop the cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at the out_unlock label to free the metadata of this
			 * ordered extent, as it should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip the current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + ram_size - 1,
							    false);
		}
		btrfs_put_ordered_extent(ordered);

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits.
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly set up for writepage.
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * On a btrfs_reloc_clone_csums() error, since @start was
		 * increased, extent_clear_unlock_delalloc() at the out_unlock
		 * label won't free the metadata of the current ordered extent,
		 * so we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * If done_offset is non-NULL and ret == -EAGAIN, we expect the
	 * caller to write out the successfully allocated region and retry.
	 */
	if (done_offset && ret == -EAGAIN) {
		if (orig_start < start)
			*done_offset = start - 1;
		else
			*done_offset = start;
		return ret;
	} else if (ret == -EAGAIN) {
		/* Convert to -ENOSPC since the caller cannot retry. */
		ret = -ENOSPC;
	}

	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g.
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of unlock == 0, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (!unlock && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, 0, page_ops);
	}

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	if (start < end) {
		clear_bits |= EXTENT_CLEAR_DATA_RESV;
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
	}
	return ret;
}

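/*
 * The three handlers below implement the async COW pipeline for one
 * async_chunk. They are wired together via btrfs_init_work() in
 * run_delalloc_compressed(): async_cow_start() compresses the range (possibly
 * in parallel with other chunks), async_cow_submit() then runs in queueing
 * order to submit the results, and async_cow_free() releases the per-chunk
 * resources afterwards.
 */
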
/*
 * Work queue callback to start compression on a file and pages.
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	int compressed_extents;

	async_chunk = container_of(work, struct async_chunk, work);

	compressed_extents = compress_file_range(async_chunk);
	if (compressed_extents == 0) {
		btrfs_add_delayed_iput(async_chunk->inode);
		async_chunk->inode = NULL;
	}
}

/*
 * Work queue callback to submit previously compressed pages.
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	unsigned long nr_pages;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * ->inode could be NULL if async_cow_start() has failed to compress,
	 * in which case we don't have anything to submit, yet we need to
	 * always adjust ->async_delalloc_pages as it's paired with the init
	 * happening in run_delalloc_compressed().
	 */
	if (async_chunk->inode)
		submit_compressed_extents(async_chunk);

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	if (async_chunk->inode)
		btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}

static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct writeback_control *wbc,
				    struct page *locked_page,
				    u64 start, u64 end, int *page_started,
				    unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	unlock_extent(&inode->io_tree, start, end, NULL);
	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime.
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given. As we spread this
		 * large delalloc region across multiple async_chunk structs,
		 * only the first struct needs a pointer to locked_page.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async. We want all of them to
			 * be accounted against wbc once. Let's do it here
			 * before the paths diverge. wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy. Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, async_cow_start,
				async_cow_submit, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return true;
}

static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
				       struct page *locked_page, u64 start,
				       u64 end, int *page_started,
				       unsigned long *nr_written,
				       struct writeback_control *wbc)
{
	u64 done_offset = end;
	int ret;
	bool locked_page_done = false;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end, page_started,
				     nr_written, 0, &done_offset);
		if (ret && ret != -EAGAIN)
			return ret;

		if (*page_started) {
			ASSERT(ret == 0);
			return 0;
		}

		if (ret == 0)
			done_offset = end;

		if (done_offset == start) {
			wait_on_bit_io(&inode->root->fs_info->flags,
				       BTRFS_FS_NEED_ZONE_FINISH,
				       TASK_UNINTERRUPTIBLE);
			continue;
		}

		if (!locked_page_done) {
			__set_page_dirty_nobuffers(locked_page);
			account_page_redirty(locked_page);
		}
		locked_page_done = true;
		extent_write_locked_range(&inode->vfs_inode, start, done_offset,
					  wbc);
		start = done_offset + 1;
	}

	*page_started = 1;

	return 0;
}

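/*
 * Check whether any data checksums exist for the given logical range.
 *
 * Returns 1 if at least one checksum item covers the range, 0 if none do,
 * or a negative errno on lookup failure.
 */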
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given. As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_page.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async. We want all of them to
			 * be accounted against wbc once. Let's do it here
			 * before the paths diverge. wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy. Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, async_cow_start,
				async_cow_submit, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return true;
}

static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
				       struct page *locked_page, u64 start,
				       u64 end, int *page_started,
				       unsigned long *nr_written,
				       struct writeback_control *wbc)
{
	u64 done_offset = end;
	int ret;
	bool locked_page_done = false;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end, page_started,
				     nr_written, 0, &done_offset);
		if (ret && ret != -EAGAIN)
			return ret;

		if (*page_started) {
			ASSERT(ret == 0);
			return 0;
		}

		if (ret == 0)
			done_offset = end;

		if (done_offset == start) {
			wait_on_bit_io(&inode->root->fs_info->flags,
				       BTRFS_FS_NEED_ZONE_FINISH,
				       TASK_UNINTERRUPTIBLE);
			continue;
		}

		if (!locked_page_done) {
			__set_page_dirty_nobuffers(locked_page);
			account_page_redirty(locked_page);
		}
		locked_page_done = true;
		extent_write_locked_range(&inode->vfs_inode, start, done_offset,
					  wbc);
		start = done_offset + 1;
	}

	*page_started = 1;

	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes, bool nowait)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
				      &list, 0, nowait);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}

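/*
 * Fall back to COW for a range that we attempted (or expected) to write with
 * NOCOW. Before delegating to cow_file_range(), this compensates the data
 * space accounting for ranges that skipped the data reservation at buffered
 * write time; see the detailed explanation in the function body below.
 */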
static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end,
			   int *page_started, unsigned long *nr_written)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we did not have enough available data space and therefore we
	 * did not reserve data space for it, since we thought we could do
	 * NOCOW for the respective file range (either there is a prealloc
	 * extent or the inode has the NOCOW bit set).
	 *
	 * However when we need to fall back to COW mode (because for example
	 * the block group for the corresponding extent was turned to RO mode
	 * by a scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fall back to COW and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a
	 * prealloc extent for them, however scrub or balance may have set the
	 * block group that contains that extent to RO mode and therefore force
	 * COW when starting writeback.
	 */
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0, NULL);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 NULL);
	}

	return cow_file_range(inode, locked_page, start, end, page_started,
			      nr_written, 1, NULL);
}

struct can_nocow_file_extent_args {
	/* Input fields. */

	/* Start file offset of the range we want to NOCOW. */
	u64 start;
	/* End file offset (inclusive) of the range we want to NOCOW. */
	u64 end;
	bool writeback_path;
	bool strict;
	/*
	 * Free the path passed to can_nocow_file_extent() once it's not needed
	 * anymore.
	 */
	bool free_path;

	/*
	 * Output fields. Only set when can_nocow_file_extent() returns 1.
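	 * Callers should only rely on them when 1 is returned.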
	 */

	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 extent_offset;
	/* Number of bytes that can be written to in NOCOW mode. */
	u64 num_bytes;
};

/*
 * Check if we can NOCOW the file extent that the path points to.
 * This function may return with the path released, so the caller should check
 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
 *
 * Returns: < 0 on error
 *            0 if we can not NOCOW
 *            1 if we can NOCOW
 */
static int can_nocow_file_extent(struct btrfs_path *path,
				 struct btrfs_key *key,
				 struct btrfs_inode *inode,
				 struct can_nocow_file_extent_args *args)
{
	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 extent_type;
	int can_nocow = 0;
	int ret = 0;
	bool nowait = path->nowait;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);

	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		goto out;

	/* Can't access these fields unless we know it's not an inline extent. */
	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	args->extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
	    extent_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	/*
	 * If the extent was created before the generation where the last
	 * snapshot for its subvolume was created, then this implies the extent
	 * is shared, hence we must COW.
	 */
	if (!args->strict &&
	    btrfs_file_extent_generation(leaf, fi) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	/* An explicit hole, must COW. */
	if (args->disk_bytenr == 0)
		goto out;

	/* Compressed/encrypted/encoded extents must be COWed. */
	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	extent_end = btrfs_file_extent_end(path);

	/*
	 * The following checks can be expensive, as they need to take other
	 * locks and do btree or rbtree searches, so release the path to avoid
	 * blocking other tasks for too long.
	 */
	btrfs_release_path(path);

	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
				    key->offset - args->extent_offset,
				    args->disk_bytenr, args->strict, path);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	if (args->free_path) {
		/*
		 * We don't need the path anymore, plus through the
		 * csum_exist_in_range() call below we will end up allocating
		 * another path. So free the path to avoid unnecessary extra
		 * memory usage.
		 */
		btrfs_free_path(path);
		path = NULL;
	}

	/* If there are pending snapshots for this root, we must COW. */
	if (args->writeback_path && !is_freespace_inode &&
	    atomic_read(&root->snapshot_force_cow))
		goto out;

	args->disk_bytenr += args->extent_offset;
	args->disk_bytenr += args->start - key->offset;
	args->num_bytes = min(args->end + 1, extent_end) - args->start;

	/*
	 * Force COW if csums exist in the range.
	 * This ensures that csums for a given extent are either valid or do
	 * not exist.
	 */
	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
				  nowait);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	can_nocow = 1;
out:
	if (args->free_path && path)
		btrfs_free_path(path);

	return ret < 0 ? ret : can_nocow;
}

/*
 * NOCOW writeback callback. This checks for snapshots or COW copies of the
 * extents that exist in the file, and COWs the file as required.
 *
 * If no COW copies or snapshots exist, we write directly to the existing
 * blocks on disk.
 */
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
				       struct page *locked_page,
				       const u64 start, const u64 end,
				       int *page_started,
				       unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	u64 cow_start = (u64)-1;
	u64 cur_offset = start;
	int ret;
	bool check_prev = true;
	u64 ino = btrfs_ino(inode);
	struct btrfs_block_group *bg;
	bool nocow = false;
	struct can_nocow_file_extent_args nocow_args = { 0 };

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nocow_args.end = end;
	nocow_args.writeback_path = true;

	while (1) {
		struct btrfs_ordered_extent *ordered;
		struct btrfs_key found_key;
		struct btrfs_file_extent_item *fi;
		struct extent_buffer *leaf;
		u64 extent_end;
		u64 ram_bytes;
		u64 nocow_end;
		int extent_type;
		bool is_prealloc;

		nocow = false;

		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;

		/*
		 * If there is no extent for our range when doing the initial
		 * search, then go back to the previous slot as it will be the
		 * one containing the search offset.
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = false;
next_slot:
		/* Go to next leaf if we have exhausted the current one. */
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				if (cow_start != (u64)-1)
					cur_offset = cow_start;
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* Didn't find anything for our INO. */
		if (found_key.objectid > ino)
			break;
		/*
		 * Keep searching until we find an EXTENT_ITEM or there are no
		 * more extents for this inode.
		 */
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}

		/* Found key is not EXTENT_DATA_KEY or starts after req range. */
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/*
		 * If the found extent starts after requested offset, then
		 * adjust extent_end to be right before this extent begins.
		 */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		/*
		 * Found extent which begins before our range and potentially
		 * intersects it.
		 */
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		/* If this is triggered then we have a memory corruption. */
		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
			ret = -EUCLEAN;
			goto error;
		}
		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = btrfs_file_extent_end(path);

		/*
		 * If the extent we got ends before our current offset, skip to
		 * the next extent.
		 */
		if (extent_end <= cur_offset) {
			path->slots[0]++;
			goto next_slot;
		}

		nocow_args.start = cur_offset;
		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
		if (ret < 0) {
			if (cow_start != (u64)-1)
				cur_offset = cow_start;
			goto error;
		} else if (ret == 0) {
			goto out_check;
		}

		ret = 0;
		bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
		if (bg)
			nocow = true;
out_check:
		/*
		 * If nocow is false then record the beginning of the range
		 * that needs to be COWed.
		 */
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			if (!path->nodes[0])
				continue;
			path->slots[0]++;
			goto next_slot;
		}

		/*
		 * COW the range from cow_start to found_key.offset - 1, as the
		 * key contains the beginning of the first extent that can be
		 * NOCOWed, which follows one that needs to be COWed.
		 */
		if (cow_start != (u64)-1) {
			ret = fallback_to_cow(inode, locked_page,
					      cow_start, found_key.offset - 1,
					      page_started, nr_written);
			if (ret)
				goto error;
			cow_start = (u64)-1;
		}

		nocow_end = cur_offset + nocow_args.num_bytes - 1;
		is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
		if (is_prealloc) {
			u64 orig_start = found_key.offset - nocow_args.extent_offset;
			struct extent_map *em;

			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
					  orig_start,
					  nocow_args.disk_bytenr, /* block_start */
					  nocow_args.num_bytes, /* block_len */
					  nocow_args.disk_num_bytes, /* orig_block_len */
					  ram_bytes, BTRFS_COMPRESS_NONE,
					  BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
		}

		ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
				nocow_args.num_bytes, nocow_args.num_bytes,
				nocow_args.disk_bytenr, nocow_args.num_bytes, 0,
				is_prealloc
				? (1 << BTRFS_ORDERED_PREALLOC)
				: (1 << BTRFS_ORDERED_NOCOW),
				BTRFS_COMPRESS_NONE);
		if (IS_ERR(ordered)) {
			if (is_prealloc) {
				btrfs_drop_extent_map_range(inode, cur_offset,
							    nocow_end, false);
			}
			ret = PTR_ERR(ordered);
			goto error;
		}

		if (nocow) {
			btrfs_dec_nocow_writers(bg);
			nocow = false;
		}

		if (btrfs_is_data_reloc_root(root))
			/*
			 * Error handled later, as we must prevent
			 * extent_clear_unlock_delalloc() in error handler
			 * from freeing metadata of created ordered extent.
			 */
			ret = btrfs_reloc_clone_csums(ordered);
		btrfs_put_ordered_extent(ordered);

		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_ORDERED);

		cur_offset = extent_end;

		/*
		 * On a btrfs_reloc_clone_csums() error, now we're OK to call
		 * the error handler, as metadata for the created ordered
		 * extent will only be freed by btrfs_finish_ordered_io().
		 */
		if (ret)
			goto error;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;

	if (cow_start != (u64)-1) {
		cur_offset = end;
		ret = fallback_to_cow(inode, locked_page, cow_start, end,
				      page_started, nr_written);
		if (ret)
			goto error;
	}

error:
	if (nocow)
		btrfs_dec_nocow_writers(bg);

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}

static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
		if (inode->defrag_bytes &&
		    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
				   0, NULL))
			return false;
		return true;
	}
	return false;
}

/*
 * Function to process delayed allocation (create CoW) for ranges which are
 * being touched for the first time.
 */
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
			     u64 start, u64 end, int *page_started,
			     unsigned long *nr_written,
			     struct writeback_control *wbc)
{
	int ret = 0;
	const bool zoned = btrfs_is_zoned(inode->root->fs_info);

	/*
	 * The range must cover part of the @locked_page, or the returned
	 * @page_started can confuse the caller.
	 */
	ASSERT(!(end <= page_offset(locked_page) ||
		 start >= page_offset(locked_page) + PAGE_SIZE));

	if (should_nocow(inode, start, end)) {
		/*
		 * Normally on a zoned device we're only doing COW writes, but
		 * in case of relocation on a zoned filesystem we have taken
		 * the precaution that we're only writing sequentially. It's
		 * safe to use run_delalloc_nocow() here, like for regular
		 * preallocated inodes.
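		 *
		 * The assertion below documents this invariant: on a zoned
		 * filesystem we only get here for the data relocation root.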
		 */
		ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, nr_written);
		goto out;
	}

	if (btrfs_inode_can_compress(inode) &&
	    inode_need_compress(inode, start, end) &&
	    run_delalloc_compressed(inode, wbc, locked_page, start,
				    end, page_started, nr_written))
		goto out;

	if (zoned)
		ret = run_delalloc_zoned(inode, locked_page, start, end,
					 page_started, nr_written, wbc);
	else
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1, NULL);

out:
	ASSERT(ret <= 0);
	if (ret)
		btrfs_cleanup_ordered_extents(inode, locked_page, start,
					      end - start + 1);
	return ret;
}

void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *orig, u64 split)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 size;

	/* Not delalloc, ignore it. */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > fs_info->max_extent_size) {
		u32 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_delalloc_extent, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = count_max_extents(fs_info, new_size);
		new_size = split - orig->start;
		num_extents += count_max_extents(fs_info, new_size);
		if (count_max_extents(fs_info, size) >= num_extents)
			return;
	}

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);
}

/*
 * Handle merged delayed allocation extents so we can keep track of new extents
 * that are just merged onto old extents, such as when we are doing sequential
 * writes, so we can properly account for the metadata space we'll need.
 */
void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
				 struct extent_state *other)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 new_size, old_size;
	u32 num_extents;

	/* Not delalloc, ignore it. */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* We're not bigger than the max, unreserve the space and go. */
	if (new_size <= fs_info->max_extent_size) {
		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -1);
		spin_unlock(&inode->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent. If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop. Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return. But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
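	 *
	 * In that second case the merged range is 2 * MAX_SIZE + 8k long: the
	 * two sides account for 2 + 2 = 4 extents, but the merged range only
	 * needs 3, so one outstanding extent has to be dropped.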
	 */
	old_size = other->end - other->start + 1;
	num_extents = count_max_extents(fs_info, old_size);
	old_size = new->end - new->start + 1;
	num_extents += count_max_extents(fs_info, old_size);
	if (count_max_extents(fs_info, new_size) >= num_extents)
		return;

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, -1);
	spin_unlock(&inode->lock);
}

static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	spin_lock(&root->delalloc_lock);
	if (list_empty(&inode->delalloc_inodes)) {
		list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &fs_info->delalloc_roots);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}

void __btrfs_del_delalloc_inode(struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!list_empty(&inode->delalloc_inodes)) {
		list_del_init(&inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &inode->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			ASSERT(list_empty(&root->delalloc_inodes));
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
}

static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct btrfs_inode *inode)
{
	spin_lock(&root->delalloc_lock);
	__btrfs_del_delalloc_inode(root, inode);
	spin_unlock(&root->delalloc_lock);
}

/*
 * Properly track delayed allocation bytes in the inode and maintain the list
 * of inodes that have pending delalloc work to be done.
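 *
 * Called when extent bits are set in the inode's io tree.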
 */
void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
			       u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * The set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC bit, which
	 * is only set or cleared with irqs on.
	 */
	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		u64 len = state->end + 1 - state->start;
		u32 num_extents = count_max_extents(fs_info, len);
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, num_extents);
		spin_unlock(&inode->lock);

		/* For sanity tests */
		if (btrfs_is_testing(fs_info))
			return;

		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes += len;
		if (bits & EXTENT_DEFRAG)
			inode->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &inode->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&inode->lock);
	}

	if (!(state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		inode->new_delalloc_bytes += state->end + 1 - state->start;
		spin_unlock(&inode->lock);
	}
}

/*
 * Once a range is no longer delalloc this function ensures that proper
 * accounting happens.
 */
void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *state, u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 len = state->end + 1 - state->start;
	u32 num_extents = count_max_extents(fs_info, len);

	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
		spin_lock(&inode->lock);
		inode->defrag_bytes -= len;
		spin_unlock(&inode->lock);
	}

	/*
	 * The set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC bit, which
	 * is only set or cleared with irqs on.
	 */
	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -num_extents);
		spin_unlock(&inode->lock);

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (bits & EXTENT_CLEAR_META_RESV &&
		    root != fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len, false);

		/*
		 * For sanity tests.
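		 * btrfs_is_testing() is only true for the dummy fs_info used
		 * by the self-tests.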
		 */
		if (btrfs_is_testing(fs_info))
			return;

		if (!btrfs_is_data_reloc_root(root) &&
		    do_list && !(state->state & EXTENT_NORESERVE) &&
		    (bits & EXTENT_CLEAR_DATA_RESV))
			btrfs_free_reserved_data_space_noquota(fs_info, len);

		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
		if (do_list && inode->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &inode->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&inode->lock);
	}

	if ((state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		ASSERT(inode->new_delalloc_bytes >= len);
		inode->new_delalloc_bytes -= len;
		if (bits & EXTENT_ADD_INODE_BYTES)
			inode_add_bytes(&inode->vfs_inode, len);
		spin_unlock(&inode->lock);
	}
}

static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
					struct btrfs_ordered_extent *ordered)
{
	u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	u64 len = bbio->bio.bi_iter.bi_size;
	struct btrfs_ordered_extent *new;
	int ret;

	/* Must always be called for the beginning of an ordered extent. */
	if (WARN_ON_ONCE(start != ordered->disk_bytenr))
		return -EINVAL;

	/* No need to split if the ordered extent covers the entire bio. */
	if (ordered->disk_num_bytes == len) {
		refcount_inc(&ordered->refs);
		bbio->ordered = ordered;
		return 0;
	}

	/*
	 * Don't split the extent_map for NOCOW extents, as we're writing into
	 * a pre-existing one.
	 */
	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = split_extent_map(bbio->inode, bbio->file_offset,
				       ordered->num_bytes, len,
				       ordered->disk_bytenr);
		if (ret)
			return ret;
	}

	new = btrfs_split_ordered_extent(ordered, len);
	if (IS_ERR(new))
		return PTR_ERR(new);
	bbio->ordered = new;
	return 0;
}

/*
 * Given a list of ordered sums, record them in the inode. This happens at IO
 * completion time based on sums calculated at bio submission time.
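 *
 * The sums come from the ordered extent's list and are inserted into the
 * csum tree here.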
 */
static int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;
	struct btrfs_root *csum_root = NULL;
	int ret;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = true;
		if (!csum_root)
			csum_root = btrfs_csum_root(trans->fs_info,
						    sum->logical);
		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
		trans->adding_csums = false;
		if (ret)
			return ret;
	}
	return 0;
}

static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW, cached_state);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state)
{
	WARN_ON(PAGE_ALIGNED(end));

	if (start >= i_size_read(&inode->vfs_inode) &&
	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
		/*
		 * There can't be any extents following eof in this case so
		 * just set the delalloc new bit for the range directly.
		 */
		extra_bits |= EXTENT_DELALLOC_NEW;
	} else {
		int ret;

		ret = btrfs_find_new_delalloc_bytes(inode, start,
						    end + 1 - start,
						    cached_state);
		if (ret)
			return ret;
	}

	return set_extent_bit(&inode->io_tree, start, end,
			      EXTENT_DELALLOC | extra_bits, cached_state);
}

/* See btrfs_writepage_cow_fixup() for details on why this is required. */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_inode *inode;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct page *page;
	struct btrfs_inode *inode;
	u64 page_start;
	u64 page_end;
	int ret = 0;
	bool free_delalloc_space = true;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
	inode = fixup->inode;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_SIZE - 1;

	/*
	 * This is similar to page_mkwrite; we need to reserve the space before
	 * we take the page lock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
					   PAGE_SIZE);
again:
	lock_page(page);

	/*
	 * Before we queued this fixup, we took a reference on the page.
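	 * Holding that reference keeps the page itself from being freed.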
	 * page->mapping may go NULL, but it shouldn't be moved to a different
	 * address space.
	 */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		/*
		 * Unfortunately this is a little tricky, either
		 *
		 * 1) We got here and our page had already been dealt with and
		 *    we reserved our space, thus ret == 0, so we need to just
		 *    drop our space reservation and bail. This can happen the
		 *    first time we come into the fixup worker, or could happen
		 *    while waiting for the ordered extent.
		 * 2) Our page was already dealt with, but we happened to get an
		 *    ENOSPC above from btrfs_delalloc_reserve_space. In this
		 *    case we obviously don't have anything to release, but
		 *    because the page was already dealt with we don't want to
		 *    mark the page with an error, so make sure we're resetting
		 *    ret to 0. This is why we have this check _before_ the ret
		 *    check, because we do not want to have a surprise ENOSPC
		 *    when the page was already properly dealt with.
		 */
		if (!ret) {
			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
			btrfs_delalloc_release_space(inode, data_reserved,
						     page_start, PAGE_SIZE,
						     true);
		}
		ret = 0;
		goto out_page;
	}

	/*
	 * We can't mess with the page state unless it is locked, so now that
	 * it is locked bail if we failed to make our space reservation.
	 */
	if (ret)
		goto out_page;

	lock_extent(&inode->io_tree, page_start, page_end, &cached_state);

	/* Already ordered? We're done. */
	if (PageOrdered(page))
		goto out_reserved;

	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
	if (ordered) {
		unlock_extent(&inode->io_tree, page_start, page_end,
			      &cached_state);
		unlock_page(page);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
					&cached_state);
	if (ret)
		goto out_reserved;

	/*
	 * Everything went as planned, we're now the owner of a dirty page with
	 * delayed allocation bits set and space reserved for our COW
	 * destination.
	 *
	 * The page was dirty when we started, nothing should have cleaned it.
	 */
	BUG_ON(!PageDirty(page));
	free_delalloc_space = false;
out_reserved:
	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
	if (free_delalloc_space)
		btrfs_delalloc_release_space(inode, data_reserved, page_start,
					     PAGE_SIZE, true);
	unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
out_page:
	if (ret) {
		/*
		 * We hit ENOSPC or other errors. Update the mapping and page
		 * to reflect the errors and clean the page.
		 */
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		clear_page_dirty_for_io(page);
	}
	btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
	unlock_page(page);
	put_page(page);
	kfree(fixup);
	extent_changeset_free(data_reserved);
	/*
	 * As a precaution, do a delayed iput in case it would be the last iput
	 * that could need flushing space. Recursing back to fixup worker would
	 * deadlock.
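	 * (The delayed iput is instead processed later, by the cleaner
	 * kthread or at transaction commit time.)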
	 */
	btrfs_add_delayed_iput(inode);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea. This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly set up for IO. We kick off an async process
 * to fix it up. The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
int btrfs_writepage_cow_fixup(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_writepage_fixup *fixup;

	/* This page has an ordered extent covering it already. */
	if (PageOrdered(page))
		return 0;

	/*
	 * PageChecked is set below when we create a fixup worker for this
	 * page; don't try to create another one if we're already
	 * PageChecked().
	 *
	 * The extent_io writepage code will redirty the page if we send back
	 * EAGAIN.
	 */
	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	/*
	 * We are already holding a reference to this inode from
	 * write_cache_pages. We need to hold it because the space reservation
	 * takes place outside of the page lock, and we can't trust
	 * page->mapping outside of the page lock.
	 */
	ihold(inode);
	btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
	get_page(page);
	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
	fixup->page = page;
	fixup->inode = BTRFS_I(inode);
	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);

	return -EAGAIN;
}

static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_inode *inode, u64 file_pos,
				       struct btrfs_file_extent_item *stack_fi,
				       const bool update_inode_bytes,
				       u64 qgroup_reserved)
{
	struct btrfs_root *root = inode->root;
	const u64 sectorsize = root->fs_info->sectorsize;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * We may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * The caller is expected to unpin it and allow it to be merged
	 * with the others.
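	 *
	 * (For ordered extents that unpinning happens via unpin_extent_cache()
	 * in btrfs_finish_one_ordered().)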
	 */
	drop_args.path = path;
	drop_args.start = file_pos;
	drop_args.end = file_pos + num_bytes;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = sizeof(*stack_fi);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret)
		goto out;

	if (!drop_args.extent_inserted) {
		ins.objectid = btrfs_ino(inode);
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*stack_fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
	write_extent_buffer(leaf, stack_fi,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    sizeof(struct btrfs_file_extent_item));

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * If we dropped an inline extent here, we know the range where it was
	 * located was not marked with the EXTENT_DELALLOC_NEW bit, so we
	 * update the number of bytes only for the range containing the inline
	 * extent. The remainder of the range will be processed when clearing
	 * the EXTENT_DELALLOC bit through the ordered extent completion.
	 */
	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);

		inline_size = drop_args.bytes_found - inline_size;
		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
		drop_args.bytes_found -= inline_size;
		num_bytes -= sectorsize;
	}

	if (update_inode_bytes)
		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
	if (ret)
		goto out;

	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
					       file_pos - offset,
					       qgroup_reserved, &ins);
out:
	btrfs_free_path(path);

	return ret;
}

static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}

static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_ordered_extent *oe)
{
	struct btrfs_file_extent_item stack_fi;
	bool update_inode_bytes;
	u64 num_bytes = oe->num_bytes;
	u64 ram_bytes = oe->ram_bytes;

	memset(&stack_fi, 0, sizeof(stack_fi));
	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
						   oe->disk_num_bytes);
	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
		num_bytes = oe->truncated_len;
		ram_bytes = num_bytes;
	}
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
	/* Encryption and other encoding is reserved and all 0. */

	/*
	 * For delalloc, when completing an ordered extent we update the
	 * inode's bytes when clearing the range in the inode's io tree, so
	 * pass false as the argument 'update_inode_bytes' to
	 * insert_reserved_file_extent(), except if the ordered extent was
	 * truncated.
	 */
	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);

	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
					   oe->file_offset, &stack_fi,
					   update_inode_bytes, oe->qgroup_rsv);
}

/*
 * As ordered data IO finishes, this gets called so we can finish
 * an ordered extent once the range of bytes in the file it covers has
 * been fully written.
 */
int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 start, end;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->num_bytes;
	bool freespace_inode;
	bool truncated = false;
	bool clear_reserved_extent = true;
	unsigned int clear_bits = EXTENT_DEFRAG;

	start = ordered_extent->file_offset;
	end = start + ordered_extent->num_bytes - 1;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
		clear_bits |= EXTENT_DELALLOC_NEW;

	freespace_inode = btrfs_is_free_space_inode(inode);
	if (!freespace_inode)
		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	if (btrfs_is_zoned(fs_info))
		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes);

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding. */
		if (!logical_len)
			goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */

		btrfs_inode_safe_disk_i_size_write(inode, 0);
		if (freespace_inode)
			trans = btrfs_join_transaction_spacecache(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &inode->block_rsv;
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		goto out;
	}

	clear_bits |= EXTENT_LOCKED;
	lock_extent(io_tree, start, end, &cached_state);

	if (freespace_inode)
		trans = btrfs_join_transaction_spacecache(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	trans->block_rsv = &inode->block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
						  ordered_extent->disk_num_bytes);
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
		if (!ret) {
			clear_reserved_extent = false;
			btrfs_release_delalloc_bytes(fs_info,
						     ordered_extent->disk_bytenr,
						     ordered_extent->disk_num_bytes);
		}
	}
	unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
			   ordered_extent->num_bytes, trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = add_pending_csums(trans, &ordered_extent->list);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * If this is a new delalloc range, clear its new delalloc flag to
	 * update the inode's number of bytes. This needs to be done first
	 * before updating the inode item.
	 */
	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
		clear_extent_bit(&inode->io_tree, start, end,
				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
				 &cached_state);

	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode_fallback(trans, root, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	ret = 0;
out:
	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
			 &cached_state);

	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		u64 unwritten_start = start;

		/*
		 * If we failed to finish this ordered extent for any reason we
		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
		 * extent, and mark the inode with the error if it wasn't
		 * already set. Any error during writeback would have already
		 * set the mapping error, so we need to set it if we're the
		 * ones marking this ordered extent as failed.
		 */
		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
					     &ordered_extent->flags))
			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);

		if (truncated)
			unwritten_start += logical_len;
		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);

		/* Drop extent maps for the part of the extent we didn't write. */
		btrfs_drop_extent_map_range(inode, unwritten_start, end, false);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator. We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 *
		 * If we made it past insert_reserved_file_extent before we
		 * errored out then we don't need to do this as the accounting
		 * has already been done.
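		 *
		 * (That case is tracked by clear_reserved_extent, which is set
		 * to false once the file extent item has been inserted.)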
		 */
		if ((ret || !logical_len) &&
		    clear_reserved_extent &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
			/*
			 * Discard the range before returning it back to the
			 * free space pool.
			 */
			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
				btrfs_discard_extent(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes,
						NULL);
			btrfs_free_reserved_extent(fs_info,
					ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes, 1);
		}
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* Once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* Once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}

int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
	if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
		btrfs_finish_ordered_zoned(ordered);
	return btrfs_finish_one_ordered(ordered);
}

void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
					  struct page *page, u64 start,
					  u64 end, bool uptodate)
{
	trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);

	btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start, uptodate);
}

/*
 * Verify the checksum for a single sector without any extra actions that
 * depend on the type of I/O.
 */
int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
{
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;

	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);

	shash->tfm = fs_info->csum_shash;

	kaddr = kmap_local_page(page) + pgoff;
	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
	kunmap_local(kaddr);

	if (memcmp(csum, csum_expected, fs_info->csum_size))
		return -EIO;
	return 0;
}

/*
 * Verify the checksum of a single data sector.
 *
 * @bbio:	btrfs_io_bio which contains the csum
 * @dev:	device the sector is on
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @bv:		bio_vec to check
 *
 * Check if the checksum on a data block is valid. When a checksum mismatch is
 * detected, report the error and fill the corrupted range with zero.
 *
 * Return %true if the sector is ok or had no checksum to start with, else
 * %false.
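 *
 * On a checksum mismatch the bvec contents are zeroed before returning.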
 */
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
			u32 bio_offset, struct bio_vec *bv)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 file_offset = bbio->file_offset + bio_offset;
	u64 end = file_offset + bv->bv_len - 1;
	u8 *csum_expected;
	u8 csum[BTRFS_CSUM_SIZE];

	ASSERT(bv->bv_len == fs_info->sectorsize);

	if (!bbio->csum)
		return true;

	if (btrfs_is_data_reloc_root(inode->root) &&
	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
			   1, NULL)) {
		/* Skip the range without csum for data reloc inode. */
		clear_extent_bits(&inode->io_tree, file_offset, end,
				  EXTENT_NODATASUM);
		return true;
	}

	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
		fs_info->csum_size;
	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
				    csum_expected))
		goto zeroit;
	return true;

zeroit:
	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
				    bbio->mirror_num);
	if (dev)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
	memzero_bvec(bv);
	return false;
}

/*
 * btrfs_add_delayed_iput - perform a delayed iput on @inode
 *
 * @inode: The inode we want to perform iput on
 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1) or if this is the last iput then link
 * the inode to the delayed iput machinery. Delayed iputs are processed at
 * transaction commit time/superblock commit/cleaner kthread.
 */
void btrfs_add_delayed_iput(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long flags;

	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
		return;

	atomic_inc(&fs_info->nr_delayed_iputs);
	/*
	 * Need to be irq safe here because we can be called from either an irq
	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
	 * context.
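	 *
	 * Hence the _irqsave/_irqrestore spinlock variants below.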
3495 */ 3496 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags); 3497 ASSERT(list_empty(&inode->delayed_iput)); 3498 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); 3499 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags); 3500 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3501 wake_up_process(fs_info->cleaner_kthread); 3502 } 3503 3504 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3505 struct btrfs_inode *inode) 3506 { 3507 list_del_init(&inode->delayed_iput); 3508 spin_unlock_irq(&fs_info->delayed_iput_lock); 3509 iput(&inode->vfs_inode); 3510 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3511 wake_up(&fs_info->delayed_iputs_wait); 3512 spin_lock_irq(&fs_info->delayed_iput_lock); 3513 } 3514 3515 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3516 struct btrfs_inode *inode) 3517 { 3518 if (!list_empty(&inode->delayed_iput)) { 3519 spin_lock_irq(&fs_info->delayed_iput_lock); 3520 if (!list_empty(&inode->delayed_iput)) 3521 run_delayed_iput_locked(fs_info, inode); 3522 spin_unlock_irq(&fs_info->delayed_iput_lock); 3523 } 3524 } 3525 3526 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3527 { 3528 /* 3529 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which 3530 * calls btrfs_add_delayed_iput() and that needs to lock 3531 * fs_info->delayed_iput_lock. So we need to disable irqs here to 3532 * prevent a deadlock. 3533 */ 3534 spin_lock_irq(&fs_info->delayed_iput_lock); 3535 while (!list_empty(&fs_info->delayed_iputs)) { 3536 struct btrfs_inode *inode; 3537 3538 inode = list_first_entry(&fs_info->delayed_iputs, 3539 struct btrfs_inode, delayed_iput); 3540 run_delayed_iput_locked(fs_info, inode); 3541 if (need_resched()) { 3542 spin_unlock_irq(&fs_info->delayed_iput_lock); 3543 cond_resched(); 3544 spin_lock_irq(&fs_info->delayed_iput_lock); 3545 } 3546 } 3547 spin_unlock_irq(&fs_info->delayed_iput_lock); 3548 } 3549 3550 /* 3551 * Wait for flushing all delayed iputs 3552 * 3553 * @fs_info: the filesystem 3554 * 3555 * This will wait on any delayed iputs that are currently running with KILLABLE 3556 * set. Once they are all done running we will return, unless we are killed in 3557 * which case we return EINTR. This helps in user operations like fallocate etc 3558 * that might get blocked on the iputs. 3559 * 3560 * Return EINTR if we were killed, 0 if nothing's pending 3561 */ 3562 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3563 { 3564 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3565 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3566 if (ret) 3567 return -EINTR; 3568 return 0; 3569 } 3570 3571 /* 3572 * This creates an orphan entry for the given inode in case something goes wrong 3573 * in the middle of an unlink. 3574 */ 3575 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3576 struct btrfs_inode *inode) 3577 { 3578 int ret; 3579 3580 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3581 if (ret && ret != -EEXIST) { 3582 btrfs_abort_transaction(trans, ret); 3583 return ret; 3584 } 3585 3586 return 0; 3587 } 3588 3589 /* 3590 * We have done the delete so we can go ahead and remove the orphan item for 3591 * this particular inode. 
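*
* The orphan item lives in the inode's own root and is keyed as
* (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, btrfs_ino(inode)),
* i.e. the inode number is stored in the key offset.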
3592 */ 3593 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3594 struct btrfs_inode *inode) 3595 { 3596 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3597 } 3598 3599 /* 3600 * This cleans up any orphans that may be left on the list from the last use 3601 * of this root. 3602 */ 3603 int btrfs_orphan_cleanup(struct btrfs_root *root) 3604 { 3605 struct btrfs_fs_info *fs_info = root->fs_info; 3606 struct btrfs_path *path; 3607 struct extent_buffer *leaf; 3608 struct btrfs_key key, found_key; 3609 struct btrfs_trans_handle *trans; 3610 struct inode *inode; 3611 u64 last_objectid = 0; 3612 int ret = 0, nr_unlink = 0; 3613 3614 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) 3615 return 0; 3616 3617 path = btrfs_alloc_path(); 3618 if (!path) { 3619 ret = -ENOMEM; 3620 goto out; 3621 } 3622 path->reada = READA_BACK; 3623 3624 key.objectid = BTRFS_ORPHAN_OBJECTID; 3625 key.type = BTRFS_ORPHAN_ITEM_KEY; 3626 key.offset = (u64)-1; 3627 3628 while (1) { 3629 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3630 if (ret < 0) 3631 goto out; 3632 3633 /* 3634 * ret == 0 means we found what we were searching for, which 3635 * is weird, but possible, so only screw with the path if we 3636 * didn't find the key and see if we have stuff that matches 3637 */ 3638 if (ret > 0) { 3639 ret = 0; 3640 if (path->slots[0] == 0) 3641 break; 3642 path->slots[0]--; 3643 } 3644 3645 /* pull out the item */ 3646 leaf = path->nodes[0]; 3647 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3648 3649 /* make sure the item matches what we want */ 3650 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3651 break; 3652 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3653 break; 3654 3655 /* release the path since we're done with it */ 3656 btrfs_release_path(path); 3657 3658 /* 3659 * This is basically btrfs_lookup(), without the root-crossing 3660 * logic. We store the inode number in the 3661 * offset of the orphan item. 3662 */ 3663 3664 if (found_key.offset == last_objectid) { 3665 btrfs_err(fs_info, 3666 "Error removing orphan entry, stopping orphan cleanup"); 3667 ret = -EINVAL; 3668 goto out; 3669 } 3670 3671 last_objectid = found_key.offset; 3672 3673 found_key.objectid = found_key.offset; 3674 found_key.type = BTRFS_INODE_ITEM_KEY; 3675 found_key.offset = 0; 3676 inode = btrfs_iget(fs_info->sb, last_objectid, root); 3677 if (IS_ERR(inode)) { 3678 ret = PTR_ERR(inode); 3679 inode = NULL; 3680 if (ret != -ENOENT) 3681 goto out; 3682 } 3683 3684 if (!inode && root == fs_info->tree_root) { 3685 struct btrfs_root *dead_root; 3686 int is_dead_root = 0; 3687 3688 /* 3689 * This is an orphan in the tree root. Currently these 3690 * could come from 2 sources: 3691 * a) a root (snapshot/subvolume) deletion in progress 3692 * b) a free space cache inode 3693 * We need to distinguish those two, as the orphan item 3694 * for a root must not get deleted before the deletion 3695 * of the snapshot/subvolume's tree completes. 3696 * 3697 * btrfs_find_orphan_roots() ran before us, which has 3698 * found all deleted roots and loaded them into 3699 * fs_info->fs_roots_radix. So here we can find if an 3700 * orphan item corresponds to a deleted root by looking 3701 * up the root from that radix tree.
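*
* For example, a subvolume whose deletion was interrupted by a crash
* leaves such an orphan item behind; the radix tree lookup below finds
* the dead root (zero refs) and we keep the orphan item around until
* the tree deletion completes.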
3702 */ 3703 3704 spin_lock(&fs_info->fs_roots_radix_lock); 3705 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, 3706 (unsigned long)found_key.objectid); 3707 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3708 is_dead_root = 1; 3709 spin_unlock(&fs_info->fs_roots_radix_lock); 3710 3711 if (is_dead_root) { 3712 /* prevent this orphan from being found again */ 3713 key.offset = found_key.objectid - 1; 3714 continue; 3715 } 3716 3717 } 3718 3719 /* 3720 * If we have an inode with links, there are a couple of 3721 * possibilities: 3722 * 3723 * 1. We were halfway through creating fsverity metadata for the 3724 * file. In that case, the orphan item represents incomplete 3725 * fsverity metadata which must be cleaned up with 3726 * btrfs_drop_verity_items and deleting the orphan item. 3727 * 3728 * 2. Old kernels (before v3.12) used to create an 3729 * orphan item for truncate indicating that there were possibly 3730 * extent items past i_size that needed to be deleted. In v3.12, 3731 * truncate was changed to update i_size in sync with the extent 3732 * items, but the (useless) orphan item was still created. Since 3733 * v4.18, we don't create the orphan item for truncate at all. 3734 * 3735 * So, this item could mean that we need to do a truncate, but 3736 * only if this filesystem was last used on a pre-v3.12 kernel 3737 * and was not cleanly unmounted. The odds of that are quite 3738 * slim, and it's a pain to do the truncate now, so just delete 3739 * the orphan item. 3740 * 3741 * It's also possible that this orphan item was supposed to be 3742 * deleted but wasn't. The inode number may have been reused, 3743 * but either way, we can delete the orphan item. 3744 */ 3745 if (!inode || inode->i_nlink) { 3746 if (inode) { 3747 ret = btrfs_drop_verity_items(BTRFS_I(inode)); 3748 iput(inode); 3749 inode = NULL; 3750 if (ret) 3751 goto out; 3752 } 3753 trans = btrfs_start_transaction(root, 1); 3754 if (IS_ERR(trans)) { 3755 ret = PTR_ERR(trans); 3756 goto out; 3757 } 3758 btrfs_debug(fs_info, "auto deleting %Lu", 3759 found_key.objectid); 3760 ret = btrfs_del_orphan_item(trans, root, 3761 found_key.objectid); 3762 btrfs_end_transaction(trans); 3763 if (ret) 3764 goto out; 3765 continue; 3766 } 3767 3768 nr_unlink++; 3769 3770 /* this will do delete_inode and everything for us */ 3771 iput(inode); 3772 } 3773 /* release the path since we're done with it */ 3774 btrfs_release_path(path); 3775 3776 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3777 trans = btrfs_join_transaction(root); 3778 if (!IS_ERR(trans)) 3779 btrfs_end_transaction(trans); 3780 } 3781 3782 if (nr_unlink) 3783 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3784 3785 out: 3786 if (ret) 3787 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3788 btrfs_free_path(path); 3789 return ret; 3790 } 3791 3792 /* 3793 * very simple check to peek ahead in the leaf looking for xattrs. If we 3794 * don't find any xattrs, we know there can't be any acls.
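*
* This works because items in a leaf are sorted by (objectid, type,
* offset), and for one inode the xattr items sort right after the inode
* item and its refs, so a short forward scan from the inode's slot is
* enough.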
3795 * 3796 * slot is the slot the inode is in, objectid is the objectid of the inode 3797 */ 3798 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3799 int slot, u64 objectid, 3800 int *first_xattr_slot) 3801 { 3802 u32 nritems = btrfs_header_nritems(leaf); 3803 struct btrfs_key found_key; 3804 static u64 xattr_access = 0; 3805 static u64 xattr_default = 0; 3806 int scanned = 0; 3807 3808 if (!xattr_access) { 3809 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3810 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3811 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3812 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3813 } 3814 3815 slot++; 3816 *first_xattr_slot = -1; 3817 while (slot < nritems) { 3818 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3819 3820 /* we found a different objectid, there must not be acls */ 3821 if (found_key.objectid != objectid) 3822 return 0; 3823 3824 /* we found an xattr, assume we've got an acl */ 3825 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3826 if (*first_xattr_slot == -1) 3827 *first_xattr_slot = slot; 3828 if (found_key.offset == xattr_access || 3829 found_key.offset == xattr_default) 3830 return 1; 3831 } 3832 3833 /* 3834 * we found a key greater than an xattr key, there can't 3835 * be any acls later on 3836 */ 3837 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3838 return 0; 3839 3840 slot++; 3841 scanned++; 3842 3843 /* 3844 * it goes inode, inode backrefs, xattrs, extents, 3845 * so if there are a ton of hard links to an inode there can 3846 * be a lot of backrefs. Don't waste time searching too hard, 3847 * this is just an optimization 3848 */ 3849 if (scanned >= 8) 3850 break; 3851 } 3852 /* we hit the end of the leaf before we found an xattr or 3853 * something larger than an xattr. 
We have to assume the inode 3854 * has acls 3855 */ 3856 if (*first_xattr_slot == -1) 3857 *first_xattr_slot = slot; 3858 return 1; 3859 } 3860 3861 /* 3862 * read an inode from the btree into the in-memory inode 3863 */ 3864 static int btrfs_read_locked_inode(struct inode *inode, 3865 struct btrfs_path *in_path) 3866 { 3867 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3868 struct btrfs_path *path = in_path; 3869 struct extent_buffer *leaf; 3870 struct btrfs_inode_item *inode_item; 3871 struct btrfs_root *root = BTRFS_I(inode)->root; 3872 struct btrfs_key location; 3873 unsigned long ptr; 3874 int maybe_acls; 3875 u32 rdev; 3876 int ret; 3877 bool filled = false; 3878 int first_xattr_slot; 3879 3880 ret = btrfs_fill_inode(inode, &rdev); 3881 if (!ret) 3882 filled = true; 3883 3884 if (!path) { 3885 path = btrfs_alloc_path(); 3886 if (!path) 3887 return -ENOMEM; 3888 } 3889 3890 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3891 3892 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3893 if (ret) { 3894 if (path != in_path) 3895 btrfs_free_path(path); 3896 return ret; 3897 } 3898 3899 leaf = path->nodes[0]; 3900 3901 if (filled) 3902 goto cache_index; 3903 3904 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3905 struct btrfs_inode_item); 3906 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3907 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3908 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3909 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3910 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 3911 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0, 3912 round_up(i_size_read(inode), fs_info->sectorsize)); 3913 3914 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 3915 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 3916 3917 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 3918 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 3919 3920 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); 3921 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); 3922 3923 BTRFS_I(inode)->i_otime.tv_sec = 3924 btrfs_timespec_sec(leaf, &inode_item->otime); 3925 BTRFS_I(inode)->i_otime.tv_nsec = 3926 btrfs_timespec_nsec(leaf, &inode_item->otime); 3927 3928 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3929 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3930 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3931 3932 inode_set_iversion_queried(inode, 3933 btrfs_inode_sequence(leaf, inode_item)); 3934 inode->i_generation = BTRFS_I(inode)->generation; 3935 inode->i_rdev = 0; 3936 rdev = btrfs_inode_rdev(leaf, inode_item); 3937 3938 BTRFS_I(inode)->index_cnt = (u64)-1; 3939 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), 3940 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); 3941 3942 cache_index: 3943 /* 3944 * If we were modified in the current generation and evicted from memory 3945 * and then re-read we need to do a full sync since we don't have any 3946 * idea about which extents were modified before we were evicted from 3947 * cache. 3948 * 3949 * This is required for both inode re-read from disk and delayed inode 3950 * in delayed_nodes_tree. 
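*
* For example: write to a file in the current transaction, evict the
* inode under memory pressure, read it back and then fsync it. Without
* the full sync flag, the fast fsync path would have no idea which
* extents were modified earlier in this same transaction.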
3951 */ 3952 if (BTRFS_I(inode)->last_trans == fs_info->generation) 3953 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3954 &BTRFS_I(inode)->runtime_flags); 3955 3956 /* 3957 * We don't persist the id of the transaction where an unlink operation 3958 * against the inode was last made. So here we assume the inode might 3959 * have been evicted, and therefore the exact value of last_unlink_trans 3960 * was lost, and set it to last_trans to avoid metadata inconsistencies 3961 * between the inode and its parent if the inode is fsync'ed and the log 3962 * replayed. For example, in the scenario: 3963 * 3964 * touch mydir/foo 3965 * ln mydir/foo mydir/bar 3966 * sync 3967 * unlink mydir/bar 3968 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3969 * xfs_io -c fsync mydir/foo 3970 * <power failure> 3971 * mount fs, triggers fsync log replay 3972 * 3973 * We must make sure that when we fsync our inode foo we also log its 3974 * parent inode, otherwise after log replay the parent still has the 3975 * dentry with the "bar" name but our inode foo has a link count of 1 3976 * and doesn't have an inode ref with the name "bar" anymore. 3977 * 3978 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3979 * but it guarantees correctness at the expense of occasional full 3980 * transaction commits on fsync if our inode is a directory, or if our 3981 * inode is not a directory, logging its parent unnecessarily. 3982 */ 3983 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3984 3985 /* 3986 * Same logic as for last_unlink_trans. We don't persist the generation 3987 * of the last transaction where this inode was used for a reflink 3988 * operation, so after eviction and reloading the inode we must be 3989 * pessimistic and assume the last transaction that modified the inode.
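*
* For example: clone extents from one file into another, evict the
* destination inode, then fsync it. We must assume the reflink happened
* in last_trans so that the fsync takes it into account.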
3990 */ 3991 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; 3992 3993 path->slots[0]++; 3994 if (inode->i_nlink != 1 || 3995 path->slots[0] >= btrfs_header_nritems(leaf)) 3996 goto cache_acl; 3997 3998 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3999 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 4000 goto cache_acl; 4001 4002 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4003 if (location.type == BTRFS_INODE_REF_KEY) { 4004 struct btrfs_inode_ref *ref; 4005 4006 ref = (struct btrfs_inode_ref *)ptr; 4007 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 4008 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 4009 struct btrfs_inode_extref *extref; 4010 4011 extref = (struct btrfs_inode_extref *)ptr; 4012 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 4013 extref); 4014 } 4015 cache_acl: 4016 /* 4017 * try to precache a NULL acl entry for files that don't have 4018 * any xattrs or acls 4019 */ 4020 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 4021 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 4022 if (first_xattr_slot != -1) { 4023 path->slots[0] = first_xattr_slot; 4024 ret = btrfs_load_inode_props(inode, path); 4025 if (ret) 4026 btrfs_err(fs_info, 4027 "error loading props for ino %llu (root %llu): %d", 4028 btrfs_ino(BTRFS_I(inode)), 4029 root->root_key.objectid, ret); 4030 } 4031 if (path != in_path) 4032 btrfs_free_path(path); 4033 4034 if (!maybe_acls) 4035 cache_no_acl(inode); 4036 4037 switch (inode->i_mode & S_IFMT) { 4038 case S_IFREG: 4039 inode->i_mapping->a_ops = &btrfs_aops; 4040 inode->i_fop = &btrfs_file_operations; 4041 inode->i_op = &btrfs_file_inode_operations; 4042 break; 4043 case S_IFDIR: 4044 inode->i_fop = &btrfs_dir_file_operations; 4045 inode->i_op = &btrfs_dir_inode_operations; 4046 break; 4047 case S_IFLNK: 4048 inode->i_op = &btrfs_symlink_inode_operations; 4049 inode_nohighmem(inode); 4050 inode->i_mapping->a_ops = &btrfs_aops; 4051 break; 4052 default: 4053 inode->i_op = &btrfs_special_inode_operations; 4054 init_special_inode(inode, inode->i_mode, rdev); 4055 break; 4056 } 4057 4058 btrfs_sync_inode_flags_to_i_flags(inode); 4059 return 0; 4060 } 4061 4062 /* 4063 * given a leaf and an inode, copy the inode fields into the leaf 4064 */ 4065 static void fill_inode_item(struct btrfs_trans_handle *trans, 4066 struct extent_buffer *leaf, 4067 struct btrfs_inode_item *item, 4068 struct inode *inode) 4069 { 4070 struct btrfs_map_token token; 4071 u64 flags; 4072 4073 btrfs_init_map_token(&token, leaf); 4074 4075 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 4076 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 4077 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 4078 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 4079 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 4080 4081 btrfs_set_token_timespec_sec(&token, &item->atime, 4082 inode->i_atime.tv_sec); 4083 btrfs_set_token_timespec_nsec(&token, &item->atime, 4084 inode->i_atime.tv_nsec); 4085 4086 btrfs_set_token_timespec_sec(&token, &item->mtime, 4087 inode->i_mtime.tv_sec); 4088 btrfs_set_token_timespec_nsec(&token, &item->mtime, 4089 inode->i_mtime.tv_nsec); 4090 4091 btrfs_set_token_timespec_sec(&token, &item->ctime, 4092 inode->i_ctime.tv_sec); 4093 btrfs_set_token_timespec_nsec(&token, &item->ctime, 4094 inode->i_ctime.tv_nsec); 4095 4096 btrfs_set_token_timespec_sec(&token, &item->otime, 4097 BTRFS_I(inode)->i_otime.tv_sec); 4098 
btrfs_set_token_timespec_nsec(&token, &item->otime, 4099 BTRFS_I(inode)->i_otime.tv_nsec); 4100 4101 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 4102 btrfs_set_token_inode_generation(&token, item, 4103 BTRFS_I(inode)->generation); 4104 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 4105 btrfs_set_token_inode_transid(&token, item, trans->transid); 4106 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 4107 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 4108 BTRFS_I(inode)->ro_flags); 4109 btrfs_set_token_inode_flags(&token, item, flags); 4110 btrfs_set_token_inode_block_group(&token, item, 0); 4111 } 4112 4113 /* 4114 * copy everything in the in-memory inode into the btree. 4115 */ 4116 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 4117 struct btrfs_root *root, 4118 struct btrfs_inode *inode) 4119 { 4120 struct btrfs_inode_item *inode_item; 4121 struct btrfs_path *path; 4122 struct extent_buffer *leaf; 4123 int ret; 4124 4125 path = btrfs_alloc_path(); 4126 if (!path) 4127 return -ENOMEM; 4128 4129 ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1); 4130 if (ret) { 4131 if (ret > 0) 4132 ret = -ENOENT; 4133 goto failed; 4134 } 4135 4136 leaf = path->nodes[0]; 4137 inode_item = btrfs_item_ptr(leaf, path->slots[0], 4138 struct btrfs_inode_item); 4139 4140 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); 4141 btrfs_mark_buffer_dirty(leaf); 4142 btrfs_set_inode_last_trans(trans, inode); 4143 ret = 0; 4144 failed: 4145 btrfs_free_path(path); 4146 return ret; 4147 } 4148 4149 /* 4150 * copy everything in the in-memory inode into the btree. 4151 */ 4152 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 4153 struct btrfs_root *root, 4154 struct btrfs_inode *inode) 4155 { 4156 struct btrfs_fs_info *fs_info = root->fs_info; 4157 int ret; 4158 4159 /* 4160 * If the inode is a free space inode, we can deadlock during commit 4161 * if we put it into the delayed code. 4162 * 4163 * The data relocation inode should also be directly updated 4164 * without delay 4165 */ 4166 if (!btrfs_is_free_space_inode(inode) 4167 && !btrfs_is_data_reloc_root(root) 4168 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 4169 btrfs_update_root_times(trans, root); 4170 4171 ret = btrfs_delayed_update_inode(trans, root, inode); 4172 if (!ret) 4173 btrfs_set_inode_last_trans(trans, inode); 4174 return ret; 4175 } 4176 4177 return btrfs_update_inode_item(trans, root, inode); 4178 } 4179 4180 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4181 struct btrfs_root *root, struct btrfs_inode *inode) 4182 { 4183 int ret; 4184 4185 ret = btrfs_update_inode(trans, root, inode); 4186 if (ret == -ENOSPC) 4187 return btrfs_update_inode_item(trans, root, inode); 4188 return ret; 4189 } 4190 4191 /* 4192 * unlink helper that gets used here in inode.c and in the tree logging 4193 * recovery code. 
It removes a link in a directory with a given name, and 4194 * also drops the back refs in the inode to the directory 4195 */ 4196 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4197 struct btrfs_inode *dir, 4198 struct btrfs_inode *inode, 4199 const struct fscrypt_str *name, 4200 struct btrfs_rename_ctx *rename_ctx) 4201 { 4202 struct btrfs_root *root = dir->root; 4203 struct btrfs_fs_info *fs_info = root->fs_info; 4204 struct btrfs_path *path; 4205 int ret = 0; 4206 struct btrfs_dir_item *di; 4207 u64 index; 4208 u64 ino = btrfs_ino(inode); 4209 u64 dir_ino = btrfs_ino(dir); 4210 4211 path = btrfs_alloc_path(); 4212 if (!path) { 4213 ret = -ENOMEM; 4214 goto out; 4215 } 4216 4217 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); 4218 if (IS_ERR_OR_NULL(di)) { 4219 ret = di ? PTR_ERR(di) : -ENOENT; 4220 goto err; 4221 } 4222 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4223 if (ret) 4224 goto err; 4225 btrfs_release_path(path); 4226 4227 /* 4228 * If we don't have the dir index cached, we have to get it by 4229 * looking up the inode ref; and since we then already have the 4230 * inode ref at hand, we remove it directly instead of doing a 4231 * delayed deletion. 4232 * 4233 * But if we do have the dir index cached, there is no need to 4234 * search for the inode ref. Since the inode ref is close to the 4235 * inode item, it is better to delay its deletion and do it when 4236 * we update the inode item. 4237 */ if (inode->dir_index) { 4238 ret = btrfs_delayed_delete_inode_ref(inode); 4239 if (!ret) { 4240 index = inode->dir_index; 4241 goto skip_backref; 4242 } 4243 } 4244 4245 ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index); 4246 if (ret) { 4247 btrfs_info(fs_info, 4248 "failed to delete reference to %.*s, inode %llu parent %llu", 4249 name->len, name->name, ino, dir_ino); 4250 btrfs_abort_transaction(trans, ret); 4251 goto err; 4252 } 4253 skip_backref: 4254 if (rename_ctx) 4255 rename_ctx->index = index; 4256 4257 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4258 if (ret) { 4259 btrfs_abort_transaction(trans, ret); 4260 goto err; 4261 } 4262 4263 /* 4264 * If we are in a rename context, we don't need to update anything in the 4265 * log. That will be done later during the rename by btrfs_log_new_name(). 4266 * Besides that, doing it here would only cause extra unnecessary btree 4267 * operations on the log tree, increasing latency for applications. 4268 */ 4269 if (!rename_ctx) { 4270 btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino); 4271 btrfs_del_dir_entries_in_log(trans, root, name, dir, index); 4272 } 4273 4274 /* 4275 * If we have a pending delayed iput we could end up with the final iput 4276 * being run in btrfs-cleaner context. If we have enough of these built 4277 * up we can end up burning a lot of time in btrfs-cleaner without any 4278 * way to throttle the unlinks. Since we're currently holding a ref on 4279 * the inode we can run the delayed iput here without any issues as the 4280 * final iput won't be done until after we drop the ref we're currently 4281 * holding.
4282 */ 4283 btrfs_run_delayed_iput(fs_info, inode); 4284 err: 4285 btrfs_free_path(path); 4286 if (ret) 4287 goto out; 4288 4289 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); 4290 inode_inc_iversion(&inode->vfs_inode); 4291 inode_inc_iversion(&dir->vfs_inode); 4292 inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode); 4293 dir->vfs_inode.i_mtime = inode->vfs_inode.i_ctime; 4294 dir->vfs_inode.i_ctime = inode->vfs_inode.i_ctime; 4295 ret = btrfs_update_inode(trans, root, dir); 4296 out: 4297 return ret; 4298 } 4299 4300 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4301 struct btrfs_inode *dir, struct btrfs_inode *inode, 4302 const struct fscrypt_str *name) 4303 { 4304 int ret; 4305 4306 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL); 4307 if (!ret) { 4308 drop_nlink(&inode->vfs_inode); 4309 ret = btrfs_update_inode(trans, inode->root, inode); 4310 } 4311 return ret; 4312 } 4313 4314 /* 4315 * helper to start transaction for unlink and rmdir. 4316 * 4317 * unlink and rmdir are special in btrfs, they do not always free space, so 4318 * if we cannot make our reservations the normal way try and see if there is 4319 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4320 * allow the unlink to occur. 4321 */ 4322 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) 4323 { 4324 struct btrfs_root *root = dir->root; 4325 4326 return btrfs_start_transaction_fallback_global_rsv(root, 4327 BTRFS_UNLINK_METADATA_UNITS); 4328 } 4329 4330 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4331 { 4332 struct btrfs_trans_handle *trans; 4333 struct inode *inode = d_inode(dentry); 4334 int ret; 4335 struct fscrypt_name fname; 4336 4337 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4338 if (ret) 4339 return ret; 4340 4341 /* This needs to handle no-key deletions later on */ 4342 4343 trans = __unlink_start_trans(BTRFS_I(dir)); 4344 if (IS_ERR(trans)) { 4345 ret = PTR_ERR(trans); 4346 goto fscrypt_free; 4347 } 4348 4349 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4350 false); 4351 4352 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4353 &fname.disk_name); 4354 if (ret) 4355 goto end_trans; 4356 4357 if (inode->i_nlink == 0) { 4358 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4359 if (ret) 4360 goto end_trans; 4361 } 4362 4363 end_trans: 4364 btrfs_end_transaction(trans); 4365 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); 4366 fscrypt_free: 4367 fscrypt_free_filename(&fname); 4368 return ret; 4369 } 4370 4371 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4372 struct btrfs_inode *dir, struct dentry *dentry) 4373 { 4374 struct btrfs_root *root = dir->root; 4375 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4376 struct btrfs_path *path; 4377 struct extent_buffer *leaf; 4378 struct btrfs_dir_item *di; 4379 struct btrfs_key key; 4380 u64 index; 4381 int ret; 4382 u64 objectid; 4383 u64 dir_ino = btrfs_ino(dir); 4384 struct fscrypt_name fname; 4385 4386 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 4387 if (ret) 4388 return ret; 4389 4390 /* This needs to handle no-key deletions later on */ 4391 4392 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4393 objectid = inode->root->root_key.objectid; 4394 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4395 objectid = inode->location.objectid; 4396 } else { 4397 WARN_ON(1); 4398 
fscrypt_free_filename(&fname); 4399 return -EINVAL; 4400 } 4401 4402 path = btrfs_alloc_path(); 4403 if (!path) { 4404 ret = -ENOMEM; 4405 goto out; 4406 } 4407 4408 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4409 &fname.disk_name, -1); 4410 if (IS_ERR_OR_NULL(di)) { 4411 ret = di ? PTR_ERR(di) : -ENOENT; 4412 goto out; 4413 } 4414 4415 leaf = path->nodes[0]; 4416 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4417 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4418 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4419 if (ret) { 4420 btrfs_abort_transaction(trans, ret); 4421 goto out; 4422 } 4423 btrfs_release_path(path); 4424 4425 /* 4426 * This is a placeholder inode for a subvolume we didn't have a 4427 * reference to at the time of the snapshot creation. In the meantime 4428 * we could have renamed the real subvol link into our snapshot, so 4429 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. 4430 * Instead simply lookup the dir_index_item for this entry so we can 4431 * remove it. Otherwise we know we have a ref to the root and we can 4432 * call btrfs_del_root_ref, and it _shouldn't_ fail. 4433 */ 4434 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4435 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name); 4436 if (IS_ERR_OR_NULL(di)) { 4437 if (!di) 4438 ret = -ENOENT; 4439 else 4440 ret = PTR_ERR(di); 4441 btrfs_abort_transaction(trans, ret); 4442 goto out; 4443 } 4444 4445 leaf = path->nodes[0]; 4446 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4447 index = key.offset; 4448 btrfs_release_path(path); 4449 } else { 4450 ret = btrfs_del_root_ref(trans, objectid, 4451 root->root_key.objectid, dir_ino, 4452 &index, &fname.disk_name); 4453 if (ret) { 4454 btrfs_abort_transaction(trans, ret); 4455 goto out; 4456 } 4457 } 4458 4459 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4460 if (ret) { 4461 btrfs_abort_transaction(trans, ret); 4462 goto out; 4463 } 4464 4465 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); 4466 inode_inc_iversion(&dir->vfs_inode); 4467 dir->vfs_inode.i_mtime = current_time(&dir->vfs_inode); 4468 dir->vfs_inode.i_ctime = dir->vfs_inode.i_mtime; 4469 ret = btrfs_update_inode_fallback(trans, root, dir); 4470 if (ret) 4471 btrfs_abort_transaction(trans, ret); 4472 out: 4473 btrfs_free_path(path); 4474 fscrypt_free_filename(&fname); 4475 return ret; 4476 } 4477 4478 /* 4479 * Helper to check if the subvolume references other subvolumes or if it's 4480 * default. 
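*
* Returns -EPERM if it is the default subvolume, -ENOTEMPTY if it still
* has child subvolume references (ROOT_REF items keyed to it in the
* root tree), and 0 if it is safe to destroy.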
4481 */ 4482 static noinline int may_destroy_subvol(struct btrfs_root *root) 4483 { 4484 struct btrfs_fs_info *fs_info = root->fs_info; 4485 struct btrfs_path *path; 4486 struct btrfs_dir_item *di; 4487 struct btrfs_key key; 4488 struct fscrypt_str name = FSTR_INIT("default", 7); 4489 u64 dir_id; 4490 int ret; 4491 4492 path = btrfs_alloc_path(); 4493 if (!path) 4494 return -ENOMEM; 4495 4496 /* Make sure this root isn't set as the default subvol */ 4497 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4498 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4499 dir_id, &name, 0); 4500 if (di && !IS_ERR(di)) { 4501 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4502 if (key.objectid == root->root_key.objectid) { 4503 ret = -EPERM; 4504 btrfs_err(fs_info, 4505 "deleting default subvolume %llu is not allowed", 4506 key.objectid); 4507 goto out; 4508 } 4509 btrfs_release_path(path); 4510 } 4511 4512 key.objectid = root->root_key.objectid; 4513 key.type = BTRFS_ROOT_REF_KEY; 4514 key.offset = (u64)-1; 4515 4516 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4517 if (ret < 0) 4518 goto out; 4519 BUG_ON(ret == 0); 4520 4521 ret = 0; 4522 if (path->slots[0] > 0) { 4523 path->slots[0]--; 4524 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4525 if (key.objectid == root->root_key.objectid && 4526 key.type == BTRFS_ROOT_REF_KEY) 4527 ret = -ENOTEMPTY; 4528 } 4529 out: 4530 btrfs_free_path(path); 4531 return ret; 4532 } 4533 4534 /* Delete all dentries for inodes belonging to the root */ 4535 static void btrfs_prune_dentries(struct btrfs_root *root) 4536 { 4537 struct btrfs_fs_info *fs_info = root->fs_info; 4538 struct rb_node *node; 4539 struct rb_node *prev; 4540 struct btrfs_inode *entry; 4541 struct inode *inode; 4542 u64 objectid = 0; 4543 4544 if (!BTRFS_FS_ERROR(fs_info)) 4545 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4546 4547 spin_lock(&root->inode_lock); 4548 again: 4549 node = root->inode_tree.rb_node; 4550 prev = NULL; 4551 while (node) { 4552 prev = node; 4553 entry = rb_entry(node, struct btrfs_inode, rb_node); 4554 4555 if (objectid < btrfs_ino(entry)) 4556 node = node->rb_left; 4557 else if (objectid > btrfs_ino(entry)) 4558 node = node->rb_right; 4559 else 4560 break; 4561 } 4562 if (!node) { 4563 while (prev) { 4564 entry = rb_entry(prev, struct btrfs_inode, rb_node); 4565 if (objectid <= btrfs_ino(entry)) { 4566 node = prev; 4567 break; 4568 } 4569 prev = rb_next(prev); 4570 } 4571 } 4572 while (node) { 4573 entry = rb_entry(node, struct btrfs_inode, rb_node); 4574 objectid = btrfs_ino(entry) + 1; 4575 inode = igrab(&entry->vfs_inode); 4576 if (inode) { 4577 spin_unlock(&root->inode_lock); 4578 if (atomic_read(&inode->i_count) > 1) 4579 d_prune_aliases(inode); 4580 /* 4581 * btrfs_drop_inode will have it removed from the inode 4582 * cache when its usage count hits zero. 
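*
* The iput() below drops the reference we took with igrab() above.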
4583 */ 4584 iput(inode); 4585 cond_resched(); 4586 spin_lock(&root->inode_lock); 4587 goto again; 4588 } 4589 4590 if (cond_resched_lock(&root->inode_lock)) 4591 goto again; 4592 4593 node = rb_next(node); 4594 } 4595 spin_unlock(&root->inode_lock); 4596 } 4597 4598 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry) 4599 { 4600 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); 4601 struct btrfs_root *root = dir->root; 4602 struct inode *inode = d_inode(dentry); 4603 struct btrfs_root *dest = BTRFS_I(inode)->root; 4604 struct btrfs_trans_handle *trans; 4605 struct btrfs_block_rsv block_rsv; 4606 u64 root_flags; 4607 int ret; 4608 4609 /* 4610 * Don't allow deleting a subvolume with send in progress. This is 4611 * inside the inode lock so the error handling that has to drop the bit 4612 * again is not run concurrently. 4613 */ 4614 spin_lock(&dest->root_item_lock); 4615 if (dest->send_in_progress) { 4616 spin_unlock(&dest->root_item_lock); 4617 btrfs_warn(fs_info, 4618 "attempt to delete subvolume %llu during send", 4619 dest->root_key.objectid); 4620 return -EPERM; 4621 } 4622 if (atomic_read(&dest->nr_swapfiles)) { 4623 spin_unlock(&dest->root_item_lock); 4624 btrfs_warn(fs_info, 4625 "attempt to delete subvolume %llu with active swapfile", 4626 dest->root_key.objectid); 4627 return -EPERM; 4628 } 4629 root_flags = btrfs_root_flags(&dest->root_item); 4630 btrfs_set_root_flags(&dest->root_item, 4631 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4632 spin_unlock(&dest->root_item_lock); 4633 4634 down_write(&fs_info->subvol_sem); 4635 4636 ret = may_destroy_subvol(dest); 4637 if (ret) 4638 goto out_up_write; 4639 4640 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4641 /* 4642 * One for dir inode, 4643 * two for dir entries, 4644 * two for root ref/backref.
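* Hence the 5 units passed to btrfs_subvolume_reserve_metadata() below.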
4645 */ 4646 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4647 if (ret) 4648 goto out_up_write; 4649 4650 trans = btrfs_start_transaction(root, 0); 4651 if (IS_ERR(trans)) { 4652 ret = PTR_ERR(trans); 4653 goto out_release; 4654 } 4655 trans->block_rsv = &block_rsv; 4656 trans->bytes_reserved = block_rsv.size; 4657 4658 btrfs_record_snapshot_destroy(trans, dir); 4659 4660 ret = btrfs_unlink_subvol(trans, dir, dentry); 4661 if (ret) { 4662 btrfs_abort_transaction(trans, ret); 4663 goto out_end_trans; 4664 } 4665 4666 ret = btrfs_record_root_in_trans(trans, dest); 4667 if (ret) { 4668 btrfs_abort_transaction(trans, ret); 4669 goto out_end_trans; 4670 } 4671 4672 memset(&dest->root_item.drop_progress, 0, 4673 sizeof(dest->root_item.drop_progress)); 4674 btrfs_set_root_drop_level(&dest->root_item, 0); 4675 btrfs_set_root_refs(&dest->root_item, 0); 4676 4677 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4678 ret = btrfs_insert_orphan_item(trans, 4679 fs_info->tree_root, 4680 dest->root_key.objectid); 4681 if (ret) { 4682 btrfs_abort_transaction(trans, ret); 4683 goto out_end_trans; 4684 } 4685 } 4686 4687 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4688 BTRFS_UUID_KEY_SUBVOL, 4689 dest->root_key.objectid); 4690 if (ret && ret != -ENOENT) { 4691 btrfs_abort_transaction(trans, ret); 4692 goto out_end_trans; 4693 } 4694 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4695 ret = btrfs_uuid_tree_remove(trans, 4696 dest->root_item.received_uuid, 4697 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4698 dest->root_key.objectid); 4699 if (ret && ret != -ENOENT) { 4700 btrfs_abort_transaction(trans, ret); 4701 goto out_end_trans; 4702 } 4703 } 4704 4705 free_anon_bdev(dest->anon_dev); 4706 dest->anon_dev = 0; 4707 out_end_trans: 4708 trans->block_rsv = NULL; 4709 trans->bytes_reserved = 0; 4710 ret = btrfs_end_transaction(trans); 4711 inode->i_flags |= S_DEAD; 4712 out_release: 4713 btrfs_subvolume_release_metadata(root, &block_rsv); 4714 out_up_write: 4715 up_write(&fs_info->subvol_sem); 4716 if (ret) { 4717 spin_lock(&dest->root_item_lock); 4718 root_flags = btrfs_root_flags(&dest->root_item); 4719 btrfs_set_root_flags(&dest->root_item, 4720 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4721 spin_unlock(&dest->root_item_lock); 4722 } else { 4723 d_invalidate(dentry); 4724 btrfs_prune_dentries(dest); 4725 ASSERT(dest->send_in_progress == 0); 4726 } 4727 4728 return ret; 4729 } 4730 4731 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4732 { 4733 struct inode *inode = d_inode(dentry); 4734 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 4735 int err = 0; 4736 struct btrfs_trans_handle *trans; 4737 u64 last_unlink_trans; 4738 struct fscrypt_name fname; 4739 4740 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4741 return -ENOTEMPTY; 4742 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) { 4743 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { 4744 btrfs_err(fs_info, 4745 "extent tree v2 doesn't support snapshot deletion yet"); 4746 return -EOPNOTSUPP; 4747 } 4748 return btrfs_delete_subvolume(BTRFS_I(dir), dentry); 4749 } 4750 4751 err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4752 if (err) 4753 return err; 4754 4755 /* This needs to handle no-key deletions later on */ 4756 4757 trans = __unlink_start_trans(BTRFS_I(dir)); 4758 if (IS_ERR(trans)) { 4759 err = PTR_ERR(trans); 4760 goto out_notrans; 4761 } 4762 4763 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 
4764 err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); 4765 goto out; 4766 } 4767 4768 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 4769 if (err) 4770 goto out; 4771 4772 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4773 4774 /* now the directory is empty */ 4775 err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4776 &fname.disk_name); 4777 if (!err) { 4778 btrfs_i_size_write(BTRFS_I(inode), 0); 4779 /* 4780 * Propagate the last_unlink_trans value of the deleted dir to 4781 * its parent directory. This is to prevent an unrecoverable 4782 * log tree in the case we do something like this: 4783 * 1) create dir foo 4784 * 2) create snapshot under dir foo 4785 * 3) delete the snapshot 4786 * 4) rmdir foo 4787 * 5) mkdir foo 4788 * 6) fsync foo or some file inside foo 4789 */ 4790 if (last_unlink_trans >= trans->transid) 4791 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4792 } 4793 out: 4794 btrfs_end_transaction(trans); 4795 out_notrans: 4796 btrfs_btree_balance_dirty(fs_info); 4797 fscrypt_free_filename(&fname); 4798 4799 return err; 4800 } 4801 4802 /* 4803 * btrfs_truncate_block - read, zero a chunk and write a block 4804 * @inode - inode that we're zeroing 4805 * @from - the offset to start zeroing 4806 * @len - the length to zero, 0 to zero the entire range respective to the 4807 * offset 4808 * @front - zero up to the offset instead of from the offset on 4809 * 4810 * This will find the block for the "from" offset and cow the block and zero the 4811 * part we want to zero. This is used with truncate and hole punching. 4812 */ 4813 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, 4814 int front) 4815 { 4816 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4817 struct address_space *mapping = inode->vfs_inode.i_mapping; 4818 struct extent_io_tree *io_tree = &inode->io_tree; 4819 struct btrfs_ordered_extent *ordered; 4820 struct extent_state *cached_state = NULL; 4821 struct extent_changeset *data_reserved = NULL; 4822 bool only_release_metadata = false; 4823 u32 blocksize = fs_info->sectorsize; 4824 pgoff_t index = from >> PAGE_SHIFT; 4825 unsigned offset = from & (blocksize - 1); 4826 struct page *page; 4827 gfp_t mask = btrfs_alloc_write_mask(mapping); 4828 size_t write_bytes = blocksize; 4829 int ret = 0; 4830 u64 block_start; 4831 u64 block_end; 4832 4833 if (IS_ALIGNED(offset, blocksize) && 4834 (!len || IS_ALIGNED(len, blocksize))) 4835 goto out; 4836 4837 block_start = round_down(from, blocksize); 4838 block_end = block_start + blocksize - 1; 4839 4840 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, 4841 blocksize, false); 4842 if (ret < 0) { 4843 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) { 4844 /* For nocow case, no need to reserve data space */ 4845 only_release_metadata = true; 4846 } else { 4847 goto out; 4848 } 4849 } 4850 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false); 4851 if (ret < 0) { 4852 if (!only_release_metadata) 4853 btrfs_free_reserved_data_space(inode, data_reserved, 4854 block_start, blocksize); 4855 goto out; 4856 } 4857 again: 4858 page = find_or_create_page(mapping, index, mask); 4859 if (!page) { 4860 btrfs_delalloc_release_space(inode, data_reserved, block_start, 4861 blocksize, true); 4862 btrfs_delalloc_release_extents(inode, blocksize); 4863 ret = -ENOMEM; 4864 goto out; 4865 } 4866 4867 if (!PageUptodate(page)) { 4868 ret = btrfs_read_folio(NULL, page_folio(page)); 4869 lock_page(page); 4870 if 
(page->mapping != mapping) { 4871 unlock_page(page); 4872 put_page(page); 4873 goto again; 4874 } 4875 if (!PageUptodate(page)) { 4876 ret = -EIO; 4877 goto out_unlock; 4878 } 4879 } 4880 4881 /* 4882 * We unlock the page after the io is completed and then re-lock it 4883 * above. release_folio() could have come in between that and cleared 4884 * PagePrivate(), but left the page in the mapping. Set the page mapped 4885 * here to make sure it's properly set for the subpage stuff. 4886 */ 4887 ret = set_page_extent_mapped(page); 4888 if (ret < 0) 4889 goto out_unlock; 4890 4891 wait_on_page_writeback(page); 4892 4893 lock_extent(io_tree, block_start, block_end, &cached_state); 4894 4895 ordered = btrfs_lookup_ordered_extent(inode, block_start); 4896 if (ordered) { 4897 unlock_extent(io_tree, block_start, block_end, &cached_state); 4898 unlock_page(page); 4899 put_page(page); 4900 btrfs_start_ordered_extent(ordered); 4901 btrfs_put_ordered_extent(ordered); 4902 goto again; 4903 } 4904 4905 clear_extent_bit(&inode->io_tree, block_start, block_end, 4906 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4907 &cached_state); 4908 4909 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 4910 &cached_state); 4911 if (ret) { 4912 unlock_extent(io_tree, block_start, block_end, &cached_state); 4913 goto out_unlock; 4914 } 4915 4916 if (offset != blocksize) { 4917 if (!len) 4918 len = blocksize - offset; 4919 if (front) 4920 memzero_page(page, (block_start - page_offset(page)), 4921 offset); 4922 else 4923 memzero_page(page, (block_start - page_offset(page)) + offset, 4924 len); 4925 } 4926 btrfs_page_clear_checked(fs_info, page, block_start, 4927 block_end + 1 - block_start); 4928 btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start); 4929 unlock_extent(io_tree, block_start, block_end, &cached_state); 4930 4931 if (only_release_metadata) 4932 set_extent_bit(&inode->io_tree, block_start, block_end, 4933 EXTENT_NORESERVE, NULL); 4934 4935 out_unlock: 4936 if (ret) { 4937 if (only_release_metadata) 4938 btrfs_delalloc_release_metadata(inode, blocksize, true); 4939 else 4940 btrfs_delalloc_release_space(inode, data_reserved, 4941 block_start, blocksize, true); 4942 } 4943 btrfs_delalloc_release_extents(inode, blocksize); 4944 unlock_page(page); 4945 put_page(page); 4946 out: 4947 if (only_release_metadata) 4948 btrfs_check_nocow_unlock(inode); 4949 extent_changeset_free(data_reserved); 4950 return ret; 4951 } 4952 4953 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode, 4954 u64 offset, u64 len) 4955 { 4956 struct btrfs_fs_info *fs_info = root->fs_info; 4957 struct btrfs_trans_handle *trans; 4958 struct btrfs_drop_extents_args drop_args = { 0 }; 4959 int ret; 4960 4961 /* 4962 * If NO_HOLES is enabled, we don't need to do anything. 4963 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans() 4964 * or btrfs_update_inode() will be called, which guarantee that the next 4965 * fsync will know this inode was changed and needs to be logged. 4966 */ 4967 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 4968 return 0; 4969 4970 /* 4971 * 1 - for the one we're dropping 4972 * 1 - for the one we're adding 4973 * 1 - for updating the inode. 
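* Hence the 3 units passed to btrfs_start_transaction() below.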
4974 */ 4975 trans = btrfs_start_transaction(root, 3); 4976 if (IS_ERR(trans)) 4977 return PTR_ERR(trans); 4978 4979 drop_args.start = offset; 4980 drop_args.end = offset + len; 4981 drop_args.drop_cache = true; 4982 4983 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 4984 if (ret) { 4985 btrfs_abort_transaction(trans, ret); 4986 btrfs_end_transaction(trans); 4987 return ret; 4988 } 4989 4990 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len); 4991 if (ret) { 4992 btrfs_abort_transaction(trans, ret); 4993 } else { 4994 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found); 4995 btrfs_update_inode(trans, root, inode); 4996 } 4997 btrfs_end_transaction(trans); 4998 return ret; 4999 } 5000 5001 /* 5002 * This function puts in dummy file extents for the area we're creating a hole 5003 * for. So if we are truncating this file to a larger size we need to insert 5004 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 5005 * the range between oldsize and size 5006 */ 5007 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) 5008 { 5009 struct btrfs_root *root = inode->root; 5010 struct btrfs_fs_info *fs_info = root->fs_info; 5011 struct extent_io_tree *io_tree = &inode->io_tree; 5012 struct extent_map *em = NULL; 5013 struct extent_state *cached_state = NULL; 5014 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 5015 u64 block_end = ALIGN(size, fs_info->sectorsize); 5016 u64 last_byte; 5017 u64 cur_offset; 5018 u64 hole_size; 5019 int err = 0; 5020 5021 /* 5022 * If our size started in the middle of a block we need to zero out the 5023 * rest of the block before we expand the i_size, otherwise we could 5024 * expose stale data. 5025 */ 5026 err = btrfs_truncate_block(inode, oldsize, 0, 0); 5027 if (err) 5028 return err; 5029 5030 if (size <= hole_start) 5031 return 0; 5032 5033 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, 5034 &cached_state); 5035 cur_offset = hole_start; 5036 while (1) { 5037 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 5038 block_end - cur_offset); 5039 if (IS_ERR(em)) { 5040 err = PTR_ERR(em); 5041 em = NULL; 5042 break; 5043 } 5044 last_byte = min(extent_map_end(em), block_end); 5045 last_byte = ALIGN(last_byte, fs_info->sectorsize); 5046 hole_size = last_byte - cur_offset; 5047 5048 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 5049 struct extent_map *hole_em; 5050 5051 err = maybe_insert_hole(root, inode, cur_offset, 5052 hole_size); 5053 if (err) 5054 break; 5055 5056 err = btrfs_inode_set_file_extent_range(inode, 5057 cur_offset, hole_size); 5058 if (err) 5059 break; 5060 5061 hole_em = alloc_extent_map(); 5062 if (!hole_em) { 5063 btrfs_drop_extent_map_range(inode, cur_offset, 5064 cur_offset + hole_size - 1, 5065 false); 5066 btrfs_set_inode_full_sync(inode); 5067 goto next; 5068 } 5069 hole_em->start = cur_offset; 5070 hole_em->len = hole_size; 5071 hole_em->orig_start = cur_offset; 5072 5073 hole_em->block_start = EXTENT_MAP_HOLE; 5074 hole_em->block_len = 0; 5075 hole_em->orig_block_len = 0; 5076 hole_em->ram_bytes = hole_size; 5077 hole_em->compress_type = BTRFS_COMPRESS_NONE; 5078 hole_em->generation = fs_info->generation; 5079 5080 err = btrfs_replace_extent_map_range(inode, hole_em, true); 5081 free_extent_map(hole_em); 5082 } else { 5083 err = btrfs_inode_set_file_extent_range(inode, 5084 cur_offset, hole_size); 5085 if (err) 5086 break; 5087 } 5088 next: 5089 free_extent_map(em); 5090 em = NULL; 5091 cur_offset = last_byte; 5092 if 
(cur_offset >= block_end) 5093 break; 5094 } 5095 free_extent_map(em); 5096 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); 5097 return err; 5098 } 5099 5100 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 5101 { 5102 struct btrfs_root *root = BTRFS_I(inode)->root; 5103 struct btrfs_trans_handle *trans; 5104 loff_t oldsize = i_size_read(inode); 5105 loff_t newsize = attr->ia_size; 5106 int mask = attr->ia_valid; 5107 int ret; 5108 5109 /* 5110 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 5111 * special case where we need to update the times despite not having 5112 * these flags set. For all other operations the VFS set these flags 5113 * explicitly if it wants a timestamp update. 5114 */ 5115 if (newsize != oldsize) { 5116 inode_inc_iversion(inode); 5117 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) { 5118 inode->i_mtime = current_time(inode); 5119 inode->i_ctime = inode->i_mtime; 5120 } 5121 } 5122 5123 if (newsize > oldsize) { 5124 /* 5125 * Don't do an expanding truncate while snapshotting is ongoing. 5126 * This is to ensure the snapshot captures a fully consistent 5127 * state of this file - if the snapshot captures this expanding 5128 * truncation, it must capture all writes that happened before 5129 * this truncation. 5130 */ 5131 btrfs_drew_write_lock(&root->snapshot_lock); 5132 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize); 5133 if (ret) { 5134 btrfs_drew_write_unlock(&root->snapshot_lock); 5135 return ret; 5136 } 5137 5138 trans = btrfs_start_transaction(root, 1); 5139 if (IS_ERR(trans)) { 5140 btrfs_drew_write_unlock(&root->snapshot_lock); 5141 return PTR_ERR(trans); 5142 } 5143 5144 i_size_write(inode, newsize); 5145 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 5146 pagecache_isize_extended(inode, oldsize, newsize); 5147 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 5148 btrfs_drew_write_unlock(&root->snapshot_lock); 5149 btrfs_end_transaction(trans); 5150 } else { 5151 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5152 5153 if (btrfs_is_zoned(fs_info)) { 5154 ret = btrfs_wait_ordered_range(inode, 5155 ALIGN(newsize, fs_info->sectorsize), 5156 (u64)-1); 5157 if (ret) 5158 return ret; 5159 } 5160 5161 /* 5162 * We're truncating a file that used to have good data down to 5163 * zero. Make sure any new writes to the file get on disk 5164 * on close. 5165 */ 5166 if (newsize == 0) 5167 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 5168 &BTRFS_I(inode)->runtime_flags); 5169 5170 truncate_setsize(inode, newsize); 5171 5172 inode_dio_wait(inode); 5173 5174 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize); 5175 if (ret && inode->i_nlink) { 5176 int err; 5177 5178 /* 5179 * Truncate failed, so fix up the in-memory size. We 5180 * adjusted disk_i_size down as we removed extents, so 5181 * wait for disk_i_size to be stable and then update the 5182 * in-memory size to match. 
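*
* Waiting for all ordered extents (the btrfs_wait_ordered_range()
* call below) guarantees disk_i_size has reached its final value
* before we copy it into the in-memory i_size.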
5183 */ 5184 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); 5185 if (err) 5186 return err; 5187 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 5188 } 5189 } 5190 5191 return ret; 5192 } 5193 5194 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 5195 struct iattr *attr) 5196 { 5197 struct inode *inode = d_inode(dentry); 5198 struct btrfs_root *root = BTRFS_I(inode)->root; 5199 int err; 5200 5201 if (btrfs_root_readonly(root)) 5202 return -EROFS; 5203 5204 err = setattr_prepare(idmap, dentry, attr); 5205 if (err) 5206 return err; 5207 5208 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 5209 err = btrfs_setsize(inode, attr); 5210 if (err) 5211 return err; 5212 } 5213 5214 if (attr->ia_valid) { 5215 setattr_copy(idmap, inode, attr); 5216 inode_inc_iversion(inode); 5217 err = btrfs_dirty_inode(BTRFS_I(inode)); 5218 5219 if (!err && attr->ia_valid & ATTR_MODE) 5220 err = posix_acl_chmod(idmap, dentry, inode->i_mode); 5221 } 5222 5223 return err; 5224 } 5225 5226 /* 5227 * While truncating the inode pages during eviction, we get the VFS 5228 * calling btrfs_invalidate_folio() against each folio of the inode. This 5229 * is slow because the calls to btrfs_invalidate_folio() result in a 5230 * huge number of calls to lock_extent() and clear_extent_bit(), 5231 * which keep merging and splitting extent_state structures over and over, 5232 * wasting lots of time. 5233 * 5234 * Therefore if the inode is being evicted, let btrfs_invalidate_folio() 5235 * skip all those expensive operations on a per folio basis and do only 5236 * the ordered io finishing, while we release here the extent_map and 5237 * extent_state structures, without the excessive merging and splitting. 5238 */ 5239 static void evict_inode_truncate_pages(struct inode *inode) 5240 { 5241 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5242 struct rb_node *node; 5243 5244 ASSERT(inode->i_state & I_FREEING); 5245 truncate_inode_pages_final(&inode->i_data); 5246 5247 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 5248 5249 /* 5250 * Keep looping until we have no more ranges in the io tree. 5251 * We can have ongoing bios started by readahead that have 5252 * their endio callback (extent_io.c:end_bio_extent_readpage) 5253 * still in progress (unlocked the pages in the bio but have not yet 5254 * unlocked the ranges in the io tree). Therefore this means some 5255 * ranges can still be locked and eviction started because before 5256 * submitting those bios, which are executed by a separate task (work 5257 * queue kthread), inode references (inode->i_count) were not taken 5258 * (which would be dropped in the end io callback of each bio). 5259 * Therefore here we effectively end up waiting for those bios and 5260 * anyone else holding locked ranges without having bumped the inode's 5261 * reference count - if we don't do it, when they access the inode's 5262 * io_tree to unlock a range it may be too late, leading to a 5263 * use-after-free issue.
5264 */ 5265 spin_lock(&io_tree->lock); 5266 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5267 struct extent_state *state; 5268 struct extent_state *cached_state = NULL; 5269 u64 start; 5270 u64 end; 5271 unsigned state_flags; 5272 5273 node = rb_first(&io_tree->state); 5274 state = rb_entry(node, struct extent_state, rb_node); 5275 start = state->start; 5276 end = state->end; 5277 state_flags = state->state; 5278 spin_unlock(&io_tree->lock); 5279 5280 lock_extent(io_tree, start, end, &cached_state); 5281 5282 /* 5283 * If still has DELALLOC flag, the extent didn't reach disk, 5284 * and its reserved space won't be freed by delayed_ref. 5285 * So we need to free its reserved space here. 5286 * (Refer to comment in btrfs_invalidate_folio, case 2) 5287 * 5288 * Note, end is the bytenr of last byte, so we need + 1 here. 5289 */ 5290 if (state_flags & EXTENT_DELALLOC) 5291 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5292 end - start + 1); 5293 5294 clear_extent_bit(io_tree, start, end, 5295 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, 5296 &cached_state); 5297 5298 cond_resched(); 5299 spin_lock(&io_tree->lock); 5300 } 5301 spin_unlock(&io_tree->lock); 5302 } 5303 5304 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5305 struct btrfs_block_rsv *rsv) 5306 { 5307 struct btrfs_fs_info *fs_info = root->fs_info; 5308 struct btrfs_trans_handle *trans; 5309 u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1); 5310 int ret; 5311 5312 /* 5313 * Eviction should be taking place at some place safe because of our 5314 * delayed iputs. However the normal flushing code will run delayed 5315 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5316 * 5317 * We reserve the delayed_refs_extra here again because we can't use 5318 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5319 * above. We reserve our extra bit here because we generate a ton of 5320 * delayed refs activity by truncating. 5321 * 5322 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can, 5323 * if we fail to make this reservation we can re-try without the 5324 * delayed_refs_extra so we can make some forward progress. 
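*
* Concretely: the first btrfs_block_rsv_refill() below asks for
* rsv->size + delayed_refs_extra; if that fails we retry with just
* rsv->size and clear delayed_refs_extra.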
5325 */ 5326 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, 5327 BTRFS_RESERVE_FLUSH_EVICT); 5328 if (ret) { 5329 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, 5330 BTRFS_RESERVE_FLUSH_EVICT); 5331 if (ret) { 5332 btrfs_warn(fs_info, 5333 "could not allocate space for delete; will truncate on mount"); 5334 return ERR_PTR(-ENOSPC); 5335 } 5336 delayed_refs_extra = 0; 5337 } 5338 5339 trans = btrfs_join_transaction(root); 5340 if (IS_ERR(trans)) 5341 return trans; 5342 5343 if (delayed_refs_extra) { 5344 trans->block_rsv = &fs_info->trans_block_rsv; 5345 trans->bytes_reserved = delayed_refs_extra; 5346 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5347 delayed_refs_extra, true); 5348 } 5349 return trans; 5350 } 5351 5352 void btrfs_evict_inode(struct inode *inode) 5353 { 5354 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5355 struct btrfs_trans_handle *trans; 5356 struct btrfs_root *root = BTRFS_I(inode)->root; 5357 struct btrfs_block_rsv *rsv = NULL; 5358 int ret; 5359 5360 trace_btrfs_inode_evict(inode); 5361 5362 if (!root) { 5363 fsverity_cleanup_inode(inode); 5364 clear_inode(inode); 5365 return; 5366 } 5367 5368 evict_inode_truncate_pages(inode); 5369 5370 if (inode->i_nlink && 5371 ((btrfs_root_refs(&root->root_item) != 0 && 5372 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || 5373 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5374 goto out; 5375 5376 if (is_bad_inode(inode)) 5377 goto out; 5378 5379 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5380 goto out; 5381 5382 if (inode->i_nlink > 0) { 5383 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5384 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); 5385 goto out; 5386 } 5387 5388 /* 5389 * This makes sure the inode item in tree is uptodate and the space for 5390 * the inode update is released. 5391 */ 5392 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5393 if (ret) 5394 goto out; 5395 5396 /* 5397 * This drops any pending insert or delete operations we have for this 5398 * inode. We could have a delayed dir index deletion queued up, but 5399 * we're removing the inode completely so that'll be taken care of in 5400 * the truncate. 5401 */ 5402 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 5403 5404 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5405 if (!rsv) 5406 goto out; 5407 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 5408 rsv->failfast = true; 5409 5410 btrfs_i_size_write(BTRFS_I(inode), 0); 5411 5412 while (1) { 5413 struct btrfs_truncate_control control = { 5414 .inode = BTRFS_I(inode), 5415 .ino = btrfs_ino(BTRFS_I(inode)), 5416 .new_size = 0, 5417 .min_type = 0, 5418 }; 5419 5420 trans = evict_refill_and_join(root, rsv); 5421 if (IS_ERR(trans)) 5422 goto out; 5423 5424 trans->block_rsv = rsv; 5425 5426 ret = btrfs_truncate_inode_items(trans, root, &control); 5427 trans->block_rsv = &fs_info->trans_block_rsv; 5428 btrfs_end_transaction(trans); 5429 /* 5430 * We have not added new delayed items for our inode after we 5431 * have flushed its delayed items, so no need to throttle on 5432 * delayed items. However we have modified extent buffers. 5433 */ 5434 btrfs_btree_balance_dirty_nodelay(fs_info); 5435 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5436 goto out; 5437 else if (!ret) 5438 break; 5439 } 5440 5441 /* 5442 * Errors here aren't a big deal, it just means we leave orphan items in 5443 * the tree. They will be cleaned up on the next mount. 
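(That cleanup is done by btrfs_orphan_cleanup().)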
If the inode 5444 * number gets reused, cleanup deletes the orphan item without doing 5445 * anything, and unlink reuses the existing orphan item. 5446 * 5447 * If it turns out that we are dropping too many of these, we might want 5448 * to add a mechanism for retrying these after a commit. 5449 */ 5450 trans = evict_refill_and_join(root, rsv); 5451 if (!IS_ERR(trans)) { 5452 trans->block_rsv = rsv; 5453 btrfs_orphan_del(trans, BTRFS_I(inode)); 5454 trans->block_rsv = &fs_info->trans_block_rsv; 5455 btrfs_end_transaction(trans); 5456 } 5457 5458 out: 5459 btrfs_free_block_rsv(fs_info, rsv); 5460 /* 5461 * If we didn't successfully delete, the orphan item will still be in 5462 * the tree and we'll retry on the next mount. Again, we might also want 5463 * to retry these periodically in the future. 5464 */ 5465 btrfs_remove_delayed_node(BTRFS_I(inode)); 5466 fsverity_cleanup_inode(inode); 5467 clear_inode(inode); 5468 } 5469 5470 /* 5471 * Return the key found in the dir entry in the location pointer, fill @type 5472 * with BTRFS_FT_*, and return 0. 5473 * 5474 * If no dir entries were found, returns -ENOENT. 5475 * If a corrupted location is found in the dir entry, returns -EUCLEAN. 5476 */ 5477 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, 5478 struct btrfs_key *location, u8 *type) 5479 { 5480 struct btrfs_dir_item *di; 5481 struct btrfs_path *path; 5482 struct btrfs_root *root = dir->root; 5483 int ret = 0; 5484 struct fscrypt_name fname; 5485 5486 path = btrfs_alloc_path(); 5487 if (!path) 5488 return -ENOMEM; 5489 5490 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 5491 if (ret < 0) 5492 goto out; 5493 /* 5494 * fscrypt_setup_filename() should never return a positive value, but 5495 * gcc on sparc/parisc thinks it can, so assert that doesn't happen. 5496 */ 5497 ASSERT(ret == 0); 5498 5499 /* This needs to handle no-key deletions later on */ 5500 5501 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), 5502 &fname.disk_name, 0); 5503 if (IS_ERR_OR_NULL(di)) { 5504 ret = di ? PTR_ERR(di) : -ENOENT; 5505 goto out; 5506 } 5507 5508 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5509 if (location->type != BTRFS_INODE_ITEM_KEY && 5510 location->type != BTRFS_ROOT_ITEM_KEY) { 5511 ret = -EUCLEAN; 5512 btrfs_warn(root->fs_info, 5513 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5514 __func__, fname.disk_name.name, btrfs_ino(dir), 5515 location->objectid, location->type, location->offset); 5516 } 5517 if (!ret) 5518 *type = btrfs_dir_ftype(path->nodes[0], di); 5519 out: 5520 fscrypt_free_filename(&fname); 5521 btrfs_free_path(path); 5522 return ret; 5523 } 5524 5525 /* 5526 * when we hit a tree root in a directory, the btrfs part of the inode 5527 * needs to be changed to reflect the root directory of the tree root. This 5528 * is kind of like crossing a mount point.
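We do this by looking up the subvolume's ROOT_REF item in the tree root and, if the stored name matches, redirecting the location key to the root directory of the target subvolume.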
5529 */ 5530 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5531 struct btrfs_inode *dir, 5532 struct dentry *dentry, 5533 struct btrfs_key *location, 5534 struct btrfs_root **sub_root) 5535 { 5536 struct btrfs_path *path; 5537 struct btrfs_root *new_root; 5538 struct btrfs_root_ref *ref; 5539 struct extent_buffer *leaf; 5540 struct btrfs_key key; 5541 int ret; 5542 int err = 0; 5543 struct fscrypt_name fname; 5544 5545 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); 5546 if (ret) 5547 return ret; 5548 5549 path = btrfs_alloc_path(); 5550 if (!path) { 5551 err = -ENOMEM; 5552 goto out; 5553 } 5554 5555 err = -ENOENT; 5556 key.objectid = dir->root->root_key.objectid; 5557 key.type = BTRFS_ROOT_REF_KEY; 5558 key.offset = location->objectid; 5559 5560 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5561 if (ret) { 5562 if (ret < 0) 5563 err = ret; 5564 goto out; 5565 } 5566 5567 leaf = path->nodes[0]; 5568 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5569 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5570 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len) 5571 goto out; 5572 5573 ret = memcmp_extent_buffer(leaf, fname.disk_name.name, 5574 (unsigned long)(ref + 1), fname.disk_name.len); 5575 if (ret) 5576 goto out; 5577 5578 btrfs_release_path(path); 5579 5580 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5581 if (IS_ERR(new_root)) { 5582 err = PTR_ERR(new_root); 5583 goto out; 5584 } 5585 5586 *sub_root = new_root; 5587 location->objectid = btrfs_root_dirid(&new_root->root_item); 5588 location->type = BTRFS_INODE_ITEM_KEY; 5589 location->offset = 0; 5590 err = 0; 5591 out: 5592 btrfs_free_path(path); 5593 fscrypt_free_filename(&fname); 5594 return err; 5595 } 5596 5597 static void inode_tree_add(struct btrfs_inode *inode) 5598 { 5599 struct btrfs_root *root = inode->root; 5600 struct btrfs_inode *entry; 5601 struct rb_node **p; 5602 struct rb_node *parent; 5603 struct rb_node *new = &inode->rb_node; 5604 u64 ino = btrfs_ino(inode); 5605 5606 if (inode_unhashed(&inode->vfs_inode)) 5607 return; 5608 parent = NULL; 5609 spin_lock(&root->inode_lock); 5610 p = &root->inode_tree.rb_node; 5611 while (*p) { 5612 parent = *p; 5613 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5614 5615 if (ino < btrfs_ino(entry)) 5616 p = &parent->rb_left; 5617 else if (ino > btrfs_ino(entry)) 5618 p = &parent->rb_right; 5619 else { 5620 WARN_ON(!(entry->vfs_inode.i_state & 5621 (I_WILL_FREE | I_FREEING))); 5622 rb_replace_node(parent, new, &root->inode_tree); 5623 RB_CLEAR_NODE(parent); 5624 spin_unlock(&root->inode_lock); 5625 return; 5626 } 5627 } 5628 rb_link_node(new, parent, p); 5629 rb_insert_color(new, &root->inode_tree); 5630 spin_unlock(&root->inode_lock); 5631 } 5632 5633 static void inode_tree_del(struct btrfs_inode *inode) 5634 { 5635 struct btrfs_root *root = inode->root; 5636 int empty = 0; 5637 5638 spin_lock(&root->inode_lock); 5639 if (!RB_EMPTY_NODE(&inode->rb_node)) { 5640 rb_erase(&inode->rb_node, &root->inode_tree); 5641 RB_CLEAR_NODE(&inode->rb_node); 5642 empty = RB_EMPTY_ROOT(&root->inode_tree); 5643 } 5644 spin_unlock(&root->inode_lock); 5645 5646 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5647 spin_lock(&root->inode_lock); 5648 empty = RB_EMPTY_ROOT(&root->inode_tree); 5649 spin_unlock(&root->inode_lock); 5650 if (empty) 5651 btrfs_add_dead_root(root); 5652 } 5653 } 5654 5655 5656 static int btrfs_init_locked_inode(struct inode *inode, void *p) 
5657 { 5658 struct btrfs_iget_args *args = p; 5659 5660 inode->i_ino = args->ino; 5661 BTRFS_I(inode)->location.objectid = args->ino; 5662 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 5663 BTRFS_I(inode)->location.offset = 0; 5664 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5665 BUG_ON(args->root && !BTRFS_I(inode)->root); 5666 5667 if (args->root && args->root == args->root->fs_info->tree_root && 5668 args->ino != BTRFS_BTREE_INODE_OBJECTID) 5669 set_bit(BTRFS_INODE_FREE_SPACE_INODE, 5670 &BTRFS_I(inode)->runtime_flags); 5671 return 0; 5672 } 5673 5674 static int btrfs_find_actor(struct inode *inode, void *opaque) 5675 { 5676 struct btrfs_iget_args *args = opaque; 5677 5678 return args->ino == BTRFS_I(inode)->location.objectid && 5679 args->root == BTRFS_I(inode)->root; 5680 } 5681 5682 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino, 5683 struct btrfs_root *root) 5684 { 5685 struct inode *inode; 5686 struct btrfs_iget_args args; 5687 unsigned long hashval = btrfs_inode_hash(ino, root); 5688 5689 args.ino = ino; 5690 args.root = root; 5691 5692 inode = iget5_locked(s, hashval, btrfs_find_actor, 5693 btrfs_init_locked_inode, 5694 (void *)&args); 5695 return inode; 5696 } 5697 5698 /* 5699 * Get an inode object given its inode number and corresponding root. 5700 * Path can be preallocated to prevent recursing back to iget through 5701 * allocator. NULL is also valid but may require an additional allocation 5702 * later. 5703 */ 5704 struct inode *btrfs_iget_path(struct super_block *s, u64 ino, 5705 struct btrfs_root *root, struct btrfs_path *path) 5706 { 5707 struct inode *inode; 5708 5709 inode = btrfs_iget_locked(s, ino, root); 5710 if (!inode) 5711 return ERR_PTR(-ENOMEM); 5712 5713 if (inode->i_state & I_NEW) { 5714 int ret; 5715 5716 ret = btrfs_read_locked_inode(inode, path); 5717 if (!ret) { 5718 inode_tree_add(BTRFS_I(inode)); 5719 unlock_new_inode(inode); 5720 } else { 5721 iget_failed(inode); 5722 /* 5723 * ret > 0 can come from btrfs_search_slot called by 5724 * btrfs_read_locked_inode, this means the inode item 5725 * was not found. 
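Map that case to -ENOENT below so callers always get a proper errno.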
5726 */ 5727 if (ret > 0) 5728 ret = -ENOENT; 5729 inode = ERR_PTR(ret); 5730 } 5731 } 5732 5733 return inode; 5734 } 5735 5736 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root) 5737 { 5738 return btrfs_iget_path(s, ino, root, NULL); 5739 } 5740 5741 static struct inode *new_simple_dir(struct super_block *s, 5742 struct btrfs_key *key, 5743 struct btrfs_root *root) 5744 { 5745 struct inode *inode = new_inode(s); 5746 5747 if (!inode) 5748 return ERR_PTR(-ENOMEM); 5749 5750 BTRFS_I(inode)->root = btrfs_grab_root(root); 5751 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5752 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5753 5754 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5755 /* 5756 * We only need lookup, the rest is read-only and there's no inode 5757 * associated with the dentry 5758 */ 5759 inode->i_op = &simple_dir_inode_operations; 5760 inode->i_opflags &= ~IOP_XATTR; 5761 inode->i_fop = &simple_dir_operations; 5762 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5763 inode->i_mtime = current_time(inode); 5764 inode->i_atime = inode->i_mtime; 5765 inode->i_ctime = inode->i_mtime; 5766 BTRFS_I(inode)->i_otime = inode->i_mtime; 5767 5768 return inode; 5769 } 5770 5771 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN); 5772 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE); 5773 static_assert(BTRFS_FT_DIR == FT_DIR); 5774 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV); 5775 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV); 5776 static_assert(BTRFS_FT_FIFO == FT_FIFO); 5777 static_assert(BTRFS_FT_SOCK == FT_SOCK); 5778 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK); 5779 5780 static inline u8 btrfs_inode_type(struct inode *inode) 5781 { 5782 return fs_umode_to_ftype(inode->i_mode); 5783 } 5784 5785 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5786 { 5787 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 5788 struct inode *inode; 5789 struct btrfs_root *root = BTRFS_I(dir)->root; 5790 struct btrfs_root *sub_root = root; 5791 struct btrfs_key location; 5792 u8 di_type = 0; 5793 int ret = 0; 5794 5795 if (dentry->d_name.len > BTRFS_NAME_LEN) 5796 return ERR_PTR(-ENAMETOOLONG); 5797 5798 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type); 5799 if (ret < 0) 5800 return ERR_PTR(ret); 5801 5802 if (location.type == BTRFS_INODE_ITEM_KEY) { 5803 inode = btrfs_iget(dir->i_sb, location.objectid, root); 5804 if (IS_ERR(inode)) 5805 return inode; 5806 5807 /* Do extra check against inode mode with di_type */ 5808 if (btrfs_inode_type(inode) != di_type) { 5809 btrfs_crit(fs_info, 5810 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5811 inode->i_mode, btrfs_inode_type(inode), 5812 di_type); 5813 iput(inode); 5814 return ERR_PTR(-EUCLEAN); 5815 } 5816 return inode; 5817 } 5818 5819 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry, 5820 &location, &sub_root); 5821 if (ret < 0) { 5822 if (ret != -ENOENT) 5823 inode = ERR_PTR(ret); 5824 else 5825 inode = new_simple_dir(dir->i_sb, &location, root); 5826 } else { 5827 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); 5828 btrfs_put_root(sub_root); 5829 5830 if (IS_ERR(inode)) 5831 return inode; 5832 5833 down_read(&fs_info->cleanup_work_sem); 5834 if (!sb_rdonly(inode->i_sb)) 5835 ret = btrfs_orphan_cleanup(sub_root); 5836 up_read(&fs_info->cleanup_work_sem); 5837 if (ret) { 5838 iput(inode); 5839 inode = ERR_PTR(ret); 5840 } 5841 } 5842 5843 return inode; 5844 } 5845 5846 static int btrfs_dentry_delete(const 
struct dentry *dentry) 5847 { 5848 struct btrfs_root *root; 5849 struct inode *inode = d_inode(dentry); 5850 5851 if (!inode && !IS_ROOT(dentry)) 5852 inode = d_inode(dentry->d_parent); 5853 5854 if (inode) { 5855 root = BTRFS_I(inode)->root; 5856 if (btrfs_root_refs(&root->root_item) == 0) 5857 return 1; 5858 5859 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5860 return 1; 5861 } 5862 return 0; 5863 } 5864 5865 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5866 unsigned int flags) 5867 { 5868 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5869 5870 if (inode == ERR_PTR(-ENOENT)) 5871 inode = NULL; 5872 return d_splice_alias(inode, dentry); 5873 } 5874 5875 /* 5876 * Find the highest existing sequence number in a directory and then set the 5877 * in-memory index_cnt variable to the first free sequence number. 5878 */ 5879 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 5880 { 5881 struct btrfs_root *root = inode->root; 5882 struct btrfs_key key, found_key; 5883 struct btrfs_path *path; 5884 struct extent_buffer *leaf; 5885 int ret; 5886 5887 key.objectid = btrfs_ino(inode); 5888 key.type = BTRFS_DIR_INDEX_KEY; 5889 key.offset = (u64)-1; 5890 5891 path = btrfs_alloc_path(); 5892 if (!path) 5893 return -ENOMEM; 5894 5895 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5896 if (ret < 0) 5897 goto out; 5898 /* FIXME: we should be able to handle this */ 5899 if (ret == 0) 5900 goto out; 5901 ret = 0; 5902 5903 if (path->slots[0] == 0) { 5904 inode->index_cnt = BTRFS_DIR_START_INDEX; 5905 goto out; 5906 } 5907 5908 path->slots[0]--; 5909 5910 leaf = path->nodes[0]; 5911 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5912 5913 if (found_key.objectid != btrfs_ino(inode) || 5914 found_key.type != BTRFS_DIR_INDEX_KEY) { 5915 inode->index_cnt = BTRFS_DIR_START_INDEX; 5916 goto out; 5917 } 5918 5919 inode->index_cnt = found_key.offset + 1; 5920 out: 5921 btrfs_free_path(path); 5922 return ret; 5923 } 5924 5925 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) 5926 { 5927 if (dir->index_cnt == (u64)-1) { 5928 int ret; 5929 5930 ret = btrfs_inode_delayed_dir_index_count(dir); 5931 if (ret) { 5932 ret = btrfs_set_inode_index_count(dir); 5933 if (ret) 5934 return ret; 5935 } 5936 } 5937 5938 *index = dir->index_cnt; 5939 5940 return 0; 5941 } 5942 5943 /* 5944 * All this infrastructure exists because dir_emit can fault, and we are holding 5945 * the tree lock when doing readdir. For now just allocate a buffer and copy 5946 * our information into that, and then dir_emit from the buffer. This is 5947 * similar to what NFS does, only we don't keep the buffer around in pagecache 5948 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5949 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5950 * tree lock. 
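The buffer is allocated per open directory in btrfs_opendir() below, sized to one page, and btrfs_real_readdir() flushes it via btrfs_filldir() whenever the next entry would overflow it.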
5951 */ 5952 static int btrfs_opendir(struct inode *inode, struct file *file) 5953 { 5954 struct btrfs_file_private *private; 5955 u64 last_index; 5956 int ret; 5957 5958 ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index); 5959 if (ret) 5960 return ret; 5961 5962 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5963 if (!private) 5964 return -ENOMEM; 5965 private->last_index = last_index; 5966 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5967 if (!private->filldir_buf) { 5968 kfree(private); 5969 return -ENOMEM; 5970 } 5971 file->private_data = private; 5972 return 0; 5973 } 5974 5975 struct dir_entry { 5976 u64 ino; 5977 u64 offset; 5978 unsigned type; 5979 int name_len; 5980 }; 5981 5982 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5983 { 5984 while (entries--) { 5985 struct dir_entry *entry = addr; 5986 char *name = (char *)(entry + 1); 5987 5988 ctx->pos = get_unaligned(&entry->offset); 5989 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5990 get_unaligned(&entry->ino), 5991 get_unaligned(&entry->type))) 5992 return 1; 5993 addr += sizeof(struct dir_entry) + 5994 get_unaligned(&entry->name_len); 5995 ctx->pos++; 5996 } 5997 return 0; 5998 } 5999 6000 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 6001 { 6002 struct inode *inode = file_inode(file); 6003 struct btrfs_root *root = BTRFS_I(inode)->root; 6004 struct btrfs_file_private *private = file->private_data; 6005 struct btrfs_dir_item *di; 6006 struct btrfs_key key; 6007 struct btrfs_key found_key; 6008 struct btrfs_path *path; 6009 void *addr; 6010 struct list_head ins_list; 6011 struct list_head del_list; 6012 int ret; 6013 char *name_ptr; 6014 int name_len; 6015 int entries = 0; 6016 int total_len = 0; 6017 bool put = false; 6018 struct btrfs_key location; 6019 6020 if (!dir_emit_dots(file, ctx)) 6021 return 0; 6022 6023 path = btrfs_alloc_path(); 6024 if (!path) 6025 return -ENOMEM; 6026 6027 addr = private->filldir_buf; 6028 path->reada = READA_FORWARD; 6029 6030 INIT_LIST_HEAD(&ins_list); 6031 INIT_LIST_HEAD(&del_list); 6032 put = btrfs_readdir_get_delayed_items(inode, private->last_index, 6033 &ins_list, &del_list); 6034 6035 again: 6036 key.type = BTRFS_DIR_INDEX_KEY; 6037 key.offset = ctx->pos; 6038 key.objectid = btrfs_ino(BTRFS_I(inode)); 6039 6040 btrfs_for_each_slot(root, &key, &found_key, path, ret) { 6041 struct dir_entry *entry; 6042 struct extent_buffer *leaf = path->nodes[0]; 6043 u8 ftype; 6044 6045 if (found_key.objectid != key.objectid) 6046 break; 6047 if (found_key.type != BTRFS_DIR_INDEX_KEY) 6048 break; 6049 if (found_key.offset < ctx->pos) 6050 continue; 6051 if (found_key.offset > private->last_index) 6052 break; 6053 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 6054 continue; 6055 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 6056 name_len = btrfs_dir_name_len(leaf, di); 6057 if ((total_len + sizeof(struct dir_entry) + name_len) >= 6058 PAGE_SIZE) { 6059 btrfs_release_path(path); 6060 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6061 if (ret) 6062 goto nopos; 6063 addr = private->filldir_buf; 6064 entries = 0; 6065 total_len = 0; 6066 goto again; 6067 } 6068 6069 ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di)); 6070 entry = addr; 6071 name_ptr = (char *)(entry + 1); 6072 read_extent_buffer(leaf, name_ptr, 6073 (unsigned long)(di + 1), name_len); 6074 put_unaligned(name_len, &entry->name_len); 6075 put_unaligned(fs_ftype_to_dtype(ftype), 
&entry->type); 6076 btrfs_dir_item_key_to_cpu(leaf, di, &location); 6077 put_unaligned(location.objectid, &entry->ino); 6078 put_unaligned(found_key.offset, &entry->offset); 6079 entries++; 6080 addr += sizeof(struct dir_entry) + name_len; 6081 total_len += sizeof(struct dir_entry) + name_len; 6082 } 6083 /* Catch error encountered during iteration */ 6084 if (ret < 0) 6085 goto err; 6086 6087 btrfs_release_path(path); 6088 6089 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6090 if (ret) 6091 goto nopos; 6092 6093 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 6094 if (ret) 6095 goto nopos; 6096 6097 /* 6098 * Stop new entries from being returned after we return the last 6099 * entry. 6100 * 6101 * New directory entries are assigned a strictly increasing 6102 * offset. This means that new entries created during readdir 6103 * are *guaranteed* to be seen in the future by that readdir. 6104 * This has broken buggy programs which operate on names as 6105 * they're returned by readdir. Until we re-use freed offsets 6106 * we have this hack to stop new entries from being returned 6107 * under the assumption that they'll never reach this huge 6108 * offset. 6109 * 6110 * This is being careful not to overflow 32bit loff_t unless the 6111 * last entry requires it because doing so has broken 32bit apps 6112 * in the past. 6113 */ 6114 if (ctx->pos >= INT_MAX) 6115 ctx->pos = LLONG_MAX; 6116 else 6117 ctx->pos = INT_MAX; 6118 nopos: 6119 ret = 0; 6120 err: 6121 if (put) 6122 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); 6123 btrfs_free_path(path); 6124 return ret; 6125 } 6126 6127 /* 6128 * This is somewhat expensive, updating the tree every time the 6129 * inode changes. But, it is most likely to find the inode in cache. 6130 * FIXME, needs more benchmarking...there are no reasons other than performance 6131 * to keep or drop this code. 6132 */ 6133 static int btrfs_dirty_inode(struct btrfs_inode *inode) 6134 { 6135 struct btrfs_root *root = inode->root; 6136 struct btrfs_fs_info *fs_info = root->fs_info; 6137 struct btrfs_trans_handle *trans; 6138 int ret; 6139 6140 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) 6141 return 0; 6142 6143 trans = btrfs_join_transaction(root); 6144 if (IS_ERR(trans)) 6145 return PTR_ERR(trans); 6146 6147 ret = btrfs_update_inode(trans, root, inode); 6148 if (ret && (ret == -ENOSPC || ret == -EDQUOT)) { 6149 /* whoops, let's try again with the full transaction */ 6150 btrfs_end_transaction(trans); 6151 trans = btrfs_start_transaction(root, 1); 6152 if (IS_ERR(trans)) 6153 return PTR_ERR(trans); 6154 6155 ret = btrfs_update_inode(trans, root, inode); 6156 } 6157 btrfs_end_transaction(trans); 6158 if (inode->delayed_node) 6159 btrfs_balance_delayed_items(fs_info); 6160 6161 return ret; 6162 } 6163 6164 /* 6165 * This is a copy of file_update_time. We need this so we can return an error 6166 * on ENOSPC for updating the inode in the case of file writes and mmap writes. 6167 */ 6168 static int btrfs_update_time(struct inode *inode, struct timespec64 *now, 6169 int flags) 6170 { 6171 struct btrfs_root *root = BTRFS_I(inode)->root; 6172 bool dirty = flags & ~S_VERSION; 6173 6174 if (btrfs_root_readonly(root)) 6175 return -EROFS; 6176 6177 if (flags & S_VERSION) 6178 dirty |= inode_maybe_inc_iversion(inode, dirty); 6179 if (flags & S_CTIME) 6180 inode->i_ctime = *now; 6181 if (flags & S_MTIME) 6182 inode->i_mtime = *now; 6183 if (flags & S_ATIME) 6184 inode->i_atime = *now; 6185 return dirty ?
btrfs_dirty_inode(BTRFS_I(inode)) : 0; 6186 } 6187 6188 /* 6189 * helper to find a free sequence number in a given directory. This current 6190 * code is very simple, later versions will do smarter things in the btree 6191 */ 6192 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6193 { 6194 int ret = 0; 6195 6196 if (dir->index_cnt == (u64)-1) { 6197 ret = btrfs_inode_delayed_dir_index_count(dir); 6198 if (ret) { 6199 ret = btrfs_set_inode_index_count(dir); 6200 if (ret) 6201 return ret; 6202 } 6203 } 6204 6205 *index = dir->index_cnt; 6206 dir->index_cnt++; 6207 6208 return ret; 6209 } 6210 6211 static int btrfs_insert_inode_locked(struct inode *inode) 6212 { 6213 struct btrfs_iget_args args; 6214 6215 args.ino = BTRFS_I(inode)->location.objectid; 6216 args.root = BTRFS_I(inode)->root; 6217 6218 return insert_inode_locked4(inode, 6219 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6220 btrfs_find_actor, &args); 6221 } 6222 6223 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, 6224 unsigned int *trans_num_items) 6225 { 6226 struct inode *dir = args->dir; 6227 struct inode *inode = args->inode; 6228 int ret; 6229 6230 if (!args->orphan) { 6231 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, 6232 &args->fname); 6233 if (ret) 6234 return ret; 6235 } 6236 6237 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); 6238 if (ret) { 6239 fscrypt_free_filename(&args->fname); 6240 return ret; 6241 } 6242 6243 /* 1 to add inode item */ 6244 *trans_num_items = 1; 6245 /* 1 to add compression property */ 6246 if (BTRFS_I(dir)->prop_compress) 6247 (*trans_num_items)++; 6248 /* 1 to add default ACL xattr */ 6249 if (args->default_acl) 6250 (*trans_num_items)++; 6251 /* 1 to add access ACL xattr */ 6252 if (args->acl) 6253 (*trans_num_items)++; 6254 #ifdef CONFIG_SECURITY 6255 /* 1 to add LSM xattr */ 6256 if (dir->i_security) 6257 (*trans_num_items)++; 6258 #endif 6259 if (args->orphan) { 6260 /* 1 to add orphan item */ 6261 (*trans_num_items)++; 6262 } else { 6263 /* 6264 * 1 to add dir item 6265 * 1 to add dir index 6266 * 1 to update parent inode item 6267 * 6268 * No need for 1 unit for the inode ref item because it is 6269 * inserted in a batch together with the inode item at 6270 * btrfs_create_new_inode(). 6271 */ 6272 *trans_num_items += 3; 6273 } 6274 return 0; 6275 } 6276 6277 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) 6278 { 6279 posix_acl_release(args->acl); 6280 posix_acl_release(args->default_acl); 6281 fscrypt_free_filename(&args->fname); 6282 } 6283 6284 /* 6285 * Inherit flags from the parent inode. 6286 * 6287 * Currently only the compression flags and the cow flags are inherited. 
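NOCOMPRESS takes precedence over COMPRESS, and NODATACOW on a regular file also implies NODATASUM, matching the checks in the function body below.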
6288 */ 6289 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir) 6290 { 6291 unsigned int flags; 6292 6293 flags = dir->flags; 6294 6295 if (flags & BTRFS_INODE_NOCOMPRESS) { 6296 inode->flags &= ~BTRFS_INODE_COMPRESS; 6297 inode->flags |= BTRFS_INODE_NOCOMPRESS; 6298 } else if (flags & BTRFS_INODE_COMPRESS) { 6299 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; 6300 inode->flags |= BTRFS_INODE_COMPRESS; 6301 } 6302 6303 if (flags & BTRFS_INODE_NODATACOW) { 6304 inode->flags |= BTRFS_INODE_NODATACOW; 6305 if (S_ISREG(inode->vfs_inode.i_mode)) 6306 inode->flags |= BTRFS_INODE_NODATASUM; 6307 } 6308 6309 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode); 6310 } 6311 6312 int btrfs_create_new_inode(struct btrfs_trans_handle *trans, 6313 struct btrfs_new_inode_args *args) 6314 { 6315 struct inode *dir = args->dir; 6316 struct inode *inode = args->inode; 6317 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name; 6318 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6319 struct btrfs_root *root; 6320 struct btrfs_inode_item *inode_item; 6321 struct btrfs_key *location; 6322 struct btrfs_path *path; 6323 u64 objectid; 6324 struct btrfs_inode_ref *ref; 6325 struct btrfs_key key[2]; 6326 u32 sizes[2]; 6327 struct btrfs_item_batch batch; 6328 unsigned long ptr; 6329 int ret; 6330 6331 path = btrfs_alloc_path(); 6332 if (!path) 6333 return -ENOMEM; 6334 6335 if (!args->subvol) 6336 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); 6337 root = BTRFS_I(inode)->root; 6338 6339 ret = btrfs_get_free_objectid(root, &objectid); 6340 if (ret) 6341 goto out; 6342 inode->i_ino = objectid; 6343 6344 if (args->orphan) { 6345 /* 6346 * O_TMPFILE, set link count to 0, so that after this point, we 6347 * fill in an inode item with the correct link count. 6348 */ 6349 set_nlink(inode, 0); 6350 } else { 6351 trace_btrfs_inode_request(dir); 6352 6353 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); 6354 if (ret) 6355 goto out; 6356 } 6357 /* index_cnt is ignored for everything but a dir. */ 6358 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; 6359 BTRFS_I(inode)->generation = trans->transid; 6360 inode->i_generation = BTRFS_I(inode)->generation; 6361 6362 /* 6363 * Subvolumes don't inherit flags from their parent directory. 6364 * Originally this was probably by accident, but we probably can't 6365 * change it now without compatibility issues. 6366 */ 6367 if (!args->subvol) 6368 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir)); 6369 6370 if (S_ISREG(inode->i_mode)) { 6371 if (btrfs_test_opt(fs_info, NODATASUM)) 6372 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6373 if (btrfs_test_opt(fs_info, NODATACOW)) 6374 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6375 BTRFS_INODE_NODATASUM; 6376 } 6377 6378 location = &BTRFS_I(inode)->location; 6379 location->objectid = objectid; 6380 location->offset = 0; 6381 location->type = BTRFS_INODE_ITEM_KEY; 6382 6383 ret = btrfs_insert_inode_locked(inode); 6384 if (ret < 0) { 6385 if (!args->orphan) 6386 BTRFS_I(dir)->index_cnt--; 6387 goto out; 6388 } 6389 6390 /* 6391 * We could have gotten an inode number from somebody who was fsynced 6392 * and then removed in this same transaction, so let's just set full 6393 * sync since it will be a full sync anyway and this will blow away the 6394 * old info in the log. 
6395 */ 6396 btrfs_set_inode_full_sync(BTRFS_I(inode)); 6397 6398 key[0].objectid = objectid; 6399 key[0].type = BTRFS_INODE_ITEM_KEY; 6400 key[0].offset = 0; 6401 6402 sizes[0] = sizeof(struct btrfs_inode_item); 6403 6404 if (!args->orphan) { 6405 /* 6406 * Start new inodes with an inode_ref. This is slightly more 6407 * efficient for small numbers of hard links since they will 6408 * be packed into one item. Extended refs will kick in if we 6409 * add more hard links than can fit in the ref item. 6410 */ 6411 key[1].objectid = objectid; 6412 key[1].type = BTRFS_INODE_REF_KEY; 6413 if (args->subvol) { 6414 key[1].offset = objectid; 6415 sizes[1] = 2 + sizeof(*ref); 6416 } else { 6417 key[1].offset = btrfs_ino(BTRFS_I(dir)); 6418 sizes[1] = name->len + sizeof(*ref); 6419 } 6420 } 6421 6422 batch.keys = &key[0]; 6423 batch.data_sizes = &sizes[0]; 6424 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); 6425 batch.nr = args->orphan ? 1 : 2; 6426 ret = btrfs_insert_empty_items(trans, root, path, &batch); 6427 if (ret != 0) { 6428 btrfs_abort_transaction(trans, ret); 6429 goto discard; 6430 } 6431 6432 inode->i_mtime = current_time(inode); 6433 inode->i_atime = inode->i_mtime; 6434 inode->i_ctime = inode->i_mtime; 6435 BTRFS_I(inode)->i_otime = inode->i_mtime; 6436 6437 /* 6438 * We're going to fill the inode item now, so at this point the inode 6439 * must be fully initialized. 6440 */ 6441 6442 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6443 struct btrfs_inode_item); 6444 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6445 sizeof(*inode_item)); 6446 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6447 6448 if (!args->orphan) { 6449 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6450 struct btrfs_inode_ref); 6451 ptr = (unsigned long)(ref + 1); 6452 if (args->subvol) { 6453 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); 6454 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); 6455 write_extent_buffer(path->nodes[0], "..", ptr, 2); 6456 } else { 6457 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 6458 name->len); 6459 btrfs_set_inode_ref_index(path->nodes[0], ref, 6460 BTRFS_I(inode)->dir_index); 6461 write_extent_buffer(path->nodes[0], name->name, ptr, 6462 name->len); 6463 } 6464 } 6465 6466 btrfs_mark_buffer_dirty(path->nodes[0]); 6467 /* 6468 * We don't need the path anymore, plus inheriting properties, adding 6469 * ACLs, security xattrs, orphan item or adding the link, will result in 6470 * allocating yet another path. So just free our path. 6471 */ 6472 btrfs_free_path(path); 6473 path = NULL; 6474 6475 if (args->subvol) { 6476 struct inode *parent; 6477 6478 /* 6479 * Subvolumes inherit properties from their parent subvolume, 6480 * not the directory they were created in. 6481 */ 6482 parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, 6483 BTRFS_I(dir)->root); 6484 if (IS_ERR(parent)) { 6485 ret = PTR_ERR(parent); 6486 } else { 6487 ret = btrfs_inode_inherit_props(trans, inode, parent); 6488 iput(parent); 6489 } 6490 } else { 6491 ret = btrfs_inode_inherit_props(trans, inode, dir); 6492 } 6493 if (ret) { 6494 btrfs_err(fs_info, 6495 "error inheriting props for ino %llu (root %llu): %d", 6496 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, 6497 ret); 6498 } 6499 6500 /* 6501 * Subvolumes don't inherit ACLs or get passed to the LSM. This is 6502 * probably a bug. 
6503 */ 6504 if (!args->subvol) { 6505 ret = btrfs_init_inode_security(trans, args); 6506 if (ret) { 6507 btrfs_abort_transaction(trans, ret); 6508 goto discard; 6509 } 6510 } 6511 6512 inode_tree_add(BTRFS_I(inode)); 6513 6514 trace_btrfs_inode_new(inode); 6515 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6516 6517 btrfs_update_root_times(trans, root); 6518 6519 if (args->orphan) { 6520 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 6521 } else { 6522 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 6523 0, BTRFS_I(inode)->dir_index); 6524 } 6525 if (ret) { 6526 btrfs_abort_transaction(trans, ret); 6527 goto discard; 6528 } 6529 6530 return 0; 6531 6532 discard: 6533 /* 6534 * discard_new_inode() calls iput(), but the caller owns the reference 6535 * to the inode. 6536 */ 6537 ihold(inode); 6538 discard_new_inode(inode); 6539 out: 6540 btrfs_free_path(path); 6541 return ret; 6542 } 6543 6544 /* 6545 * utility function to add 'inode' into 'parent_inode' with 6546 * a given name and a given sequence number. 6547 * if 'add_backref' is true, also insert a backref from the 6548 * inode to the parent directory. 6549 */ 6550 int btrfs_add_link(struct btrfs_trans_handle *trans, 6551 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6552 const struct fscrypt_str *name, int add_backref, u64 index) 6553 { 6554 int ret = 0; 6555 struct btrfs_key key; 6556 struct btrfs_root *root = parent_inode->root; 6557 u64 ino = btrfs_ino(inode); 6558 u64 parent_ino = btrfs_ino(parent_inode); 6559 6560 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6561 memcpy(&key, &inode->root->root_key, sizeof(key)); 6562 } else { 6563 key.objectid = ino; 6564 key.type = BTRFS_INODE_ITEM_KEY; 6565 key.offset = 0; 6566 } 6567 6568 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6569 ret = btrfs_add_root_ref(trans, key.objectid, 6570 root->root_key.objectid, parent_ino, 6571 index, name); 6572 } else if (add_backref) { 6573 ret = btrfs_insert_inode_ref(trans, root, name, 6574 ino, parent_ino, index); 6575 } 6576 6577 /* Nothing to clean up yet */ 6578 if (ret) 6579 return ret; 6580 6581 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key, 6582 btrfs_inode_type(&inode->vfs_inode), index); 6583 if (ret == -EEXIST || ret == -EOVERFLOW) 6584 goto fail_dir_item; 6585 else if (ret) { 6586 btrfs_abort_transaction(trans, ret); 6587 return ret; 6588 } 6589 6590 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6591 name->len * 2); 6592 inode_inc_iversion(&parent_inode->vfs_inode); 6593 /* 6594 * If we are replaying a log tree, we do not want to update the mtime 6595 * and ctime of the parent directory with the current time, since the 6596 * log replay procedure is responsible for setting them to their correct 6597 * values (the ones it had when the fsync was done).
6598 */ 6599 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) { 6600 struct timespec64 now = current_time(&parent_inode->vfs_inode); 6601 6602 parent_inode->vfs_inode.i_mtime = now; 6603 parent_inode->vfs_inode.i_ctime = now; 6604 } 6605 ret = btrfs_update_inode(trans, root, parent_inode); 6606 if (ret) 6607 btrfs_abort_transaction(trans, ret); 6608 return ret; 6609 6610 fail_dir_item: 6611 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6612 u64 local_index; 6613 int err; 6614 err = btrfs_del_root_ref(trans, key.objectid, 6615 root->root_key.objectid, parent_ino, 6616 &local_index, name); 6617 if (err) 6618 btrfs_abort_transaction(trans, err); 6619 } else if (add_backref) { 6620 u64 local_index; 6621 int err; 6622 6623 err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, 6624 &local_index); 6625 if (err) 6626 btrfs_abort_transaction(trans, err); 6627 } 6628 6629 /* Return the original error code */ 6630 return ret; 6631 } 6632 6633 static int btrfs_create_common(struct inode *dir, struct dentry *dentry, 6634 struct inode *inode) 6635 { 6636 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6637 struct btrfs_root *root = BTRFS_I(dir)->root; 6638 struct btrfs_new_inode_args new_inode_args = { 6639 .dir = dir, 6640 .dentry = dentry, 6641 .inode = inode, 6642 }; 6643 unsigned int trans_num_items; 6644 struct btrfs_trans_handle *trans; 6645 int err; 6646 6647 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 6648 if (err) 6649 goto out_inode; 6650 6651 trans = btrfs_start_transaction(root, trans_num_items); 6652 if (IS_ERR(trans)) { 6653 err = PTR_ERR(trans); 6654 goto out_new_inode_args; 6655 } 6656 6657 err = btrfs_create_new_inode(trans, &new_inode_args); 6658 if (!err) 6659 d_instantiate_new(dentry, inode); 6660 6661 btrfs_end_transaction(trans); 6662 btrfs_btree_balance_dirty(fs_info); 6663 out_new_inode_args: 6664 btrfs_new_inode_args_destroy(&new_inode_args); 6665 out_inode: 6666 if (err) 6667 iput(inode); 6668 return err; 6669 } 6670 6671 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir, 6672 struct dentry *dentry, umode_t mode, dev_t rdev) 6673 { 6674 struct inode *inode; 6675 6676 inode = new_inode(dir->i_sb); 6677 if (!inode) 6678 return -ENOMEM; 6679 inode_init_owner(idmap, inode, dir, mode); 6680 inode->i_op = &btrfs_special_inode_operations; 6681 init_special_inode(inode, inode->i_mode, rdev); 6682 return btrfs_create_common(dir, dentry, inode); 6683 } 6684 6685 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir, 6686 struct dentry *dentry, umode_t mode, bool excl) 6687 { 6688 struct inode *inode; 6689 6690 inode = new_inode(dir->i_sb); 6691 if (!inode) 6692 return -ENOMEM; 6693 inode_init_owner(idmap, inode, dir, mode); 6694 inode->i_fop = &btrfs_file_operations; 6695 inode->i_op = &btrfs_file_inode_operations; 6696 inode->i_mapping->a_ops = &btrfs_aops; 6697 return btrfs_create_common(dir, dentry, inode); 6698 } 6699 6700 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6701 struct dentry *dentry) 6702 { 6703 struct btrfs_trans_handle *trans = NULL; 6704 struct btrfs_root *root = BTRFS_I(dir)->root; 6705 struct inode *inode = d_inode(old_dentry); 6706 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 6707 struct fscrypt_name fname; 6708 u64 index; 6709 int err; 6710 int drop_inode = 0; 6711 6712 /* do not allow sys_link's with other subvols of the same device */ 6713 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) 6714 return -EXDEV; 6715 6716 if 
(inode->i_nlink >= BTRFS_LINK_MAX) 6717 return -EMLINK; 6718 6719 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); 6720 if (err) 6721 goto fail; 6722 6723 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6724 if (err) 6725 goto fail; 6726 6727 /* 6728 * 2 items for inode and inode ref 6729 * 2 items for dir items 6730 * 1 item for parent inode 6731 * 1 item for orphan item deletion if O_TMPFILE 6732 */ 6733 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6734 if (IS_ERR(trans)) { 6735 err = PTR_ERR(trans); 6736 trans = NULL; 6737 goto fail; 6738 } 6739 6740 /* There are several dir indexes for this inode, clear the cache. */ 6741 BTRFS_I(inode)->dir_index = 0ULL; 6742 inc_nlink(inode); 6743 inode_inc_iversion(inode); 6744 inode->i_ctime = current_time(inode); 6745 ihold(inode); 6746 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6747 6748 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6749 &fname.disk_name, 1, index); 6750 6751 if (err) { 6752 drop_inode = 1; 6753 } else { 6754 struct dentry *parent = dentry->d_parent; 6755 6756 err = btrfs_update_inode(trans, root, BTRFS_I(inode)); 6757 if (err) 6758 goto fail; 6759 if (inode->i_nlink == 1) { 6760 /* 6761 * If new hard link count is 1, it's a file created 6762 * with open(2) O_TMPFILE flag. 6763 */ 6764 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6765 if (err) 6766 goto fail; 6767 } 6768 d_instantiate(dentry, inode); 6769 btrfs_log_new_name(trans, old_dentry, NULL, 0, parent); 6770 } 6771 6772 fail: 6773 fscrypt_free_filename(&fname); 6774 if (trans) 6775 btrfs_end_transaction(trans); 6776 if (drop_inode) { 6777 inode_dec_link_count(inode); 6778 iput(inode); 6779 } 6780 btrfs_btree_balance_dirty(fs_info); 6781 return err; 6782 } 6783 6784 static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, 6785 struct dentry *dentry, umode_t mode) 6786 { 6787 struct inode *inode; 6788 6789 inode = new_inode(dir->i_sb); 6790 if (!inode) 6791 return -ENOMEM; 6792 inode_init_owner(idmap, inode, dir, S_IFDIR | mode); 6793 inode->i_op = &btrfs_dir_inode_operations; 6794 inode->i_fop = &btrfs_dir_file_operations; 6795 return btrfs_create_common(dir, dentry, inode); 6796 } 6797 6798 static noinline int uncompress_inline(struct btrfs_path *path, 6799 struct page *page, 6800 struct btrfs_file_extent_item *item) 6801 { 6802 int ret; 6803 struct extent_buffer *leaf = path->nodes[0]; 6804 char *tmp; 6805 size_t max_size; 6806 unsigned long inline_size; 6807 unsigned long ptr; 6808 int compress_type; 6809 6810 compress_type = btrfs_file_extent_compression(leaf, item); 6811 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6812 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 6813 tmp = kmalloc(inline_size, GFP_NOFS); 6814 if (!tmp) 6815 return -ENOMEM; 6816 ptr = btrfs_file_extent_inline_start(item); 6817 6818 read_extent_buffer(leaf, tmp, ptr, inline_size); 6819 6820 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6821 ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size); 6822 6823 /* 6824 * decompression code contains a memset to fill in any space between the end 6825 * of the uncompressed data and the end of max_size in case the decompressed 6826 * data ends up shorter than ram_bytes. That doesn't cover the hole between 6827 * the end of an inline extent and the beginning of the next block, so we 6828 * cover that region here. 
6829 */ 6830 6831 if (max_size < PAGE_SIZE) 6832 memzero_page(page, max_size, PAGE_SIZE - max_size); 6833 kfree(tmp); 6834 return ret; 6835 } 6836 6837 static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path, 6838 struct page *page) 6839 { 6840 struct btrfs_file_extent_item *fi; 6841 void *kaddr; 6842 size_t copy_size; 6843 6844 if (!page || PageUptodate(page)) 6845 return 0; 6846 6847 ASSERT(page_offset(page) == 0); 6848 6849 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 6850 struct btrfs_file_extent_item); 6851 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) 6852 return uncompress_inline(path, page, fi); 6853 6854 copy_size = min_t(u64, PAGE_SIZE, 6855 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); 6856 kaddr = kmap_local_page(page); 6857 read_extent_buffer(path->nodes[0], kaddr, 6858 btrfs_file_extent_inline_start(fi), copy_size); 6859 kunmap_local(kaddr); 6860 if (copy_size < PAGE_SIZE) 6861 memzero_page(page, copy_size, PAGE_SIZE - copy_size); 6862 return 0; 6863 } 6864 6865 /* 6866 * Lookup the first extent overlapping a range in a file. 6867 * 6868 * @inode: file to search in 6869 * @page: page to read extent data into if the extent is inline 6870 * @pg_offset: offset into @page to copy to 6871 * @start: file offset 6872 * @len: length of range starting at @start 6873 * 6874 * Return the first &struct extent_map which overlaps the given range, reading 6875 * it from the B-tree and caching it if necessary. Note that there may be more 6876 * extents which overlap the given range after the returned extent_map. 6877 * 6878 * If @page is not NULL and the extent is inline, this also reads the extent 6879 * data directly into the page and marks the extent up to date in the io_tree. 6880 * 6881 * Return: ERR_PTR on error, non-NULL extent_map on success. 6882 */ 6883 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6884 struct page *page, size_t pg_offset, 6885 u64 start, u64 len) 6886 { 6887 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6888 int ret = 0; 6889 u64 extent_start = 0; 6890 u64 extent_end = 0; 6891 u64 objectid = btrfs_ino(inode); 6892 int extent_type = -1; 6893 struct btrfs_path *path = NULL; 6894 struct btrfs_root *root = inode->root; 6895 struct btrfs_file_extent_item *item; 6896 struct extent_buffer *leaf; 6897 struct btrfs_key found_key; 6898 struct extent_map *em = NULL; 6899 struct extent_map_tree *em_tree = &inode->extent_tree; 6900 6901 read_lock(&em_tree->lock); 6902 em = lookup_extent_mapping(em_tree, start, len); 6903 read_unlock(&em_tree->lock); 6904 6905 if (em) { 6906 if (em->start > start || em->start + em->len <= start) 6907 free_extent_map(em); 6908 else if (em->block_start == EXTENT_MAP_INLINE && page) 6909 free_extent_map(em); 6910 else 6911 goto out; 6912 } 6913 em = alloc_extent_map(); 6914 if (!em) { 6915 ret = -ENOMEM; 6916 goto out; 6917 } 6918 em->start = EXTENT_MAP_HOLE; 6919 em->orig_start = EXTENT_MAP_HOLE; 6920 em->len = (u64)-1; 6921 em->block_len = (u64)-1; 6922 6923 path = btrfs_alloc_path(); 6924 if (!path) { 6925 ret = -ENOMEM; 6926 goto out; 6927 } 6928 6929 /* Chances are we'll be called again, so go ahead and do readahead */ 6930 path->reada = READA_FORWARD; 6931 6932 /* 6933 * The same explanation in load_free_space_cache applies here as well, 6934 * we only read when we're loading the free space cache, and at that 6935 * point the commit_root has everything we need. 
6936 */ 6937 if (btrfs_is_free_space_inode(inode)) { 6938 path->search_commit_root = 1; 6939 path->skip_locking = 1; 6940 } 6941 6942 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6943 if (ret < 0) { 6944 goto out; 6945 } else if (ret > 0) { 6946 if (path->slots[0] == 0) 6947 goto not_found; 6948 path->slots[0]--; 6949 ret = 0; 6950 } 6951 6952 leaf = path->nodes[0]; 6953 item = btrfs_item_ptr(leaf, path->slots[0], 6954 struct btrfs_file_extent_item); 6955 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6956 if (found_key.objectid != objectid || 6957 found_key.type != BTRFS_EXTENT_DATA_KEY) { 6958 /* 6959 * If we back up past the first extent we want to move forward 6960 * and see if there is an extent in front of us; otherwise we'll 6961 * say there is a hole for our whole search range, which can 6962 * cause problems. 6963 */ 6964 extent_end = start; 6965 goto next; 6966 } 6967 6968 extent_type = btrfs_file_extent_type(leaf, item); 6969 extent_start = found_key.offset; 6970 extent_end = btrfs_file_extent_end(path); 6971 if (extent_type == BTRFS_FILE_EXTENT_REG || 6972 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6973 /* Only regular file could have regular/prealloc extent */ 6974 if (!S_ISREG(inode->vfs_inode.i_mode)) { 6975 ret = -EUCLEAN; 6976 btrfs_crit(fs_info, 6977 "regular/prealloc extent found for non-regular inode %llu", 6978 btrfs_ino(inode)); 6979 goto out; 6980 } 6981 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 6982 extent_start); 6983 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6984 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 6985 path->slots[0], 6986 extent_start); 6987 } 6988 next: 6989 if (start >= extent_end) { 6990 path->slots[0]++; 6991 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6992 ret = btrfs_next_leaf(root, path); 6993 if (ret < 0) 6994 goto out; 6995 else if (ret > 0) 6996 goto not_found; 6997 6998 leaf = path->nodes[0]; 6999 } 7000 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 7001 if (found_key.objectid != objectid || 7002 found_key.type != BTRFS_EXTENT_DATA_KEY) 7003 goto not_found; 7004 if (start + len <= found_key.offset) 7005 goto not_found; 7006 if (start > found_key.offset) 7007 goto next; 7008 7009 /* New extent overlaps with existing one */ 7010 em->start = start; 7011 em->orig_start = start; 7012 em->len = found_key.offset - start; 7013 em->block_start = EXTENT_MAP_HOLE; 7014 goto insert; 7015 } 7016 7017 btrfs_extent_item_to_extent_map(inode, path, item, em); 7018 7019 if (extent_type == BTRFS_FILE_EXTENT_REG || 7020 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 7021 goto insert; 7022 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 7023 /* 7024 * Inline extent can only exist at file offset 0. This is 7025 * ensured by tree-checker and inline extent creation path. 7026 * Thus all members representing file offsets should be zero. 7027 */ 7028 ASSERT(pg_offset == 0); 7029 ASSERT(extent_start == 0); 7030 ASSERT(em->start == 0); 7031 7032 /* 7033 * btrfs_extent_item_to_extent_map() should have properly 7034 * initialized em members already. 7035 * 7036 * Other members are not utilized for inline extents.
7037 */ 7038 ASSERT(em->block_start == EXTENT_MAP_INLINE); 7039 ASSERT(em->len == fs_info->sectorsize); 7040 7041 ret = read_inline_extent(inode, path, page); 7042 if (ret < 0) 7043 goto out; 7044 goto insert; 7045 } 7046 not_found: 7047 em->start = start; 7048 em->orig_start = start; 7049 em->len = len; 7050 em->block_start = EXTENT_MAP_HOLE; 7051 insert: 7052 ret = 0; 7053 btrfs_release_path(path); 7054 if (em->start > start || extent_map_end(em) <= start) { 7055 btrfs_err(fs_info, 7056 "bad extent! em: [%llu %llu] passed [%llu %llu]", 7057 em->start, em->len, start, len); 7058 ret = -EIO; 7059 goto out; 7060 } 7061 7062 write_lock(&em_tree->lock); 7063 ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); 7064 write_unlock(&em_tree->lock); 7065 out: 7066 btrfs_free_path(path); 7067 7068 trace_btrfs_get_extent(root, inode, em); 7069 7070 if (ret) { 7071 free_extent_map(em); 7072 return ERR_PTR(ret); 7073 } 7074 return em; 7075 } 7076 7077 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, 7078 struct btrfs_dio_data *dio_data, 7079 const u64 start, 7080 const u64 len, 7081 const u64 orig_start, 7082 const u64 block_start, 7083 const u64 block_len, 7084 const u64 orig_block_len, 7085 const u64 ram_bytes, 7086 const int type) 7087 { 7088 struct extent_map *em = NULL; 7089 struct btrfs_ordered_extent *ordered; 7090 7091 if (type != BTRFS_ORDERED_NOCOW) { 7092 em = create_io_em(inode, start, len, orig_start, block_start, 7093 block_len, orig_block_len, ram_bytes, 7094 BTRFS_COMPRESS_NONE, /* compress_type */ 7095 type); 7096 if (IS_ERR(em)) 7097 goto out; 7098 } 7099 ordered = btrfs_alloc_ordered_extent(inode, start, len, len, 7100 block_start, block_len, 0, 7101 (1 << type) | 7102 (1 << BTRFS_ORDERED_DIRECT), 7103 BTRFS_COMPRESS_NONE); 7104 if (IS_ERR(ordered)) { 7105 if (em) { 7106 free_extent_map(em); 7107 btrfs_drop_extent_map_range(inode, start, 7108 start + len - 1, false); 7109 } 7110 em = ERR_CAST(ordered); 7111 } else { 7112 ASSERT(!dio_data->ordered); 7113 dio_data->ordered = ordered; 7114 } 7115 out: 7116 7117 return em; 7118 } 7119 7120 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, 7121 struct btrfs_dio_data *dio_data, 7122 u64 start, u64 len) 7123 { 7124 struct btrfs_root *root = inode->root; 7125 struct btrfs_fs_info *fs_info = root->fs_info; 7126 struct extent_map *em; 7127 struct btrfs_key ins; 7128 u64 alloc_hint; 7129 int ret; 7130 7131 alloc_hint = get_extent_allocation_hint(inode, start, len); 7132 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 7133 0, alloc_hint, &ins, 1, 1); 7134 if (ret) 7135 return ERR_PTR(ret); 7136 7137 em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start, 7138 ins.objectid, ins.offset, ins.offset, 7139 ins.offset, BTRFS_ORDERED_REGULAR); 7140 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 7141 if (IS_ERR(em)) 7142 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 7143 1); 7144 7145 return em; 7146 } 7147 7148 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) 7149 { 7150 struct btrfs_block_group *block_group; 7151 bool readonly = false; 7152 7153 block_group = btrfs_lookup_block_group(fs_info, bytenr); 7154 if (!block_group || block_group->ro) 7155 readonly = true; 7156 if (block_group) 7157 btrfs_put_block_group(block_group); 7158 return readonly; 7159 } 7160 7161 /* 7162 * Check if we can do nocow write into the range [@offset, @offset + @len) 7163 * 7164 * @offset: File offset 7165 * @len: The 
length to write, will be updated to the nocow writeable 7166 * range 7167 * @orig_start: (optional) Return the original file offset of the file extent 7168 * @orig_len: (optional) Return the original on-disk length of the file extent 7169 * @ram_bytes: (optional) Return the ram_bytes of the file extent 7170 * @strict: if true, omit optimizations that might force us into unnecessary 7171 * cow. e.g., don't trust generation number. 7172 * 7173 * Return: 7174 * >0 and update @len if we can do nocow write 7175 * 0 if we can't do nocow write 7176 * <0 if error happened 7177 * 7178 * NOTE: This only checks the file extents, caller is responsible to wait for 7179 * any ordered extents. 7180 */ 7181 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7182 u64 *orig_start, u64 *orig_block_len, 7183 u64 *ram_bytes, bool nowait, bool strict) 7184 { 7185 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7186 struct can_nocow_file_extent_args nocow_args = { 0 }; 7187 struct btrfs_path *path; 7188 int ret; 7189 struct extent_buffer *leaf; 7190 struct btrfs_root *root = BTRFS_I(inode)->root; 7191 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7192 struct btrfs_file_extent_item *fi; 7193 struct btrfs_key key; 7194 int found_type; 7195 7196 path = btrfs_alloc_path(); 7197 if (!path) 7198 return -ENOMEM; 7199 path->nowait = nowait; 7200 7201 ret = btrfs_lookup_file_extent(NULL, root, path, 7202 btrfs_ino(BTRFS_I(inode)), offset, 0); 7203 if (ret < 0) 7204 goto out; 7205 7206 if (ret == 1) { 7207 if (path->slots[0] == 0) { 7208 /* can't find the item, must cow */ 7209 ret = 0; 7210 goto out; 7211 } 7212 path->slots[0]--; 7213 } 7214 ret = 0; 7215 leaf = path->nodes[0]; 7216 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 7217 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7218 key.type != BTRFS_EXTENT_DATA_KEY) { 7219 /* not our file or wrong item type, must cow */ 7220 goto out; 7221 } 7222 7223 if (key.offset > offset) { 7224 /* Wrong offset, must cow */ 7225 goto out; 7226 } 7227 7228 if (btrfs_file_extent_end(path) <= offset) 7229 goto out; 7230 7231 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 7232 found_type = btrfs_file_extent_type(leaf, fi); 7233 if (ram_bytes) 7234 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7235 7236 nocow_args.start = offset; 7237 nocow_args.end = offset + *len - 1; 7238 nocow_args.strict = strict; 7239 nocow_args.free_path = true; 7240 7241 ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args); 7242 /* can_nocow_file_extent() has freed the path. */ 7243 path = NULL; 7244 7245 if (ret != 1) { 7246 /* Treat errors as not being able to NOCOW. 
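Falling back to COW is always safe here, just potentially slower.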
*/
7247 ret = 0;
7248 goto out;
7249 }
7250 
7251 ret = 0;
7252 if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
7253 goto out;
7254 
7255 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7256 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7257 u64 range_end;
7258 
7259 range_end = round_up(offset + nocow_args.num_bytes,
7260 root->fs_info->sectorsize) - 1;
7261 ret = test_range_bit(io_tree, offset, range_end,
7262 EXTENT_DELALLOC, 0, NULL);
7263 if (ret) {
7264 ret = -EAGAIN;
7265 goto out;
7266 }
7267 }
7268 
7269 if (orig_start)
7270 *orig_start = key.offset - nocow_args.extent_offset;
7271 if (orig_block_len)
7272 *orig_block_len = nocow_args.disk_num_bytes;
7273 
7274 *len = nocow_args.num_bytes;
7275 ret = 1;
7276 out:
7277 btrfs_free_path(path);
7278 return ret;
7279 }
7280 
7281 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7282 struct extent_state **cached_state,
7283 unsigned int iomap_flags)
7284 {
7285 const bool writing = (iomap_flags & IOMAP_WRITE);
7286 const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7287 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7288 struct btrfs_ordered_extent *ordered;
7289 int ret = 0;
7290 
7291 while (1) {
7292 if (nowait) {
7293 if (!try_lock_extent(io_tree, lockstart, lockend,
7294 cached_state))
7295 return -EAGAIN;
7296 } else {
7297 lock_extent(io_tree, lockstart, lockend, cached_state);
7298 }
7299 /*
7300 * We're concerned with the entire range that we're going to be
7301 * doing DIO to, so we need to make sure there are no ordered
7302 * extents in this range.
7303 */
7304 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7305 lockend - lockstart + 1);
7306 
7307 /*
7308 * We need to make sure there are no buffered pages in this
7309 * range either, we could have raced between the invalidate in
7310 * generic_file_direct_write and locking the extent. The
7311 * invalidate needs to happen so that reads after a write do not
7312 * get stale data.
7313 */
7314 if (!ordered &&
7315 (!writing || !filemap_range_has_page(inode->i_mapping,
7316 lockstart, lockend)))
7317 break;
7318 
7319 unlock_extent(io_tree, lockstart, lockend, cached_state);
7320 
7321 if (ordered) {
7322 if (nowait) {
7323 btrfs_put_ordered_extent(ordered);
7324 ret = -EAGAIN;
7325 break;
7326 }
7327 /*
7328 * If we are doing a DIO read and the ordered extent we
7329 * found is for a buffered write, we cannot wait for it
7330 * to complete and retry, because if we did we could
7331 * deadlock with concurrent buffered writes on page
7332 * locks. This happens only if our DIO read covers more
7333 * than one extent map: at this point we have already
7334 * created an ordered extent for a previous extent map
7335 * and locked its range in the inode's io tree, and a
7336 * concurrent write against that previous extent map's
7337 * range and this range has started (we unlock the ranges
7338 * in the io tree only when the bios complete and
7339 * buffered writes always lock pages before attempting
7340 * to lock a range in the io tree).
7341 */
7342 if (writing ||
7343 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7344 btrfs_start_ordered_extent(ordered);
7345 else
7346 ret = nowait ?
-EAGAIN : -ENOTBLK;
7347 btrfs_put_ordered_extent(ordered);
7348 } else {
7349 /*
7350 * We could trigger writeback for this range (and wait
7351 * for it to complete) and then invalidate the pages for
7352 * this range (through invalidate_inode_pages2_range()),
7353 * but that can lead us to a deadlock with a concurrent
7354 * call to readahead (a buffered read or a defrag call
7355 * triggered a readahead) on a page lock due to an
7356 * ordered dio extent we created before but for which we
7357 * have not yet submitted a corresponding bio (hence it
7358 * cannot complete), which makes readahead wait for that
7359 * ordered extent to complete while holding a lock on
7360 * that page.
7361 */
7362 ret = nowait ? -EAGAIN : -ENOTBLK;
7363 }
7364 
7365 if (ret)
7366 break;
7367 
7368 cond_resched();
7369 }
7370 
7371 return ret;
7372 }
7373 
7374 /* Callers of this must hold the range locked with lock_extent(). */
7375 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
7376 u64 len, u64 orig_start, u64 block_start,
7377 u64 block_len, u64 orig_block_len,
7378 u64 ram_bytes, int compress_type,
7379 int type)
7380 {
7381 struct extent_map *em;
7382 int ret;
7383 
7384 ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7385 type == BTRFS_ORDERED_COMPRESSED ||
7386 type == BTRFS_ORDERED_NOCOW ||
7387 type == BTRFS_ORDERED_REGULAR);
7388 
7389 em = alloc_extent_map();
7390 if (!em)
7391 return ERR_PTR(-ENOMEM);
7392 
7393 em->start = start;
7394 em->orig_start = orig_start;
7395 em->len = len;
7396 em->block_len = block_len;
7397 em->block_start = block_start;
7398 em->orig_block_len = orig_block_len;
7399 em->ram_bytes = ram_bytes;
7400 em->generation = -1;
7401 set_bit(EXTENT_FLAG_PINNED, &em->flags);
7402 if (type == BTRFS_ORDERED_PREALLOC) {
7403 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7404 } else if (type == BTRFS_ORDERED_COMPRESSED) {
7405 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7406 em->compress_type = compress_type;
7407 }
7408 
7409 ret = btrfs_replace_extent_map_range(inode, em, true);
7410 if (ret) {
7411 free_extent_map(em);
7412 return ERR_PTR(ret);
7413 }
7414 
7415 /* The em now has 2 refs, so the caller needs to do free_extent_map() once. */
7416 return em;
7417 }
7418 
7419 
7420 static int btrfs_get_blocks_direct_write(struct extent_map **map,
7421 struct inode *inode,
7422 struct btrfs_dio_data *dio_data,
7423 u64 start, u64 *lenp,
7424 unsigned int iomap_flags)
7425 {
7426 const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7427 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7428 struct extent_map *em = *map;
7429 int type;
7430 u64 block_start, orig_start, orig_block_len, ram_bytes;
7431 struct btrfs_block_group *bg;
7432 bool can_nocow = false;
7433 bool space_reserved = false;
7434 u64 len = *lenp;
7435 u64 prev_len;
7436 int ret = 0;
7437 
7438 /*
7439 * We don't allocate a new extent in the following cases:
7440 *
7441 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7442 * existing extent.
7443 * 2) The extent is marked as PREALLOC. We're good to go here and can
7444 * just use the extent.
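* 3) In all other cases, or if the NOCOW check below does not succeed,
* we fall through and allocate a new extent, i.e. we do a regular COW
* write.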
7445 * 7446 */ 7447 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 7448 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7449 em->block_start != EXTENT_MAP_HOLE)) { 7450 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7451 type = BTRFS_ORDERED_PREALLOC; 7452 else 7453 type = BTRFS_ORDERED_NOCOW; 7454 len = min(len, em->len - (start - em->start)); 7455 block_start = em->block_start + (start - em->start); 7456 7457 if (can_nocow_extent(inode, start, &len, &orig_start, 7458 &orig_block_len, &ram_bytes, false, false) == 1) { 7459 bg = btrfs_inc_nocow_writers(fs_info, block_start); 7460 if (bg) 7461 can_nocow = true; 7462 } 7463 } 7464 7465 prev_len = len; 7466 if (can_nocow) { 7467 struct extent_map *em2; 7468 7469 /* We can NOCOW, so only need to reserve metadata space. */ 7470 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, 7471 nowait); 7472 if (ret < 0) { 7473 /* Our caller expects us to free the input extent map. */ 7474 free_extent_map(em); 7475 *map = NULL; 7476 btrfs_dec_nocow_writers(bg); 7477 if (nowait && (ret == -ENOSPC || ret == -EDQUOT)) 7478 ret = -EAGAIN; 7479 goto out; 7480 } 7481 space_reserved = true; 7482 7483 em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len, 7484 orig_start, block_start, 7485 len, orig_block_len, 7486 ram_bytes, type); 7487 btrfs_dec_nocow_writers(bg); 7488 if (type == BTRFS_ORDERED_PREALLOC) { 7489 free_extent_map(em); 7490 *map = em2; 7491 em = em2; 7492 } 7493 7494 if (IS_ERR(em2)) { 7495 ret = PTR_ERR(em2); 7496 goto out; 7497 } 7498 7499 dio_data->nocow_done = true; 7500 } else { 7501 /* Our caller expects us to free the input extent map. */ 7502 free_extent_map(em); 7503 *map = NULL; 7504 7505 if (nowait) { 7506 ret = -EAGAIN; 7507 goto out; 7508 } 7509 7510 /* 7511 * If we could not allocate data space before locking the file 7512 * range and we can't do a NOCOW write, then we have to fail. 7513 */ 7514 if (!dio_data->data_space_reserved) { 7515 ret = -ENOSPC; 7516 goto out; 7517 } 7518 7519 /* 7520 * We have to COW and we have already reserved data space before, 7521 * so now we reserve only metadata. 7522 */ 7523 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, 7524 false); 7525 if (ret < 0) 7526 goto out; 7527 space_reserved = true; 7528 7529 em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len); 7530 if (IS_ERR(em)) { 7531 ret = PTR_ERR(em); 7532 goto out; 7533 } 7534 *map = em; 7535 len = min(len, em->len - (start - em->start)); 7536 if (len < prev_len) 7537 btrfs_delalloc_release_metadata(BTRFS_I(inode), 7538 prev_len - len, true); 7539 } 7540 7541 /* 7542 * We have created our ordered extent, so we can now release our reservation 7543 * for an outstanding extent. 7544 */ 7545 btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len); 7546 7547 /* 7548 * Need to update the i_size under the extent lock so buffered 7549 * readers will get the updated i_size when we unlock. 
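* The on-disk i_size is not updated here, that is left to the ordered
* extent completion.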
7550 */
7551 if (start + len > i_size_read(inode))
7552 i_size_write(inode, start + len);
7553 out:
7554 if (ret && space_reserved) {
7555 btrfs_delalloc_release_extents(BTRFS_I(inode), len);
7556 btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
7557 }
7558 *lenp = len;
7559 return ret;
7560 }
7561 
7562 static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
7563 loff_t length, unsigned int flags, struct iomap *iomap,
7564 struct iomap *srcmap)
7565 {
7566 struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7567 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7568 struct extent_map *em;
7569 struct extent_state *cached_state = NULL;
7570 struct btrfs_dio_data *dio_data = iter->private;
7571 u64 lockstart, lockend;
7572 const bool write = !!(flags & IOMAP_WRITE);
7573 int ret = 0;
7574 u64 len = length;
7575 const u64 data_alloc_len = length;
7576 bool unlock_extents = false;
7577 
7578 /*
7579 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
7580 * we're NOWAIT we may submit a bio for a partial range and return
7581 * EIOCBQUEUED, which would result in an errant short read.
7582 *
7583 * The best way to handle this would be to allow for partial completions
7584 * of iocb's, so we could submit the partial bio, return and fault in
7585 * the rest of the pages, and then submit the io for the rest of the
7586 * range. However we don't have that currently, so simply return
7587 * -EAGAIN at this point so that the normal path is used.
7588 */
7589 if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
7590 return -EAGAIN;
7591 
7592 /*
7593 * Cap the size of reads to that usually seen in buffered I/O as we need
7594 * to allocate a contiguous array for the checksums.
7595 */
7596 if (!write)
7597 len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);
7598 
7599 lockstart = start;
7600 lockend = start + len - 1;
7601 
7602 /*
7603 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
7604 * enough if we've written compressed pages to this area, so we need to
7605 * flush the dirty pages again to make absolutely sure that any
7606 * outstanding dirty pages are on disk - the first flush only starts
7607 * compression on the data, while keeping the pages locked, so by the
7608 * time the second flush returns we know bios for the compressed pages
7609 * were submitted and finished, and the pages are no longer under writeback.
7610 *
7611 * If we have a NOWAIT request and we have any pages in the range that
7612 * are locked, likely due to compression still in progress, we don't want
7613 * to block on page locks. We also don't want to block on pages marked as
7614 * dirty or under writeback (same as for the non-compression case).
7615 * iomap_dio_rw() did the same check, but after that and before we got
7616 * here, mmap'ed writes may have happened or buffered reads started
7617 * (readpage() and readahead(), which lock pages), as we haven't locked
7618 * the file range yet.
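* Hence the checks below: with NOWAIT we bail out with -EAGAIN if any
* page in the range still needs writeback, otherwise we do the second
* flush right here.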
7619 */ 7620 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 7621 &BTRFS_I(inode)->runtime_flags)) { 7622 if (flags & IOMAP_NOWAIT) { 7623 if (filemap_range_needs_writeback(inode->i_mapping, 7624 lockstart, lockend)) 7625 return -EAGAIN; 7626 } else { 7627 ret = filemap_fdatawrite_range(inode->i_mapping, start, 7628 start + length - 1); 7629 if (ret) 7630 return ret; 7631 } 7632 } 7633 7634 memset(dio_data, 0, sizeof(*dio_data)); 7635 7636 /* 7637 * We always try to allocate data space and must do it before locking 7638 * the file range, to avoid deadlocks with concurrent writes to the same 7639 * range if the range has several extents and the writes don't expand the 7640 * current i_size (the inode lock is taken in shared mode). If we fail to 7641 * allocate data space here we continue and later, after locking the 7642 * file range, we fail with ENOSPC only if we figure out we can not do a 7643 * NOCOW write. 7644 */ 7645 if (write && !(flags & IOMAP_NOWAIT)) { 7646 ret = btrfs_check_data_free_space(BTRFS_I(inode), 7647 &dio_data->data_reserved, 7648 start, data_alloc_len, false); 7649 if (!ret) 7650 dio_data->data_space_reserved = true; 7651 else if (ret && !(BTRFS_I(inode)->flags & 7652 (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) 7653 goto err; 7654 } 7655 7656 /* 7657 * If this errors out it's because we couldn't invalidate pagecache for 7658 * this range and we need to fallback to buffered IO, or we are doing a 7659 * NOWAIT read/write and we need to block. 7660 */ 7661 ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags); 7662 if (ret < 0) 7663 goto err; 7664 7665 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); 7666 if (IS_ERR(em)) { 7667 ret = PTR_ERR(em); 7668 goto unlock_err; 7669 } 7670 7671 /* 7672 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered 7673 * io. INLINE is special, and we could probably kludge it in here, but 7674 * it's still buffered so for safety lets just fall back to the generic 7675 * buffered path. 7676 * 7677 * For COMPRESSED we _have_ to read the entire extent in so we can 7678 * decompress it, so there will be buffering required no matter what we 7679 * do, so go ahead and fallback to buffered. 7680 * 7681 * We return -ENOTBLK because that's what makes DIO go ahead and go back 7682 * to buffered IO. Don't blame me, this is the price we pay for using 7683 * the generic code. 7684 */ 7685 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 7686 em->block_start == EXTENT_MAP_INLINE) { 7687 free_extent_map(em); 7688 /* 7689 * If we are in a NOWAIT context, return -EAGAIN in order to 7690 * fallback to buffered IO. This is not only because we can 7691 * block with buffered IO (no support for NOWAIT semantics at 7692 * the moment) but also to avoid returning short reads to user 7693 * space - this happens if we were able to read some data from 7694 * previous non-compressed extents and then when we fallback to 7695 * buffered IO, at btrfs_file_read_iter() by calling 7696 * filemap_read(), we fail to fault in pages for the read buffer, 7697 * in which case filemap_read() returns a short read (the number 7698 * of bytes previously read is > 0, so it does not return -EFAULT). 7699 */ 7700 ret = (flags & IOMAP_NOWAIT) ? 
-EAGAIN : -ENOTBLK;
7701 goto unlock_err;
7702 }
7703 
7704 len = min(len, em->len - (start - em->start));
7705 
7706 /*
7707 * If we have a NOWAIT request and the range contains multiple extents
7708 * (or a mix of extents and holes), then we return -EAGAIN to make the
7709 * caller fall back to a context where it can do a blocking (without
7710 * NOWAIT) request. This way we avoid doing partial IO and returning
7711 * success to the caller, which is not optimal for writes and for reads
7712 * it can result in unexpected behaviour for an application.
7713 *
7714 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
7715 * iomap_dio_rw(), we can end up returning less data than what the caller
7716 * asked for, resulting in an unexpected, and incorrect, short read.
7717 * That is, the caller asked to read N bytes and we return less than that,
7718 * which is wrong unless we are crossing EOF. This happens if we get a
7719 * page fault error when trying to fault in pages for the buffer that is
7720 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
7721 * have previously submitted bios for other extents in the range, in
7722 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
7723 * those bios have completed by the time we get the page fault error,
7724 * which we return back to our caller - we should only return EIOCBQUEUED
7725 * after we have submitted bios for all the extents in the range.
7726 */
7727 if ((flags & IOMAP_NOWAIT) && len < length) {
7728 free_extent_map(em);
7729 ret = -EAGAIN;
7730 goto unlock_err;
7731 }
7732 
7733 if (write) {
7734 ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
7735 start, &len, flags);
7736 if (ret < 0)
7737 goto unlock_err;
7738 unlock_extents = true;
7739 /* Recalc len in case the new em is smaller than requested */
7740 len = min(len, em->len - (start - em->start));
7741 if (dio_data->data_space_reserved) {
7742 u64 release_offset;
7743 u64 release_len = 0;
7744 
7745 if (dio_data->nocow_done) {
7746 release_offset = start;
7747 release_len = data_alloc_len;
7748 } else if (len < data_alloc_len) {
7749 release_offset = start + len;
7750 release_len = data_alloc_len - len;
7751 }
7752 
7753 if (release_len > 0)
7754 btrfs_free_reserved_data_space(BTRFS_I(inode),
7755 dio_data->data_reserved,
7756 release_offset,
7757 release_len);
7758 }
7759 } else {
7760 /*
7761 * We need to unlock only the end area that we aren't using.
7762 * The rest is going to be unlocked by the endio routine.
7763 */
7764 lockstart = start + len;
7765 if (lockstart < lockend)
7766 unlock_extents = true;
7767 }
7768 
7769 if (unlock_extents)
7770 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7771 &cached_state);
7772 else
7773 free_extent_state(cached_state);
7774 
7775 /*
7776 * Translate extent map information to iomap.
7777 * We trim the extents (and move the addr) even though iomap code does
7778 * that, since we have locked only the parts we are performing I/O in.
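* In short, the translation below is:
*
* hole, or prealloc extent on a read -> IOMAP_HOLE (IOMAP_NULL_ADDR)
* anything else -> IOMAP_MAPPED at em->block_start + (start - em->start)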
7779 */ 7780 if ((em->block_start == EXTENT_MAP_HOLE) || 7781 (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { 7782 iomap->addr = IOMAP_NULL_ADDR; 7783 iomap->type = IOMAP_HOLE; 7784 } else { 7785 iomap->addr = em->block_start + (start - em->start); 7786 iomap->type = IOMAP_MAPPED; 7787 } 7788 iomap->offset = start; 7789 iomap->bdev = fs_info->fs_devices->latest_dev->bdev; 7790 iomap->length = len; 7791 free_extent_map(em); 7792 7793 return 0; 7794 7795 unlock_err: 7796 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7797 &cached_state); 7798 err: 7799 if (dio_data->data_space_reserved) { 7800 btrfs_free_reserved_data_space(BTRFS_I(inode), 7801 dio_data->data_reserved, 7802 start, data_alloc_len); 7803 extent_changeset_free(dio_data->data_reserved); 7804 } 7805 7806 return ret; 7807 } 7808 7809 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, 7810 ssize_t written, unsigned int flags, struct iomap *iomap) 7811 { 7812 struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); 7813 struct btrfs_dio_data *dio_data = iter->private; 7814 size_t submitted = dio_data->submitted; 7815 const bool write = !!(flags & IOMAP_WRITE); 7816 int ret = 0; 7817 7818 if (!write && (iomap->type == IOMAP_HOLE)) { 7819 /* If reading from a hole, unlock and return */ 7820 unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1, 7821 NULL); 7822 return 0; 7823 } 7824 7825 if (submitted < length) { 7826 pos += submitted; 7827 length -= submitted; 7828 if (write) 7829 btrfs_finish_ordered_extent(dio_data->ordered, NULL, 7830 pos, length, false); 7831 else 7832 unlock_extent(&BTRFS_I(inode)->io_tree, pos, 7833 pos + length - 1, NULL); 7834 ret = -ENOTBLK; 7835 } 7836 if (write) { 7837 btrfs_put_ordered_extent(dio_data->ordered); 7838 dio_data->ordered = NULL; 7839 } 7840 7841 if (write) 7842 extent_changeset_free(dio_data->data_reserved); 7843 return ret; 7844 } 7845 7846 static void btrfs_dio_end_io(struct btrfs_bio *bbio) 7847 { 7848 struct btrfs_dio_private *dip = 7849 container_of(bbio, struct btrfs_dio_private, bbio); 7850 struct btrfs_inode *inode = bbio->inode; 7851 struct bio *bio = &bbio->bio; 7852 7853 if (bio->bi_status) { 7854 btrfs_warn(inode->root->fs_info, 7855 "direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d", 7856 btrfs_ino(inode), bio->bi_opf, 7857 dip->file_offset, dip->bytes, bio->bi_status); 7858 } 7859 7860 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 7861 btrfs_finish_ordered_extent(bbio->ordered, NULL, 7862 dip->file_offset, dip->bytes, 7863 !bio->bi_status); 7864 } else { 7865 unlock_extent(&inode->io_tree, dip->file_offset, 7866 dip->file_offset + dip->bytes - 1, NULL); 7867 } 7868 7869 bbio->bio.bi_private = bbio->private; 7870 iomap_dio_bio_end_io(bio); 7871 } 7872 7873 static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio, 7874 loff_t file_offset) 7875 { 7876 struct btrfs_bio *bbio = btrfs_bio(bio); 7877 struct btrfs_dio_private *dip = 7878 container_of(bbio, struct btrfs_dio_private, bbio); 7879 struct btrfs_dio_data *dio_data = iter->private; 7880 7881 btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info, 7882 btrfs_dio_end_io, bio->bi_private); 7883 bbio->inode = BTRFS_I(iter->inode); 7884 bbio->file_offset = file_offset; 7885 7886 dip->file_offset = file_offset; 7887 dip->bytes = bio->bi_iter.bi_size; 7888 7889 dio_data->submitted += bio->bi_iter.bi_size; 7890 7891 /* 7892 * Check if we are doing a partial write. 
If we are, we need to split 7893 * the ordered extent to match the submitted bio. Hang on to the 7894 * remaining unfinishable ordered_extent in dio_data so that it can be 7895 * cancelled in iomap_end to avoid a deadlock wherein faulting the 7896 * remaining pages is blocked on the outstanding ordered extent. 7897 */ 7898 if (iter->flags & IOMAP_WRITE) { 7899 int ret; 7900 7901 ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered); 7902 if (ret) { 7903 btrfs_finish_ordered_extent(dio_data->ordered, NULL, 7904 file_offset, dip->bytes, 7905 !ret); 7906 bio->bi_status = errno_to_blk_status(ret); 7907 iomap_dio_bio_end_io(bio); 7908 return; 7909 } 7910 } 7911 7912 btrfs_submit_bio(bbio, 0); 7913 } 7914 7915 static const struct iomap_ops btrfs_dio_iomap_ops = { 7916 .iomap_begin = btrfs_dio_iomap_begin, 7917 .iomap_end = btrfs_dio_iomap_end, 7918 }; 7919 7920 static const struct iomap_dio_ops btrfs_dio_ops = { 7921 .submit_io = btrfs_dio_submit_io, 7922 .bio_set = &btrfs_dio_bioset, 7923 }; 7924 7925 ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) 7926 { 7927 struct btrfs_dio_data data = { 0 }; 7928 7929 return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 7930 IOMAP_DIO_PARTIAL, &data, done_before); 7931 } 7932 7933 struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter, 7934 size_t done_before) 7935 { 7936 struct btrfs_dio_data data = { 0 }; 7937 7938 return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 7939 IOMAP_DIO_PARTIAL, &data, done_before); 7940 } 7941 7942 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 7943 u64 start, u64 len) 7944 { 7945 int ret; 7946 7947 ret = fiemap_prep(inode, fieinfo, start, &len, 0); 7948 if (ret) 7949 return ret; 7950 7951 /* 7952 * fiemap_prep() called filemap_write_and_wait() for the whole possible 7953 * file range (0 to LLONG_MAX), but that is not enough if we have 7954 * compression enabled. The first filemap_fdatawrite_range() only kicks 7955 * in the compression of data (in an async thread) and will return 7956 * before the compression is done and writeback is started. A second 7957 * filemap_fdatawrite_range() is needed to wait for the compression to 7958 * complete and writeback to start. We also need to wait for ordered 7959 * extents to complete, because our fiemap implementation uses mainly 7960 * file extent items to list the extents, searching for extent maps 7961 * only for file ranges with holes or prealloc extents to figure out 7962 * if we have delalloc in those ranges. 7963 */ 7964 if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) { 7965 ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX); 7966 if (ret) 7967 return ret; 7968 } 7969 7970 return extent_fiemap(BTRFS_I(inode), fieinfo, start, len); 7971 } 7972 7973 static int btrfs_writepages(struct address_space *mapping, 7974 struct writeback_control *wbc) 7975 { 7976 return extent_writepages(mapping, wbc); 7977 } 7978 7979 static void btrfs_readahead(struct readahead_control *rac) 7980 { 7981 extent_readahead(rac); 7982 } 7983 7984 /* 7985 * For release_folio() and invalidate_folio() we have a race window where 7986 * folio_end_writeback() is called but the subpage spinlock is not yet released. 7987 * If we continue to release/invalidate the page, we could cause use-after-free 7988 * for subpage spinlock. So this function is to spin and wait for subpage 7989 * spinlock. 
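* This is a no-op unless we are in subpage mode (sectorsize < PAGE_SIZE),
* see the btrfs_is_subpage() check below.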
7990 */ 7991 static void wait_subpage_spinlock(struct page *page) 7992 { 7993 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); 7994 struct btrfs_subpage *subpage; 7995 7996 if (!btrfs_is_subpage(fs_info, page)) 7997 return; 7998 7999 ASSERT(PagePrivate(page) && page->private); 8000 subpage = (struct btrfs_subpage *)page->private; 8001 8002 /* 8003 * This may look insane as we just acquire the spinlock and release it, 8004 * without doing anything. But we just want to make sure no one is 8005 * still holding the subpage spinlock. 8006 * And since the page is not dirty nor writeback, and we have page 8007 * locked, the only possible way to hold a spinlock is from the endio 8008 * function to clear page writeback. 8009 * 8010 * Here we just acquire the spinlock so that all existing callers 8011 * should exit and we're safe to release/invalidate the page. 8012 */ 8013 spin_lock_irq(&subpage->lock); 8014 spin_unlock_irq(&subpage->lock); 8015 } 8016 8017 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 8018 { 8019 int ret = try_release_extent_mapping(&folio->page, gfp_flags); 8020 8021 if (ret == 1) { 8022 wait_subpage_spinlock(&folio->page); 8023 clear_page_extent_mapped(&folio->page); 8024 } 8025 return ret; 8026 } 8027 8028 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 8029 { 8030 if (folio_test_writeback(folio) || folio_test_dirty(folio)) 8031 return false; 8032 return __btrfs_release_folio(folio, gfp_flags); 8033 } 8034 8035 #ifdef CONFIG_MIGRATION 8036 static int btrfs_migrate_folio(struct address_space *mapping, 8037 struct folio *dst, struct folio *src, 8038 enum migrate_mode mode) 8039 { 8040 int ret = filemap_migrate_folio(mapping, dst, src, mode); 8041 8042 if (ret != MIGRATEPAGE_SUCCESS) 8043 return ret; 8044 8045 if (folio_test_ordered(src)) { 8046 folio_clear_ordered(src); 8047 folio_set_ordered(dst); 8048 } 8049 8050 return MIGRATEPAGE_SUCCESS; 8051 } 8052 #else 8053 #define btrfs_migrate_folio NULL 8054 #endif 8055 8056 static void btrfs_invalidate_folio(struct folio *folio, size_t offset, 8057 size_t length) 8058 { 8059 struct btrfs_inode *inode = BTRFS_I(folio->mapping->host); 8060 struct btrfs_fs_info *fs_info = inode->root->fs_info; 8061 struct extent_io_tree *tree = &inode->io_tree; 8062 struct extent_state *cached_state = NULL; 8063 u64 page_start = folio_pos(folio); 8064 u64 page_end = page_start + folio_size(folio) - 1; 8065 u64 cur; 8066 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; 8067 8068 /* 8069 * We have folio locked so no new ordered extent can be created on this 8070 * page, nor bio can be submitted for this folio. 8071 * 8072 * But already submitted bio can still be finished on this folio. 8073 * Furthermore, endio function won't skip folio which has Ordered 8074 * (Private2) already cleared, so it's possible for endio and 8075 * invalidate_folio to do the same ordered extent accounting twice 8076 * on one folio. 8077 * 8078 * So here we wait for any submitted bios to finish, so that we won't 8079 * do double ordered extent accounting on the same folio. 8080 */ 8081 folio_wait_writeback(folio); 8082 wait_subpage_spinlock(&folio->page); 8083 8084 /* 8085 * For subpage case, we have call sites like 8086 * btrfs_punch_hole_lock_range() which passes range not aligned to 8087 * sectorsize. 
8088 * If the range doesn't cover the full folio, we don't need to and 8089 * shouldn't clear page extent mapped, as folio->private can still 8090 * record subpage dirty bits for other part of the range. 8091 * 8092 * For cases that invalidate the full folio even the range doesn't 8093 * cover the full folio, like invalidating the last folio, we're 8094 * still safe to wait for ordered extent to finish. 8095 */ 8096 if (!(offset == 0 && length == folio_size(folio))) { 8097 btrfs_release_folio(folio, GFP_NOFS); 8098 return; 8099 } 8100 8101 if (!inode_evicting) 8102 lock_extent(tree, page_start, page_end, &cached_state); 8103 8104 cur = page_start; 8105 while (cur < page_end) { 8106 struct btrfs_ordered_extent *ordered; 8107 u64 range_end; 8108 u32 range_len; 8109 u32 extra_flags = 0; 8110 8111 ordered = btrfs_lookup_first_ordered_range(inode, cur, 8112 page_end + 1 - cur); 8113 if (!ordered) { 8114 range_end = page_end; 8115 /* 8116 * No ordered extent covering this range, we are safe 8117 * to delete all extent states in the range. 8118 */ 8119 extra_flags = EXTENT_CLEAR_ALL_BITS; 8120 goto next; 8121 } 8122 if (ordered->file_offset > cur) { 8123 /* 8124 * There is a range between [cur, oe->file_offset) not 8125 * covered by any ordered extent. 8126 * We are safe to delete all extent states, and handle 8127 * the ordered extent in the next iteration. 8128 */ 8129 range_end = ordered->file_offset - 1; 8130 extra_flags = EXTENT_CLEAR_ALL_BITS; 8131 goto next; 8132 } 8133 8134 range_end = min(ordered->file_offset + ordered->num_bytes - 1, 8135 page_end); 8136 ASSERT(range_end + 1 - cur < U32_MAX); 8137 range_len = range_end + 1 - cur; 8138 if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) { 8139 /* 8140 * If Ordered (Private2) is cleared, it means endio has 8141 * already been executed for the range. 8142 * We can't delete the extent states as 8143 * btrfs_finish_ordered_io() may still use some of them. 8144 */ 8145 goto next; 8146 } 8147 btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len); 8148 8149 /* 8150 * IO on this page will never be started, so we need to account 8151 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW 8152 * here, must leave that up for the ordered extent completion. 8153 * 8154 * This will also unlock the range for incoming 8155 * btrfs_finish_ordered_io(). 8156 */ 8157 if (!inode_evicting) 8158 clear_extent_bit(tree, cur, range_end, 8159 EXTENT_DELALLOC | 8160 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 8161 EXTENT_DEFRAG, &cached_state); 8162 8163 spin_lock_irq(&inode->ordered_tree.lock); 8164 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 8165 ordered->truncated_len = min(ordered->truncated_len, 8166 cur - ordered->file_offset); 8167 spin_unlock_irq(&inode->ordered_tree.lock); 8168 8169 /* 8170 * If the ordered extent has finished, we're safe to delete all 8171 * the extent states of the range, otherwise 8172 * btrfs_finish_ordered_io() will get executed by endio for 8173 * other pages, so we can't delete extent states. 8174 */ 8175 if (btrfs_dec_test_ordered_pending(inode, &ordered, 8176 cur, range_end + 1 - cur)) { 8177 btrfs_finish_ordered_io(ordered); 8178 /* 8179 * The ordered extent has finished, now we're again 8180 * safe to delete all extent states of the range. 
8181 */ 8182 extra_flags = EXTENT_CLEAR_ALL_BITS; 8183 } 8184 next: 8185 if (ordered) 8186 btrfs_put_ordered_extent(ordered); 8187 /* 8188 * Qgroup reserved space handler 8189 * Sector(s) here will be either: 8190 * 8191 * 1) Already written to disk or bio already finished 8192 * Then its QGROUP_RESERVED bit in io_tree is already cleared. 8193 * Qgroup will be handled by its qgroup_record then. 8194 * btrfs_qgroup_free_data() call will do nothing here. 8195 * 8196 * 2) Not written to disk yet 8197 * Then btrfs_qgroup_free_data() call will clear the 8198 * QGROUP_RESERVED bit of its io_tree, and free the qgroup 8199 * reserved data space. 8200 * Since the IO will never happen for this page. 8201 */ 8202 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur); 8203 if (!inode_evicting) { 8204 clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED | 8205 EXTENT_DELALLOC | EXTENT_UPTODATE | 8206 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG | 8207 extra_flags, &cached_state); 8208 } 8209 cur = range_end + 1; 8210 } 8211 /* 8212 * We have iterated through all ordered extents of the page, the page 8213 * should not have Ordered (Private2) anymore, or the above iteration 8214 * did something wrong. 8215 */ 8216 ASSERT(!folio_test_ordered(folio)); 8217 btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio)); 8218 if (!inode_evicting) 8219 __btrfs_release_folio(folio, GFP_NOFS); 8220 clear_page_extent_mapped(&folio->page); 8221 } 8222 8223 /* 8224 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 8225 * called from a page fault handler when a page is first dirtied. Hence we must 8226 * be careful to check for EOF conditions here. We set the page up correctly 8227 * for a written page which means we get ENOSPC checking when writing into 8228 * holes and correct delalloc and unwritten extent mapping on filesystems that 8229 * support these features. 8230 * 8231 * We are not allowed to take the i_mutex here so we have to play games to 8232 * protect against truncate races as the page could now be beyond EOF. Because 8233 * truncate_setsize() writes the inode size before removing pages, once we have 8234 * the page lock we can determine safely if the page is beyond EOF. If it is not 8235 * beyond EOF, then the page is guaranteed safe against truncation until we 8236 * unlock the page. 8237 */ 8238 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) 8239 { 8240 struct page *page = vmf->page; 8241 struct inode *inode = file_inode(vmf->vma->vm_file); 8242 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8243 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 8244 struct btrfs_ordered_extent *ordered; 8245 struct extent_state *cached_state = NULL; 8246 struct extent_changeset *data_reserved = NULL; 8247 unsigned long zero_start; 8248 loff_t size; 8249 vm_fault_t ret; 8250 int ret2; 8251 int reserved = 0; 8252 u64 reserved_space; 8253 u64 page_start; 8254 u64 page_end; 8255 u64 end; 8256 8257 reserved_space = PAGE_SIZE; 8258 8259 sb_start_pagefault(inode->i_sb); 8260 page_start = page_offset(page); 8261 page_end = page_start + PAGE_SIZE - 1; 8262 end = page_end; 8263 8264 /* 8265 * Reserving delalloc space after obtaining the page lock can lead to 8266 * deadlock. 
For example, if a dirty page is locked by this function 8267 * and the call to btrfs_delalloc_reserve_space() ends up triggering 8268 * dirty page write out, then the btrfs_writepages() function could 8269 * end up waiting indefinitely to get a lock on the page currently 8270 * being processed by btrfs_page_mkwrite() function. 8271 */ 8272 ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, 8273 page_start, reserved_space); 8274 if (!ret2) { 8275 ret2 = file_update_time(vmf->vma->vm_file); 8276 reserved = 1; 8277 } 8278 if (ret2) { 8279 ret = vmf_error(ret2); 8280 if (reserved) 8281 goto out; 8282 goto out_noreserve; 8283 } 8284 8285 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 8286 again: 8287 down_read(&BTRFS_I(inode)->i_mmap_lock); 8288 lock_page(page); 8289 size = i_size_read(inode); 8290 8291 if ((page->mapping != inode->i_mapping) || 8292 (page_start >= size)) { 8293 /* page got truncated out from underneath us */ 8294 goto out_unlock; 8295 } 8296 wait_on_page_writeback(page); 8297 8298 lock_extent(io_tree, page_start, page_end, &cached_state); 8299 ret2 = set_page_extent_mapped(page); 8300 if (ret2 < 0) { 8301 ret = vmf_error(ret2); 8302 unlock_extent(io_tree, page_start, page_end, &cached_state); 8303 goto out_unlock; 8304 } 8305 8306 /* 8307 * we can't set the delalloc bits if there are pending ordered 8308 * extents. Drop our locks and wait for them to finish 8309 */ 8310 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, 8311 PAGE_SIZE); 8312 if (ordered) { 8313 unlock_extent(io_tree, page_start, page_end, &cached_state); 8314 unlock_page(page); 8315 up_read(&BTRFS_I(inode)->i_mmap_lock); 8316 btrfs_start_ordered_extent(ordered); 8317 btrfs_put_ordered_extent(ordered); 8318 goto again; 8319 } 8320 8321 if (page->index == ((size - 1) >> PAGE_SHIFT)) { 8322 reserved_space = round_up(size - page_start, 8323 fs_info->sectorsize); 8324 if (reserved_space < PAGE_SIZE) { 8325 end = page_start + reserved_space - 1; 8326 btrfs_delalloc_release_space(BTRFS_I(inode), 8327 data_reserved, page_start, 8328 PAGE_SIZE - reserved_space, true); 8329 } 8330 } 8331 8332 /* 8333 * page_mkwrite gets called when the page is firstly dirtied after it's 8334 * faulted in, but write(2) could also dirty a page and set delalloc 8335 * bits, thus in this case for space account reason, we still need to 8336 * clear any delalloc bits within this page range since we have to 8337 * reserve data&meta space before lock_page() (see above comments). 
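* Without this, the btrfs_set_extent_delalloc() call below could end up
* accounting the same range twice.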
8338 */
8339 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8340 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8341 EXTENT_DEFRAG, &cached_state);
8342 
8343 ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8344 &cached_state);
8345 if (ret2) {
8346 unlock_extent(io_tree, page_start, page_end, &cached_state);
8347 ret = VM_FAULT_SIGBUS;
8348 goto out_unlock;
8349 }
8350 
8351 /* Page is wholly or partially inside EOF. */
8352 if (page_start + PAGE_SIZE > size)
8353 zero_start = offset_in_page(size);
8354 else
8355 zero_start = PAGE_SIZE;
8356 
8357 if (zero_start != PAGE_SIZE)
8358 memzero_page(page, zero_start, PAGE_SIZE - zero_start);
8359 
8360 btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
8361 btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
8362 btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
8363 
8364 btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
8365 
8366 unlock_extent(io_tree, page_start, page_end, &cached_state);
8367 up_read(&BTRFS_I(inode)->i_mmap_lock);
8368 
8369 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8370 sb_end_pagefault(inode->i_sb);
8371 extent_changeset_free(data_reserved);
8372 return VM_FAULT_LOCKED;
8373 
8374 out_unlock:
8375 unlock_page(page);
8376 up_read(&BTRFS_I(inode)->i_mmap_lock);
8377 out:
8378 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8379 btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
8380 reserved_space, (ret != 0));
8381 out_noreserve:
8382 sb_end_pagefault(inode->i_sb);
8383 extent_changeset_free(data_reserved);
8384 return ret;
8385 }
8386 
8387 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
8388 {
8389 struct btrfs_truncate_control control = {
8390 .inode = inode,
8391 .ino = btrfs_ino(inode),
8392 .min_type = BTRFS_EXTENT_DATA_KEY,
8393 .clear_extent_range = true,
8394 };
8395 struct btrfs_root *root = inode->root;
8396 struct btrfs_fs_info *fs_info = root->fs_info;
8397 struct btrfs_block_rsv *rsv;
8398 int ret;
8399 struct btrfs_trans_handle *trans;
8400 u64 mask = fs_info->sectorsize - 1;
8401 const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8402 
8403 if (!skip_writeback) {
8404 ret = btrfs_wait_ordered_range(&inode->vfs_inode,
8405 inode->vfs_inode.i_size & (~mask),
8406 (u64)-1);
8407 if (ret)
8408 return ret;
8409 }
8410 
8411 /*
8412 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
8413 * things going on here:
8414 *
8415 * 1) We need to reserve space to update our inode.
8416 *
8417 * 2) We need to have something to cache all the space that is going to
8418 * be freed up by the truncate operation, but also have some slack
8419 * space reserved in case it uses space during the truncate (thank you
8420 * very much snapshotting).
8421 *
8422 * And we need these to be separate. The fact is we can use a lot of
8423 * space doing the truncate, and we have no earthly idea how much space
8424 * we will use, so we need the truncate reservation to be separate so it
8425 * doesn't end up using space reserved for updating the inode. We also
8426 * need to be able to stop the transaction and start a new one, which
8427 * means we need to be able to update the inode several times, and we
8428 * have no way of knowing how many times that will be, so we can't just
8429 * reserve 1 item for the entirety of the operation, so that has to be
8430 * done separately as well.
8431 * 8432 * So that leaves us with 8433 * 8434 * 1) rsv - for the truncate reservation, which we will steal from the 8435 * transaction reservation. 8436 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for 8437 * updating the inode. 8438 */ 8439 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 8440 if (!rsv) 8441 return -ENOMEM; 8442 rsv->size = min_size; 8443 rsv->failfast = true; 8444 8445 /* 8446 * 1 for the truncate slack space 8447 * 1 for updating the inode. 8448 */ 8449 trans = btrfs_start_transaction(root, 2); 8450 if (IS_ERR(trans)) { 8451 ret = PTR_ERR(trans); 8452 goto out; 8453 } 8454 8455 /* Migrate the slack space for the truncate to our reserve */ 8456 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 8457 min_size, false); 8458 /* 8459 * We have reserved 2 metadata units when we started the transaction and 8460 * min_size matches 1 unit, so this should never fail, but if it does, 8461 * it's not critical we just fail truncation. 8462 */ 8463 if (WARN_ON(ret)) { 8464 btrfs_end_transaction(trans); 8465 goto out; 8466 } 8467 8468 trans->block_rsv = rsv; 8469 8470 while (1) { 8471 struct extent_state *cached_state = NULL; 8472 const u64 new_size = inode->vfs_inode.i_size; 8473 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); 8474 8475 control.new_size = new_size; 8476 lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); 8477 /* 8478 * We want to drop from the next block forward in case this new 8479 * size is not block aligned since we will be keeping the last 8480 * block of the extent just the way it is. 8481 */ 8482 btrfs_drop_extent_map_range(inode, 8483 ALIGN(new_size, fs_info->sectorsize), 8484 (u64)-1, false); 8485 8486 ret = btrfs_truncate_inode_items(trans, root, &control); 8487 8488 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); 8489 btrfs_inode_safe_disk_i_size_write(inode, control.last_size); 8490 8491 unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); 8492 8493 trans->block_rsv = &fs_info->trans_block_rsv; 8494 if (ret != -ENOSPC && ret != -EAGAIN) 8495 break; 8496 8497 ret = btrfs_update_inode(trans, root, inode); 8498 if (ret) 8499 break; 8500 8501 btrfs_end_transaction(trans); 8502 btrfs_btree_balance_dirty(fs_info); 8503 8504 trans = btrfs_start_transaction(root, 2); 8505 if (IS_ERR(trans)) { 8506 ret = PTR_ERR(trans); 8507 trans = NULL; 8508 break; 8509 } 8510 8511 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); 8512 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 8513 rsv, min_size, false); 8514 /* 8515 * We have reserved 2 metadata units when we started the 8516 * transaction and min_size matches 1 unit, so this should never 8517 * fail, but if it does, it's not critical we just fail truncation. 8518 */ 8519 if (WARN_ON(ret)) 8520 break; 8521 8522 trans->block_rsv = rsv; 8523 } 8524 8525 /* 8526 * We can't call btrfs_truncate_block inside a trans handle as we could 8527 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we 8528 * know we've truncated everything except the last little bit, and can 8529 * do btrfs_truncate_block and then update the disk_i_size. 
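* That is what the code below does: end the transaction, zero the tail
* block outside of any transaction, and only then start a new transaction
* to update the disk i_size.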
8530 */ 8531 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) { 8532 btrfs_end_transaction(trans); 8533 btrfs_btree_balance_dirty(fs_info); 8534 8535 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0); 8536 if (ret) 8537 goto out; 8538 trans = btrfs_start_transaction(root, 1); 8539 if (IS_ERR(trans)) { 8540 ret = PTR_ERR(trans); 8541 goto out; 8542 } 8543 btrfs_inode_safe_disk_i_size_write(inode, 0); 8544 } 8545 8546 if (trans) { 8547 int ret2; 8548 8549 trans->block_rsv = &fs_info->trans_block_rsv; 8550 ret2 = btrfs_update_inode(trans, root, inode); 8551 if (ret2 && !ret) 8552 ret = ret2; 8553 8554 ret2 = btrfs_end_transaction(trans); 8555 if (ret2 && !ret) 8556 ret = ret2; 8557 btrfs_btree_balance_dirty(fs_info); 8558 } 8559 out: 8560 btrfs_free_block_rsv(fs_info, rsv); 8561 /* 8562 * So if we truncate and then write and fsync we normally would just 8563 * write the extents that changed, which is a problem if we need to 8564 * first truncate that entire inode. So set this flag so we write out 8565 * all of the extents in the inode to the sync log so we're completely 8566 * safe. 8567 * 8568 * If no extents were dropped or trimmed we don't need to force the next 8569 * fsync to truncate all the inode's items from the log and re-log them 8570 * all. This means the truncate operation did not change the file size, 8571 * or changed it to a smaller size but there was only an implicit hole 8572 * between the old i_size and the new i_size, and there were no prealloc 8573 * extents beyond i_size to drop. 8574 */ 8575 if (control.extents_found > 0) 8576 btrfs_set_inode_full_sync(inode); 8577 8578 return ret; 8579 } 8580 8581 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap, 8582 struct inode *dir) 8583 { 8584 struct inode *inode; 8585 8586 inode = new_inode(dir->i_sb); 8587 if (inode) { 8588 /* 8589 * Subvolumes don't inherit the sgid bit or the parent's gid if 8590 * the parent's sgid bit is set. This is probably a bug. 
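* (Passing a NULL dir to inode_init_owner() below is what bypasses the
* usual sgid/gid inheritance.)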
8591 */ 8592 inode_init_owner(idmap, inode, NULL, 8593 S_IFDIR | (~current_umask() & S_IRWXUGO)); 8594 inode->i_op = &btrfs_dir_inode_operations; 8595 inode->i_fop = &btrfs_dir_file_operations; 8596 } 8597 return inode; 8598 } 8599 8600 struct inode *btrfs_alloc_inode(struct super_block *sb) 8601 { 8602 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 8603 struct btrfs_inode *ei; 8604 struct inode *inode; 8605 8606 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL); 8607 if (!ei) 8608 return NULL; 8609 8610 ei->root = NULL; 8611 ei->generation = 0; 8612 ei->last_trans = 0; 8613 ei->last_sub_trans = 0; 8614 ei->logged_trans = 0; 8615 ei->delalloc_bytes = 0; 8616 ei->new_delalloc_bytes = 0; 8617 ei->defrag_bytes = 0; 8618 ei->disk_i_size = 0; 8619 ei->flags = 0; 8620 ei->ro_flags = 0; 8621 ei->csum_bytes = 0; 8622 ei->index_cnt = (u64)-1; 8623 ei->dir_index = 0; 8624 ei->last_unlink_trans = 0; 8625 ei->last_reflink_trans = 0; 8626 ei->last_log_commit = 0; 8627 8628 spin_lock_init(&ei->lock); 8629 ei->outstanding_extents = 0; 8630 if (sb->s_magic != BTRFS_TEST_MAGIC) 8631 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, 8632 BTRFS_BLOCK_RSV_DELALLOC); 8633 ei->runtime_flags = 0; 8634 ei->prop_compress = BTRFS_COMPRESS_NONE; 8635 ei->defrag_compress = BTRFS_COMPRESS_NONE; 8636 8637 ei->delayed_node = NULL; 8638 8639 ei->i_otime.tv_sec = 0; 8640 ei->i_otime.tv_nsec = 0; 8641 8642 inode = &ei->vfs_inode; 8643 extent_map_tree_init(&ei->extent_tree); 8644 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); 8645 ei->io_tree.inode = ei; 8646 extent_io_tree_init(fs_info, &ei->file_extent_tree, 8647 IO_TREE_INODE_FILE_EXTENT); 8648 mutex_init(&ei->log_mutex); 8649 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 8650 INIT_LIST_HEAD(&ei->delalloc_inodes); 8651 INIT_LIST_HEAD(&ei->delayed_iput); 8652 RB_CLEAR_NODE(&ei->rb_node); 8653 init_rwsem(&ei->i_mmap_lock); 8654 8655 return inode; 8656 } 8657 8658 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 8659 void btrfs_test_destroy_inode(struct inode *inode) 8660 { 8661 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 8662 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8663 } 8664 #endif 8665 8666 void btrfs_free_inode(struct inode *inode) 8667 { 8668 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8669 } 8670 8671 void btrfs_destroy_inode(struct inode *vfs_inode) 8672 { 8673 struct btrfs_ordered_extent *ordered; 8674 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 8675 struct btrfs_root *root = inode->root; 8676 bool freespace_inode; 8677 8678 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); 8679 WARN_ON(vfs_inode->i_data.nrpages); 8680 WARN_ON(inode->block_rsv.reserved); 8681 WARN_ON(inode->block_rsv.size); 8682 WARN_ON(inode->outstanding_extents); 8683 if (!S_ISDIR(vfs_inode->i_mode)) { 8684 WARN_ON(inode->delalloc_bytes); 8685 WARN_ON(inode->new_delalloc_bytes); 8686 } 8687 WARN_ON(inode->csum_bytes); 8688 WARN_ON(inode->defrag_bytes); 8689 8690 /* 8691 * This can happen where we create an inode, but somebody else also 8692 * created the same inode and we need to destroy the one we already 8693 * created. 8694 */ 8695 if (!root) 8696 return; 8697 8698 /* 8699 * If this is a free space inode do not take the ordered extents lockdep 8700 * map. 
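* (Free space inode ordered extents are processed while a transaction
* commit is running, where this lockdep map could report false
* positives.)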
8701 */
8702 freespace_inode = btrfs_is_free_space_inode(inode);
8703 
8704 while (1) {
8705 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8706 if (!ordered)
8707 break;
8708 else {
8709 btrfs_err(root->fs_info,
8710 "found ordered extent %llu %llu on inode cleanup",
8711 ordered->file_offset, ordered->num_bytes);
8712 
8713 if (!freespace_inode)
8714 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
8715 
8716 btrfs_remove_ordered_extent(inode, ordered);
8717 btrfs_put_ordered_extent(ordered);
8718 btrfs_put_ordered_extent(ordered);
8719 }
8720 }
8721 btrfs_qgroup_check_reserved_leak(inode);
8722 inode_tree_del(inode);
8723 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8724 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8725 btrfs_put_root(inode->root);
8726 }
8727 
8728 int btrfs_drop_inode(struct inode *inode)
8729 {
8730 struct btrfs_root *root = BTRFS_I(inode)->root;
8731 
8732 if (root == NULL)
8733 return 1;
8734 
8735 /* The snap/subvol tree is being deleted. */
8736 if (btrfs_root_refs(&root->root_item) == 0)
8737 return 1;
8738 else
8739 return generic_drop_inode(inode);
8740 }
8741 
8742 static void init_once(void *foo)
8743 {
8744 struct btrfs_inode *ei = foo;
8745 
8746 inode_init_once(&ei->vfs_inode);
8747 }
8748 
8749 void __cold btrfs_destroy_cachep(void)
8750 {
8751 /*
8752 * Make sure all delayed RCU-freed inodes are flushed before we
8753 * destroy the cache.
8754 */
8755 rcu_barrier();
8756 bioset_exit(&btrfs_dio_bioset);
8757 kmem_cache_destroy(btrfs_inode_cachep);
8758 }
8759 
8760 int __init btrfs_init_cachep(void)
8761 {
8762 btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8763 sizeof(struct btrfs_inode), 0,
8764 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
8765 init_once);
8766 if (!btrfs_inode_cachep)
8767 goto fail;
8768 
8769 if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
8770 offsetof(struct btrfs_dio_private, bbio.bio),
8771 BIOSET_NEED_BVECS))
8772 goto fail;
8773 
8774 return 0;
8775 fail:
8776 btrfs_destroy_cachep();
8777 return -ENOMEM;
8778 }
8779 
8780 static int btrfs_getattr(struct mnt_idmap *idmap,
8781 const struct path *path, struct kstat *stat,
8782 u32 request_mask, unsigned int flags)
8783 {
8784 u64 delalloc_bytes;
8785 u64 inode_bytes;
8786 struct inode *inode = d_inode(path->dentry);
8787 u32 blocksize = inode->i_sb->s_blocksize;
8788 u32 bi_flags = BTRFS_I(inode)->flags;
8789 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
8790 
8791 stat->result_mask |= STATX_BTIME;
8792 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
8793 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
8794 if (bi_flags & BTRFS_INODE_APPEND)
8795 stat->attributes |= STATX_ATTR_APPEND;
8796 if (bi_flags & BTRFS_INODE_COMPRESS)
8797 stat->attributes |= STATX_ATTR_COMPRESSED;
8798 if (bi_flags & BTRFS_INODE_IMMUTABLE)
8799 stat->attributes |= STATX_ATTR_IMMUTABLE;
8800 if (bi_flags & BTRFS_INODE_NODUMP)
8801 stat->attributes |= STATX_ATTR_NODUMP;
8802 if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
8803 stat->attributes |= STATX_ATTR_VERITY;
8804 
8805 stat->attributes_mask |= (STATX_ATTR_APPEND |
8806 STATX_ATTR_COMPRESSED |
8807 STATX_ATTR_IMMUTABLE |
8808 STATX_ATTR_NODUMP);
8809 
8810 generic_fillattr(idmap, inode, stat);
8811 stat->dev = BTRFS_I(inode)->root->anon_dev;
8812 
8813 spin_lock(&BTRFS_I(inode)->lock);
8814 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8815 inode_bytes = inode_get_bytes(inode);
8816 spin_unlock(&BTRFS_I(inode)->lock);
8817 stat->blocks = (ALIGN(inode_bytes, blocksize) +
8818 ALIGN(delalloc_bytes,
blocksize)) >> SECTOR_SHIFT; 8819 return 0; 8820 } 8821 8822 static int btrfs_rename_exchange(struct inode *old_dir, 8823 struct dentry *old_dentry, 8824 struct inode *new_dir, 8825 struct dentry *new_dentry) 8826 { 8827 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 8828 struct btrfs_trans_handle *trans; 8829 unsigned int trans_num_items; 8830 struct btrfs_root *root = BTRFS_I(old_dir)->root; 8831 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 8832 struct inode *new_inode = new_dentry->d_inode; 8833 struct inode *old_inode = old_dentry->d_inode; 8834 struct timespec64 ctime = current_time(old_inode); 8835 struct btrfs_rename_ctx old_rename_ctx; 8836 struct btrfs_rename_ctx new_rename_ctx; 8837 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 8838 u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); 8839 u64 old_idx = 0; 8840 u64 new_idx = 0; 8841 int ret; 8842 int ret2; 8843 bool need_abort = false; 8844 struct fscrypt_name old_fname, new_fname; 8845 struct fscrypt_str *old_name, *new_name; 8846 8847 /* 8848 * For non-subvolumes allow exchange only within one subvolume, in the 8849 * same inode namespace. Two subvolumes (represented as directory) can 8850 * be exchanged as they're a logical link and have a fixed inode number. 8851 */ 8852 if (root != dest && 8853 (old_ino != BTRFS_FIRST_FREE_OBJECTID || 8854 new_ino != BTRFS_FIRST_FREE_OBJECTID)) 8855 return -EXDEV; 8856 8857 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 8858 if (ret) 8859 return ret; 8860 8861 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 8862 if (ret) { 8863 fscrypt_free_filename(&old_fname); 8864 return ret; 8865 } 8866 8867 old_name = &old_fname.disk_name; 8868 new_name = &new_fname.disk_name; 8869 8870 /* close the race window with snapshot create/destroy ioctl */ 8871 if (old_ino == BTRFS_FIRST_FREE_OBJECTID || 8872 new_ino == BTRFS_FIRST_FREE_OBJECTID) 8873 down_read(&fs_info->subvol_sem); 8874 8875 /* 8876 * For each inode: 8877 * 1 to remove old dir item 8878 * 1 to remove old dir index 8879 * 1 to add new dir item 8880 * 1 to add new dir index 8881 * 1 to update parent inode 8882 * 8883 * If the parents are the same, we only need to account for one 8884 */ 8885 trans_num_items = (old_dir == new_dir ? 9 : 10); 8886 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8887 /* 8888 * 1 to remove old root ref 8889 * 1 to remove old root backref 8890 * 1 to add new root ref 8891 * 1 to add new root backref 8892 */ 8893 trans_num_items += 4; 8894 } else { 8895 /* 8896 * 1 to update inode item 8897 * 1 to remove old inode ref 8898 * 1 to add new inode ref 8899 */ 8900 trans_num_items += 3; 8901 } 8902 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 8903 trans_num_items += 4; 8904 else 8905 trans_num_items += 3; 8906 trans = btrfs_start_transaction(root, trans_num_items); 8907 if (IS_ERR(trans)) { 8908 ret = PTR_ERR(trans); 8909 goto out_notrans; 8910 } 8911 8912 if (dest != root) { 8913 ret = btrfs_record_root_in_trans(trans, dest); 8914 if (ret) 8915 goto out_fail; 8916 } 8917 8918 /* 8919 * We need to find a free sequence number both in the source and 8920 * in the destination directory for the exchange. 8921 */ 8922 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); 8923 if (ret) 8924 goto out_fail; 8925 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); 8926 if (ret) 8927 goto out_fail; 8928 8929 BTRFS_I(old_inode)->dir_index = 0ULL; 8930 BTRFS_I(new_inode)->dir_index = 0ULL; 8931 8932 /* Reference for the source. 
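* In an exchange, old_inode takes new_name inside new_dir, so insert
* that inode ref first. The destination inode gets the symmetric
* treatment right below.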
*/ 8933 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8934 /* force full log commit if subvolume involved. */ 8935 btrfs_set_log_full_commit(trans); 8936 } else { 8937 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino, 8938 btrfs_ino(BTRFS_I(new_dir)), 8939 old_idx); 8940 if (ret) 8941 goto out_fail; 8942 need_abort = true; 8943 } 8944 8945 /* And now for the dest. */ 8946 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8947 /* force full log commit if subvolume involved. */ 8948 btrfs_set_log_full_commit(trans); 8949 } else { 8950 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino, 8951 btrfs_ino(BTRFS_I(old_dir)), 8952 new_idx); 8953 if (ret) { 8954 if (need_abort) 8955 btrfs_abort_transaction(trans, ret); 8956 goto out_fail; 8957 } 8958 } 8959 8960 /* Update inode version and ctime/mtime. */ 8961 inode_inc_iversion(old_dir); 8962 inode_inc_iversion(new_dir); 8963 inode_inc_iversion(old_inode); 8964 inode_inc_iversion(new_inode); 8965 old_dir->i_mtime = ctime; 8966 old_dir->i_ctime = ctime; 8967 new_dir->i_mtime = ctime; 8968 new_dir->i_ctime = ctime; 8969 old_inode->i_ctime = ctime; 8970 new_inode->i_ctime = ctime; 8971 8972 if (old_dentry->d_parent != new_dentry->d_parent) { 8973 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 8974 BTRFS_I(old_inode), true); 8975 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), 8976 BTRFS_I(new_inode), true); 8977 } 8978 8979 /* src is a subvolume */ 8980 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8981 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 8982 } else { /* src is an inode */ 8983 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 8984 BTRFS_I(old_dentry->d_inode), 8985 old_name, &old_rename_ctx); 8986 if (!ret) 8987 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 8988 } 8989 if (ret) { 8990 btrfs_abort_transaction(trans, ret); 8991 goto out_fail; 8992 } 8993 8994 /* dest is a subvolume */ 8995 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8996 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 8997 } else { /* dest is an inode */ 8998 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir), 8999 BTRFS_I(new_dentry->d_inode), 9000 new_name, &new_rename_ctx); 9001 if (!ret) 9002 ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode)); 9003 } 9004 if (ret) { 9005 btrfs_abort_transaction(trans, ret); 9006 goto out_fail; 9007 } 9008 9009 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9010 new_name, 0, old_idx); 9011 if (ret) { 9012 btrfs_abort_transaction(trans, ret); 9013 goto out_fail; 9014 } 9015 9016 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), 9017 old_name, 0, new_idx); 9018 if (ret) { 9019 btrfs_abort_transaction(trans, ret); 9020 goto out_fail; 9021 } 9022 9023 if (old_inode->i_nlink == 1) 9024 BTRFS_I(old_inode)->dir_index = old_idx; 9025 if (new_inode->i_nlink == 1) 9026 BTRFS_I(new_inode)->dir_index = new_idx; 9027 9028 /* 9029 * Now pin the logs of the roots. We do it to ensure that no other task 9030 * can sync the logs while we are in progress with the rename, because 9031 * that could result in an inconsistency in case any of the inodes that 9032 * are part of this rename operation were logged before. 9033 */ 9034 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9035 btrfs_pin_log_trans(root); 9036 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 9037 btrfs_pin_log_trans(dest); 9038 9039 /* Do the log updates for all inodes. 
	 */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   old_rename_ctx.index, new_dentry->d_parent);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
				   new_rename_ctx.index, old_dentry->d_parent);

	/* Now unpin the logs. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(dest);
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

	fscrypt_free_filename(&new_fname);
	fscrypt_free_filename(&old_fname);
	return ret;
}

static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
					struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		inode_init_owner(idmap, inode, dir,
				 S_IFCHR | WHITEOUT_MODE);
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
	}
	return inode;
}

static int btrfs_rename(struct mnt_idmap *idmap,
			struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_new_inode_args whiteout_args = {
		.dir = old_dir,
		.dentry = old_dentry,
	};
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = d_inode(new_dentry);
	struct inode *old_inode = d_inode(old_dentry);
	struct btrfs_rename_ctx rename_ctx;
	u64 index = 0;
	int ret;
	int ret2;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	struct fscrypt_name old_fname, new_fname;

	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* We only allow renaming a subvolume link between subvolumes. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	/* check for collisions, even if the name isn't there */
	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
	if (ret) {
		if (ret == -EEXIST) {
			/* We shouldn't get -EEXIST without a new_inode. */
			if (WARN_ON(!new_inode))
				goto out_fscrypt_names;
		} else {
			/* maybe -EOVERFLOW */
			goto out_fscrypt_names;
		}
	}
	ret = 0;

	/*
	 * we're using rename to replace one file with another.
Start IO on it 9146 * now so we don't add too much work to the end of the transaction 9147 */ 9148 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 9149 filemap_flush(old_inode->i_mapping); 9150 9151 if (flags & RENAME_WHITEOUT) { 9152 whiteout_args.inode = new_whiteout_inode(idmap, old_dir); 9153 if (!whiteout_args.inode) { 9154 ret = -ENOMEM; 9155 goto out_fscrypt_names; 9156 } 9157 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); 9158 if (ret) 9159 goto out_whiteout_inode; 9160 } else { 9161 /* 1 to update the old parent inode. */ 9162 trans_num_items = 1; 9163 } 9164 9165 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9166 /* Close the race window with snapshot create/destroy ioctl */ 9167 down_read(&fs_info->subvol_sem); 9168 /* 9169 * 1 to remove old root ref 9170 * 1 to remove old root backref 9171 * 1 to add new root ref 9172 * 1 to add new root backref 9173 */ 9174 trans_num_items += 4; 9175 } else { 9176 /* 9177 * 1 to update inode 9178 * 1 to remove old inode ref 9179 * 1 to add new inode ref 9180 */ 9181 trans_num_items += 3; 9182 } 9183 /* 9184 * 1 to remove old dir item 9185 * 1 to remove old dir index 9186 * 1 to add new dir item 9187 * 1 to add new dir index 9188 */ 9189 trans_num_items += 4; 9190 /* 1 to update new parent inode if it's not the same as the old parent */ 9191 if (new_dir != old_dir) 9192 trans_num_items++; 9193 if (new_inode) { 9194 /* 9195 * 1 to update inode 9196 * 1 to remove inode ref 9197 * 1 to remove dir item 9198 * 1 to remove dir index 9199 * 1 to possibly add orphan item 9200 */ 9201 trans_num_items += 5; 9202 } 9203 trans = btrfs_start_transaction(root, trans_num_items); 9204 if (IS_ERR(trans)) { 9205 ret = PTR_ERR(trans); 9206 goto out_notrans; 9207 } 9208 9209 if (dest != root) { 9210 ret = btrfs_record_root_in_trans(trans, dest); 9211 if (ret) 9212 goto out_fail; 9213 } 9214 9215 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 9216 if (ret) 9217 goto out_fail; 9218 9219 BTRFS_I(old_inode)->dir_index = 0ULL; 9220 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9221 /* force full log commit if subvolume involved. 
*/ 9222 btrfs_set_log_full_commit(trans); 9223 } else { 9224 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name, 9225 old_ino, btrfs_ino(BTRFS_I(new_dir)), 9226 index); 9227 if (ret) 9228 goto out_fail; 9229 } 9230 9231 inode_inc_iversion(old_dir); 9232 inode_inc_iversion(new_dir); 9233 inode_inc_iversion(old_inode); 9234 old_dir->i_mtime = current_time(old_dir); 9235 old_dir->i_ctime = old_dir->i_mtime; 9236 new_dir->i_mtime = old_dir->i_mtime; 9237 new_dir->i_ctime = old_dir->i_mtime; 9238 old_inode->i_ctime = old_dir->i_mtime; 9239 9240 if (old_dentry->d_parent != new_dentry->d_parent) 9241 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9242 BTRFS_I(old_inode), true); 9243 9244 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9245 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 9246 } else { 9247 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 9248 BTRFS_I(d_inode(old_dentry)), 9249 &old_fname.disk_name, &rename_ctx); 9250 if (!ret) 9251 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 9252 } 9253 if (ret) { 9254 btrfs_abort_transaction(trans, ret); 9255 goto out_fail; 9256 } 9257 9258 if (new_inode) { 9259 inode_inc_iversion(new_inode); 9260 new_inode->i_ctime = current_time(new_inode); 9261 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 9262 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9263 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 9264 BUG_ON(new_inode->i_nlink == 0); 9265 } else { 9266 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), 9267 BTRFS_I(d_inode(new_dentry)), 9268 &new_fname.disk_name); 9269 } 9270 if (!ret && new_inode->i_nlink == 0) 9271 ret = btrfs_orphan_add(trans, 9272 BTRFS_I(d_inode(new_dentry))); 9273 if (ret) { 9274 btrfs_abort_transaction(trans, ret); 9275 goto out_fail; 9276 } 9277 } 9278 9279 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9280 &new_fname.disk_name, 0, index); 9281 if (ret) { 9282 btrfs_abort_transaction(trans, ret); 9283 goto out_fail; 9284 } 9285 9286 if (old_inode->i_nlink == 1) 9287 BTRFS_I(old_inode)->dir_index = index; 9288 9289 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9290 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 9291 rename_ctx.index, new_dentry->d_parent); 9292 9293 if (flags & RENAME_WHITEOUT) { 9294 ret = btrfs_create_new_inode(trans, &whiteout_args); 9295 if (ret) { 9296 btrfs_abort_transaction(trans, ret); 9297 goto out_fail; 9298 } else { 9299 unlock_new_inode(whiteout_args.inode); 9300 iput(whiteout_args.inode); 9301 whiteout_args.inode = NULL; 9302 } 9303 } 9304 out_fail: 9305 ret2 = btrfs_end_transaction(trans); 9306 ret = ret ? 
ret : ret2; 9307 out_notrans: 9308 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9309 up_read(&fs_info->subvol_sem); 9310 if (flags & RENAME_WHITEOUT) 9311 btrfs_new_inode_args_destroy(&whiteout_args); 9312 out_whiteout_inode: 9313 if (flags & RENAME_WHITEOUT) 9314 iput(whiteout_args.inode); 9315 out_fscrypt_names: 9316 fscrypt_free_filename(&old_fname); 9317 fscrypt_free_filename(&new_fname); 9318 return ret; 9319 } 9320 9321 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir, 9322 struct dentry *old_dentry, struct inode *new_dir, 9323 struct dentry *new_dentry, unsigned int flags) 9324 { 9325 int ret; 9326 9327 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 9328 return -EINVAL; 9329 9330 if (flags & RENAME_EXCHANGE) 9331 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir, 9332 new_dentry); 9333 else 9334 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir, 9335 new_dentry, flags); 9336 9337 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); 9338 9339 return ret; 9340 } 9341 9342 struct btrfs_delalloc_work { 9343 struct inode *inode; 9344 struct completion completion; 9345 struct list_head list; 9346 struct btrfs_work work; 9347 }; 9348 9349 static void btrfs_run_delalloc_work(struct btrfs_work *work) 9350 { 9351 struct btrfs_delalloc_work *delalloc_work; 9352 struct inode *inode; 9353 9354 delalloc_work = container_of(work, struct btrfs_delalloc_work, 9355 work); 9356 inode = delalloc_work->inode; 9357 filemap_flush(inode->i_mapping); 9358 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9359 &BTRFS_I(inode)->runtime_flags)) 9360 filemap_flush(inode->i_mapping); 9361 9362 iput(inode); 9363 complete(&delalloc_work->completion); 9364 } 9365 9366 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 9367 { 9368 struct btrfs_delalloc_work *work; 9369 9370 work = kmalloc(sizeof(*work), GFP_NOFS); 9371 if (!work) 9372 return NULL; 9373 9374 init_completion(&work->completion); 9375 INIT_LIST_HEAD(&work->list); 9376 work->inode = inode; 9377 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); 9378 9379 return work; 9380 } 9381 9382 /* 9383 * some fairly slow code that needs optimization. This walks the list 9384 * of all the inodes with pending delalloc and forces them to disk. 
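 *
 * The flush strategy below is keyed off the writeback_control passed in; a
 * sketch of the two caller setups (mirroring btrfs_start_delalloc_snapshot()
 * and btrfs_start_delalloc_roots() further down):
 *
 *	.nr_to_write = LONG_MAX  -> full flush, one queued work item per inode
 *	.nr_to_write = nr        -> bounded flush via filemap_fdatawrite_wbc(),
 *				    stopping once nr_to_write is used up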
9385 */ 9386 static int start_delalloc_inodes(struct btrfs_root *root, 9387 struct writeback_control *wbc, bool snapshot, 9388 bool in_reclaim_context) 9389 { 9390 struct btrfs_inode *binode; 9391 struct inode *inode; 9392 struct btrfs_delalloc_work *work, *next; 9393 struct list_head works; 9394 struct list_head splice; 9395 int ret = 0; 9396 bool full_flush = wbc->nr_to_write == LONG_MAX; 9397 9398 INIT_LIST_HEAD(&works); 9399 INIT_LIST_HEAD(&splice); 9400 9401 mutex_lock(&root->delalloc_mutex); 9402 spin_lock(&root->delalloc_lock); 9403 list_splice_init(&root->delalloc_inodes, &splice); 9404 while (!list_empty(&splice)) { 9405 binode = list_entry(splice.next, struct btrfs_inode, 9406 delalloc_inodes); 9407 9408 list_move_tail(&binode->delalloc_inodes, 9409 &root->delalloc_inodes); 9410 9411 if (in_reclaim_context && 9412 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) 9413 continue; 9414 9415 inode = igrab(&binode->vfs_inode); 9416 if (!inode) { 9417 cond_resched_lock(&root->delalloc_lock); 9418 continue; 9419 } 9420 spin_unlock(&root->delalloc_lock); 9421 9422 if (snapshot) 9423 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 9424 &binode->runtime_flags); 9425 if (full_flush) { 9426 work = btrfs_alloc_delalloc_work(inode); 9427 if (!work) { 9428 iput(inode); 9429 ret = -ENOMEM; 9430 goto out; 9431 } 9432 list_add_tail(&work->list, &works); 9433 btrfs_queue_work(root->fs_info->flush_workers, 9434 &work->work); 9435 } else { 9436 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); 9437 btrfs_add_delayed_iput(BTRFS_I(inode)); 9438 if (ret || wbc->nr_to_write <= 0) 9439 goto out; 9440 } 9441 cond_resched(); 9442 spin_lock(&root->delalloc_lock); 9443 } 9444 spin_unlock(&root->delalloc_lock); 9445 9446 out: 9447 list_for_each_entry_safe(work, next, &works, list) { 9448 list_del_init(&work->list); 9449 wait_for_completion(&work->completion); 9450 kfree(work); 9451 } 9452 9453 if (!list_empty(&splice)) { 9454 spin_lock(&root->delalloc_lock); 9455 list_splice_tail(&splice, &root->delalloc_inodes); 9456 spin_unlock(&root->delalloc_lock); 9457 } 9458 mutex_unlock(&root->delalloc_mutex); 9459 return ret; 9460 } 9461 9462 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context) 9463 { 9464 struct writeback_control wbc = { 9465 .nr_to_write = LONG_MAX, 9466 .sync_mode = WB_SYNC_NONE, 9467 .range_start = 0, 9468 .range_end = LLONG_MAX, 9469 }; 9470 struct btrfs_fs_info *fs_info = root->fs_info; 9471 9472 if (BTRFS_FS_ERROR(fs_info)) 9473 return -EROFS; 9474 9475 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context); 9476 } 9477 9478 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, 9479 bool in_reclaim_context) 9480 { 9481 struct writeback_control wbc = { 9482 .nr_to_write = nr, 9483 .sync_mode = WB_SYNC_NONE, 9484 .range_start = 0, 9485 .range_end = LLONG_MAX, 9486 }; 9487 struct btrfs_root *root; 9488 struct list_head splice; 9489 int ret; 9490 9491 if (BTRFS_FS_ERROR(fs_info)) 9492 return -EROFS; 9493 9494 INIT_LIST_HEAD(&splice); 9495 9496 mutex_lock(&fs_info->delalloc_root_mutex); 9497 spin_lock(&fs_info->delalloc_root_lock); 9498 list_splice_init(&fs_info->delalloc_roots, &splice); 9499 while (!list_empty(&splice)) { 9500 /* 9501 * Reset nr_to_write here so we know that we're doing a full 9502 * flush. 
9503 */ 9504 if (nr == LONG_MAX) 9505 wbc.nr_to_write = LONG_MAX; 9506 9507 root = list_first_entry(&splice, struct btrfs_root, 9508 delalloc_root); 9509 root = btrfs_grab_root(root); 9510 BUG_ON(!root); 9511 list_move_tail(&root->delalloc_root, 9512 &fs_info->delalloc_roots); 9513 spin_unlock(&fs_info->delalloc_root_lock); 9514 9515 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context); 9516 btrfs_put_root(root); 9517 if (ret < 0 || wbc.nr_to_write <= 0) 9518 goto out; 9519 spin_lock(&fs_info->delalloc_root_lock); 9520 } 9521 spin_unlock(&fs_info->delalloc_root_lock); 9522 9523 ret = 0; 9524 out: 9525 if (!list_empty(&splice)) { 9526 spin_lock(&fs_info->delalloc_root_lock); 9527 list_splice_tail(&splice, &fs_info->delalloc_roots); 9528 spin_unlock(&fs_info->delalloc_root_lock); 9529 } 9530 mutex_unlock(&fs_info->delalloc_root_mutex); 9531 return ret; 9532 } 9533 9534 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, 9535 struct dentry *dentry, const char *symname) 9536 { 9537 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 9538 struct btrfs_trans_handle *trans; 9539 struct btrfs_root *root = BTRFS_I(dir)->root; 9540 struct btrfs_path *path; 9541 struct btrfs_key key; 9542 struct inode *inode; 9543 struct btrfs_new_inode_args new_inode_args = { 9544 .dir = dir, 9545 .dentry = dentry, 9546 }; 9547 unsigned int trans_num_items; 9548 int err; 9549 int name_len; 9550 int datasize; 9551 unsigned long ptr; 9552 struct btrfs_file_extent_item *ei; 9553 struct extent_buffer *leaf; 9554 9555 name_len = strlen(symname); 9556 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 9557 return -ENAMETOOLONG; 9558 9559 inode = new_inode(dir->i_sb); 9560 if (!inode) 9561 return -ENOMEM; 9562 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO); 9563 inode->i_op = &btrfs_symlink_inode_operations; 9564 inode_nohighmem(inode); 9565 inode->i_mapping->a_ops = &btrfs_aops; 9566 btrfs_i_size_write(BTRFS_I(inode), name_len); 9567 inode_set_bytes(inode, name_len); 9568 9569 new_inode_args.inode = inode; 9570 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9571 if (err) 9572 goto out_inode; 9573 /* 1 additional item for the inline extent */ 9574 trans_num_items++; 9575 9576 trans = btrfs_start_transaction(root, trans_num_items); 9577 if (IS_ERR(trans)) { 9578 err = PTR_ERR(trans); 9579 goto out_new_inode_args; 9580 } 9581 9582 err = btrfs_create_new_inode(trans, &new_inode_args); 9583 if (err) 9584 goto out; 9585 9586 path = btrfs_alloc_path(); 9587 if (!path) { 9588 err = -ENOMEM; 9589 btrfs_abort_transaction(trans, err); 9590 discard_new_inode(inode); 9591 inode = NULL; 9592 goto out; 9593 } 9594 key.objectid = btrfs_ino(BTRFS_I(inode)); 9595 key.offset = 0; 9596 key.type = BTRFS_EXTENT_DATA_KEY; 9597 datasize = btrfs_file_extent_calc_inline_size(name_len); 9598 err = btrfs_insert_empty_item(trans, root, path, &key, 9599 datasize); 9600 if (err) { 9601 btrfs_abort_transaction(trans, err); 9602 btrfs_free_path(path); 9603 discard_new_inode(inode); 9604 inode = NULL; 9605 goto out; 9606 } 9607 leaf = path->nodes[0]; 9608 ei = btrfs_item_ptr(leaf, path->slots[0], 9609 struct btrfs_file_extent_item); 9610 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 9611 btrfs_set_file_extent_type(leaf, ei, 9612 BTRFS_FILE_EXTENT_INLINE); 9613 btrfs_set_file_extent_encryption(leaf, ei, 0); 9614 btrfs_set_file_extent_compression(leaf, ei, 0); 9615 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 9616 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 9617 9618 
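	/*
	 * Illustrative sizing note (not part of the original source): the
	 * item reserved above has room for
	 * btrfs_file_extent_calc_inline_size(name_len) bytes, i.e. the
	 * 21-byte inline file extent header plus the target path bytes, so a
	 * 10-byte symlink target occupies a 31-byte item.
	 */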
ptr = btrfs_file_extent_inline_start(ei); 9619 write_extent_buffer(leaf, symname, ptr, name_len); 9620 btrfs_mark_buffer_dirty(leaf); 9621 btrfs_free_path(path); 9622 9623 d_instantiate_new(dentry, inode); 9624 err = 0; 9625 out: 9626 btrfs_end_transaction(trans); 9627 btrfs_btree_balance_dirty(fs_info); 9628 out_new_inode_args: 9629 btrfs_new_inode_args_destroy(&new_inode_args); 9630 out_inode: 9631 if (err) 9632 iput(inode); 9633 return err; 9634 } 9635 9636 static struct btrfs_trans_handle *insert_prealloc_file_extent( 9637 struct btrfs_trans_handle *trans_in, 9638 struct btrfs_inode *inode, 9639 struct btrfs_key *ins, 9640 u64 file_offset) 9641 { 9642 struct btrfs_file_extent_item stack_fi; 9643 struct btrfs_replace_extent_info extent_info; 9644 struct btrfs_trans_handle *trans = trans_in; 9645 struct btrfs_path *path; 9646 u64 start = ins->objectid; 9647 u64 len = ins->offset; 9648 int qgroup_released; 9649 int ret; 9650 9651 memset(&stack_fi, 0, sizeof(stack_fi)); 9652 9653 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 9654 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 9655 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 9656 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 9657 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 9658 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 9659 /* Encryption and other encoding is reserved and all 0 */ 9660 9661 qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len); 9662 if (qgroup_released < 0) 9663 return ERR_PTR(qgroup_released); 9664 9665 if (trans) { 9666 ret = insert_reserved_file_extent(trans, inode, 9667 file_offset, &stack_fi, 9668 true, qgroup_released); 9669 if (ret) 9670 goto free_qgroup; 9671 return trans; 9672 } 9673 9674 extent_info.disk_offset = start; 9675 extent_info.disk_len = len; 9676 extent_info.data_offset = 0; 9677 extent_info.data_len = len; 9678 extent_info.file_offset = file_offset; 9679 extent_info.extent_buf = (char *)&stack_fi; 9680 extent_info.is_new_extent = true; 9681 extent_info.update_times = true; 9682 extent_info.qgroup_reserved = qgroup_released; 9683 extent_info.insertions = 0; 9684 9685 path = btrfs_alloc_path(); 9686 if (!path) { 9687 ret = -ENOMEM; 9688 goto free_qgroup; 9689 } 9690 9691 ret = btrfs_replace_file_extents(inode, path, file_offset, 9692 file_offset + len - 1, &extent_info, 9693 &trans); 9694 btrfs_free_path(path); 9695 if (ret) 9696 goto free_qgroup; 9697 return trans; 9698 9699 free_qgroup: 9700 /* 9701 * We have released qgroup data range at the beginning of the function, 9702 * and normally qgroup_released bytes will be freed when committing 9703 * transaction. 9704 * But if we error out early, we have to free what we have released 9705 * or we leak qgroup data reservation. 
9706 */ 9707 btrfs_qgroup_free_refroot(inode->root->fs_info, 9708 inode->root->root_key.objectid, qgroup_released, 9709 BTRFS_QGROUP_RSV_DATA); 9710 return ERR_PTR(ret); 9711 } 9712 9713 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 9714 u64 start, u64 num_bytes, u64 min_size, 9715 loff_t actual_len, u64 *alloc_hint, 9716 struct btrfs_trans_handle *trans) 9717 { 9718 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 9719 struct extent_map *em; 9720 struct btrfs_root *root = BTRFS_I(inode)->root; 9721 struct btrfs_key ins; 9722 u64 cur_offset = start; 9723 u64 clear_offset = start; 9724 u64 i_size; 9725 u64 cur_bytes; 9726 u64 last_alloc = (u64)-1; 9727 int ret = 0; 9728 bool own_trans = true; 9729 u64 end = start + num_bytes - 1; 9730 9731 if (trans) 9732 own_trans = false; 9733 while (num_bytes > 0) { 9734 cur_bytes = min_t(u64, num_bytes, SZ_256M); 9735 cur_bytes = max(cur_bytes, min_size); 9736 /* 9737 * If we are severely fragmented we could end up with really 9738 * small allocations, so if the allocator is returning small 9739 * chunks lets make its job easier by only searching for those 9740 * sized chunks. 9741 */ 9742 cur_bytes = min(cur_bytes, last_alloc); 9743 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, 9744 min_size, 0, *alloc_hint, &ins, 1, 0); 9745 if (ret) 9746 break; 9747 9748 /* 9749 * We've reserved this space, and thus converted it from 9750 * ->bytes_may_use to ->bytes_reserved. Any error that happens 9751 * from here on out we will only need to clear our reservation 9752 * for the remaining unreserved area, so advance our 9753 * clear_offset by our extent size. 9754 */ 9755 clear_offset += ins.offset; 9756 9757 last_alloc = ins.offset; 9758 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode), 9759 &ins, cur_offset); 9760 /* 9761 * Now that we inserted the prealloc extent we can finally 9762 * decrement the number of reservations in the block group. 9763 * If we did it before, we could race with relocation and have 9764 * relocation miss the reserved extent, making it fail later. 
9765 */ 9766 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9767 if (IS_ERR(trans)) { 9768 ret = PTR_ERR(trans); 9769 btrfs_free_reserved_extent(fs_info, ins.objectid, 9770 ins.offset, 0); 9771 break; 9772 } 9773 9774 em = alloc_extent_map(); 9775 if (!em) { 9776 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset, 9777 cur_offset + ins.offset - 1, false); 9778 btrfs_set_inode_full_sync(BTRFS_I(inode)); 9779 goto next; 9780 } 9781 9782 em->start = cur_offset; 9783 em->orig_start = cur_offset; 9784 em->len = ins.offset; 9785 em->block_start = ins.objectid; 9786 em->block_len = ins.offset; 9787 em->orig_block_len = ins.offset; 9788 em->ram_bytes = ins.offset; 9789 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 9790 em->generation = trans->transid; 9791 9792 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true); 9793 free_extent_map(em); 9794 next: 9795 num_bytes -= ins.offset; 9796 cur_offset += ins.offset; 9797 *alloc_hint = ins.objectid + ins.offset; 9798 9799 inode_inc_iversion(inode); 9800 inode->i_ctime = current_time(inode); 9801 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 9802 if (!(mode & FALLOC_FL_KEEP_SIZE) && 9803 (actual_len > inode->i_size) && 9804 (cur_offset > inode->i_size)) { 9805 if (cur_offset > actual_len) 9806 i_size = actual_len; 9807 else 9808 i_size = cur_offset; 9809 i_size_write(inode, i_size); 9810 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 9811 } 9812 9813 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 9814 9815 if (ret) { 9816 btrfs_abort_transaction(trans, ret); 9817 if (own_trans) 9818 btrfs_end_transaction(trans); 9819 break; 9820 } 9821 9822 if (own_trans) { 9823 btrfs_end_transaction(trans); 9824 trans = NULL; 9825 } 9826 } 9827 if (clear_offset < end) 9828 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 9829 end - clear_offset + 1); 9830 return ret; 9831 } 9832 9833 int btrfs_prealloc_file_range(struct inode *inode, int mode, 9834 u64 start, u64 num_bytes, u64 min_size, 9835 loff_t actual_len, u64 *alloc_hint) 9836 { 9837 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9838 min_size, actual_len, alloc_hint, 9839 NULL); 9840 } 9841 9842 int btrfs_prealloc_file_range_trans(struct inode *inode, 9843 struct btrfs_trans_handle *trans, int mode, 9844 u64 start, u64 num_bytes, u64 min_size, 9845 loff_t actual_len, u64 *alloc_hint) 9846 { 9847 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9848 min_size, actual_len, alloc_hint, trans); 9849 } 9850 9851 static int btrfs_permission(struct mnt_idmap *idmap, 9852 struct inode *inode, int mask) 9853 { 9854 struct btrfs_root *root = BTRFS_I(inode)->root; 9855 umode_t mode = inode->i_mode; 9856 9857 if (mask & MAY_WRITE && 9858 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 9859 if (btrfs_root_readonly(root)) 9860 return -EROFS; 9861 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 9862 return -EACCES; 9863 } 9864 return generic_permission(idmap, inode, mask); 9865 } 9866 9867 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 9868 struct file *file, umode_t mode) 9869 { 9870 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 9871 struct btrfs_trans_handle *trans; 9872 struct btrfs_root *root = BTRFS_I(dir)->root; 9873 struct inode *inode; 9874 struct btrfs_new_inode_args new_inode_args = { 9875 .dir = dir, 9876 .dentry = file->f_path.dentry, 9877 .orphan = true, 9878 }; 9879 unsigned int trans_num_items; 9880 int ret; 9881 9882 inode = new_inode(dir->i_sb); 9883 if (!inode) 9884 return -ENOMEM; 
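	/*
	 * (Illustration, not part of the original source: this callback
	 * backs userspace O_TMPFILE opens such as
	 *
	 *	int fd = open("/mnt/subvol", O_TMPFILE | O_RDWR, 0600);
	 *
	 * The new inode starts out as an orphan and is reclaimed on the last
	 * close unless it is given a name with linkat() first.)
	 */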
9885 inode_init_owner(idmap, inode, dir, mode); 9886 inode->i_fop = &btrfs_file_operations; 9887 inode->i_op = &btrfs_file_inode_operations; 9888 inode->i_mapping->a_ops = &btrfs_aops; 9889 9890 new_inode_args.inode = inode; 9891 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9892 if (ret) 9893 goto out_inode; 9894 9895 trans = btrfs_start_transaction(root, trans_num_items); 9896 if (IS_ERR(trans)) { 9897 ret = PTR_ERR(trans); 9898 goto out_new_inode_args; 9899 } 9900 9901 ret = btrfs_create_new_inode(trans, &new_inode_args); 9902 9903 /* 9904 * We set number of links to 0 in btrfs_create_new_inode(), and here we 9905 * set it to 1 because d_tmpfile() will issue a warning if the count is 9906 * 0, through: 9907 * 9908 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 9909 */ 9910 set_nlink(inode, 1); 9911 9912 if (!ret) { 9913 d_tmpfile(file, inode); 9914 unlock_new_inode(inode); 9915 mark_inode_dirty(inode); 9916 } 9917 9918 btrfs_end_transaction(trans); 9919 btrfs_btree_balance_dirty(fs_info); 9920 out_new_inode_args: 9921 btrfs_new_inode_args_destroy(&new_inode_args); 9922 out_inode: 9923 if (ret) 9924 iput(inode); 9925 return finish_open_simple(file, ret); 9926 } 9927 9928 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end) 9929 { 9930 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9931 unsigned long index = start >> PAGE_SHIFT; 9932 unsigned long end_index = end >> PAGE_SHIFT; 9933 struct page *page; 9934 u32 len; 9935 9936 ASSERT(end + 1 - start <= U32_MAX); 9937 len = end + 1 - start; 9938 while (index <= end_index) { 9939 page = find_get_page(inode->vfs_inode.i_mapping, index); 9940 ASSERT(page); /* Pages should be in the extent_io_tree */ 9941 9942 btrfs_page_set_writeback(fs_info, page, start, len); 9943 put_page(page); 9944 index++; 9945 } 9946 } 9947 9948 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info, 9949 int compress_type) 9950 { 9951 switch (compress_type) { 9952 case BTRFS_COMPRESS_NONE: 9953 return BTRFS_ENCODED_IO_COMPRESSION_NONE; 9954 case BTRFS_COMPRESS_ZLIB: 9955 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB; 9956 case BTRFS_COMPRESS_LZO: 9957 /* 9958 * The LZO format depends on the sector size. 64K is the maximum 9959 * sector size that we support. 
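		 *
		 * (Worked example, illustrative: a 4K sector size means
		 * sectorsize_bits == 12, so this returns
		 * BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 0; a 64K sector size
		 * gives sectorsize_bits == 16 and lands on
		 * BTRFS_ENCODED_IO_COMPRESSION_LZO_64K.)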
9960 */ 9961 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) 9962 return -EINVAL; 9963 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 9964 (fs_info->sectorsize_bits - 12); 9965 case BTRFS_COMPRESS_ZSTD: 9966 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD; 9967 default: 9968 return -EUCLEAN; 9969 } 9970 } 9971 9972 static ssize_t btrfs_encoded_read_inline( 9973 struct kiocb *iocb, 9974 struct iov_iter *iter, u64 start, 9975 u64 lockend, 9976 struct extent_state **cached_state, 9977 u64 extent_start, size_t count, 9978 struct btrfs_ioctl_encoded_io_args *encoded, 9979 bool *unlocked) 9980 { 9981 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9982 struct btrfs_root *root = inode->root; 9983 struct btrfs_fs_info *fs_info = root->fs_info; 9984 struct extent_io_tree *io_tree = &inode->io_tree; 9985 struct btrfs_path *path; 9986 struct extent_buffer *leaf; 9987 struct btrfs_file_extent_item *item; 9988 u64 ram_bytes; 9989 unsigned long ptr; 9990 void *tmp; 9991 ssize_t ret; 9992 9993 path = btrfs_alloc_path(); 9994 if (!path) { 9995 ret = -ENOMEM; 9996 goto out; 9997 } 9998 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 9999 extent_start, 0); 10000 if (ret) { 10001 if (ret > 0) { 10002 /* The extent item disappeared? */ 10003 ret = -EIO; 10004 } 10005 goto out; 10006 } 10007 leaf = path->nodes[0]; 10008 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 10009 10010 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); 10011 ptr = btrfs_file_extent_inline_start(item); 10012 10013 encoded->len = min_t(u64, extent_start + ram_bytes, 10014 inode->vfs_inode.i_size) - iocb->ki_pos; 10015 ret = btrfs_encoded_io_compression_from_extent(fs_info, 10016 btrfs_file_extent_compression(leaf, item)); 10017 if (ret < 0) 10018 goto out; 10019 encoded->compression = ret; 10020 if (encoded->compression) { 10021 size_t inline_size; 10022 10023 inline_size = btrfs_file_extent_inline_item_len(leaf, 10024 path->slots[0]); 10025 if (inline_size > count) { 10026 ret = -ENOBUFS; 10027 goto out; 10028 } 10029 count = inline_size; 10030 encoded->unencoded_len = ram_bytes; 10031 encoded->unencoded_offset = iocb->ki_pos - extent_start; 10032 } else { 10033 count = min_t(u64, count, encoded->len); 10034 encoded->len = count; 10035 encoded->unencoded_len = count; 10036 ptr += iocb->ki_pos - extent_start; 10037 } 10038 10039 tmp = kmalloc(count, GFP_NOFS); 10040 if (!tmp) { 10041 ret = -ENOMEM; 10042 goto out; 10043 } 10044 read_extent_buffer(leaf, tmp, ptr, count); 10045 btrfs_release_path(path); 10046 unlock_extent(io_tree, start, lockend, cached_state); 10047 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10048 *unlocked = true; 10049 10050 ret = copy_to_iter(tmp, count, iter); 10051 if (ret != count) 10052 ret = -EFAULT; 10053 kfree(tmp); 10054 out: 10055 btrfs_free_path(path); 10056 return ret; 10057 } 10058 10059 struct btrfs_encoded_read_private { 10060 wait_queue_head_t wait; 10061 atomic_t pending; 10062 blk_status_t status; 10063 }; 10064 10065 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio) 10066 { 10067 struct btrfs_encoded_read_private *priv = bbio->private; 10068 10069 if (bbio->bio.bi_status) { 10070 /* 10071 * The memory barrier implied by the atomic_dec_return() here 10072 * pairs with the memory barrier implied by the 10073 * atomic_dec_return() or io_wait_event() in 10074 * btrfs_encoded_read_regular_fill_pages() to ensure that this 10075 * write is observed before the load of status in 10076 * 
btrfs_encoded_read_regular_fill_pages(). 10077 */ 10078 WRITE_ONCE(priv->status, bbio->bio.bi_status); 10079 } 10080 if (!atomic_dec_return(&priv->pending)) 10081 wake_up(&priv->wait); 10082 bio_put(&bbio->bio); 10083 } 10084 10085 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, 10086 u64 file_offset, u64 disk_bytenr, 10087 u64 disk_io_size, struct page **pages) 10088 { 10089 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10090 struct btrfs_encoded_read_private priv = { 10091 .pending = ATOMIC_INIT(1), 10092 }; 10093 unsigned long i = 0; 10094 struct btrfs_bio *bbio; 10095 10096 init_waitqueue_head(&priv.wait); 10097 10098 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 10099 btrfs_encoded_read_endio, &priv); 10100 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 10101 bbio->inode = inode; 10102 10103 do { 10104 size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE); 10105 10106 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) { 10107 atomic_inc(&priv.pending); 10108 btrfs_submit_bio(bbio, 0); 10109 10110 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 10111 btrfs_encoded_read_endio, &priv); 10112 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 10113 bbio->inode = inode; 10114 continue; 10115 } 10116 10117 i++; 10118 disk_bytenr += bytes; 10119 disk_io_size -= bytes; 10120 } while (disk_io_size); 10121 10122 atomic_inc(&priv.pending); 10123 btrfs_submit_bio(bbio, 0); 10124 10125 if (atomic_dec_return(&priv.pending)) 10126 io_wait_event(priv.wait, !atomic_read(&priv.pending)); 10127 /* See btrfs_encoded_read_endio() for ordering. */ 10128 return blk_status_to_errno(READ_ONCE(priv.status)); 10129 } 10130 10131 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, 10132 struct iov_iter *iter, 10133 u64 start, u64 lockend, 10134 struct extent_state **cached_state, 10135 u64 disk_bytenr, u64 disk_io_size, 10136 size_t count, bool compressed, 10137 bool *unlocked) 10138 { 10139 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10140 struct extent_io_tree *io_tree = &inode->io_tree; 10141 struct page **pages; 10142 unsigned long nr_pages, i; 10143 u64 cur; 10144 size_t page_offset; 10145 ssize_t ret; 10146 10147 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); 10148 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 10149 if (!pages) 10150 return -ENOMEM; 10151 ret = btrfs_alloc_page_array(nr_pages, pages); 10152 if (ret) { 10153 ret = -ENOMEM; 10154 goto out; 10155 } 10156 10157 ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr, 10158 disk_io_size, pages); 10159 if (ret) 10160 goto out; 10161 10162 unlock_extent(io_tree, start, lockend, cached_state); 10163 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10164 *unlocked = true; 10165 10166 if (compressed) { 10167 i = 0; 10168 page_offset = 0; 10169 } else { 10170 i = (iocb->ki_pos - start) >> PAGE_SHIFT; 10171 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); 10172 } 10173 cur = 0; 10174 while (cur < count) { 10175 size_t bytes = min_t(size_t, count - cur, 10176 PAGE_SIZE - page_offset); 10177 10178 if (copy_page_to_iter(pages[i], page_offset, bytes, 10179 iter) != bytes) { 10180 ret = -EFAULT; 10181 goto out; 10182 } 10183 i++; 10184 cur += bytes; 10185 page_offset = 0; 10186 } 10187 ret = count; 10188 out: 10189 for (i = 0; i < nr_pages; i++) { 10190 if (pages[i]) 10191 __free_page(pages[i]); 10192 } 10193 kfree(pages); 10194 return ret; 10195 } 10196 10197 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct 
iov_iter *iter, 10198 struct btrfs_ioctl_encoded_io_args *encoded) 10199 { 10200 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10201 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10202 struct extent_io_tree *io_tree = &inode->io_tree; 10203 ssize_t ret; 10204 size_t count = iov_iter_count(iter); 10205 u64 start, lockend, disk_bytenr, disk_io_size; 10206 struct extent_state *cached_state = NULL; 10207 struct extent_map *em; 10208 bool unlocked = false; 10209 10210 file_accessed(iocb->ki_filp); 10211 10212 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED); 10213 10214 if (iocb->ki_pos >= inode->vfs_inode.i_size) { 10215 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10216 return 0; 10217 } 10218 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); 10219 /* 10220 * We don't know how long the extent containing iocb->ki_pos is, but if 10221 * it's compressed we know that it won't be longer than this. 10222 */ 10223 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; 10224 10225 for (;;) { 10226 struct btrfs_ordered_extent *ordered; 10227 10228 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, 10229 lockend - start + 1); 10230 if (ret) 10231 goto out_unlock_inode; 10232 lock_extent(io_tree, start, lockend, &cached_state); 10233 ordered = btrfs_lookup_ordered_range(inode, start, 10234 lockend - start + 1); 10235 if (!ordered) 10236 break; 10237 btrfs_put_ordered_extent(ordered); 10238 unlock_extent(io_tree, start, lockend, &cached_state); 10239 cond_resched(); 10240 } 10241 10242 em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1); 10243 if (IS_ERR(em)) { 10244 ret = PTR_ERR(em); 10245 goto out_unlock_extent; 10246 } 10247 10248 if (em->block_start == EXTENT_MAP_INLINE) { 10249 u64 extent_start = em->start; 10250 10251 /* 10252 * For inline extents we get everything we need out of the 10253 * extent item. 10254 */ 10255 free_extent_map(em); 10256 em = NULL; 10257 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, 10258 &cached_state, extent_start, 10259 count, encoded, &unlocked); 10260 goto out; 10261 } 10262 10263 /* 10264 * We only want to return up to EOF even if the extent extends beyond 10265 * that. 10266 */ 10267 encoded->len = min_t(u64, extent_map_end(em), 10268 inode->vfs_inode.i_size) - iocb->ki_pos; 10269 if (em->block_start == EXTENT_MAP_HOLE || 10270 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 10271 disk_bytenr = EXTENT_MAP_HOLE; 10272 count = min_t(u64, count, encoded->len); 10273 encoded->len = count; 10274 encoded->unencoded_len = count; 10275 } else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10276 disk_bytenr = em->block_start; 10277 /* 10278 * Bail if the buffer isn't large enough to return the whole 10279 * compressed extent. 10280 */ 10281 if (em->block_len > count) { 10282 ret = -ENOBUFS; 10283 goto out_em; 10284 } 10285 disk_io_size = em->block_len; 10286 count = em->block_len; 10287 encoded->unencoded_len = em->ram_bytes; 10288 encoded->unencoded_offset = iocb->ki_pos - em->orig_start; 10289 ret = btrfs_encoded_io_compression_from_extent(fs_info, 10290 em->compress_type); 10291 if (ret < 0) 10292 goto out_em; 10293 encoded->compression = ret; 10294 } else { 10295 disk_bytenr = em->block_start + (start - em->start); 10296 if (encoded->len > count) 10297 encoded->len = count; 10298 /* 10299 * Don't read beyond what we locked. This also limits the page 10300 * allocations that we'll do. 
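		 *
		 * (Worked example, illustrative: with a 4K sector size,
		 * ki_pos == 4100, start == 4096 and encoded->len == 100,
		 * disk_io_size is first 104 and then rounded up to 4096,
		 * while count stays 100 -- one full sector is read but only
		 * 100 bytes are copied to the iterator.)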
10301 */ 10302 disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; 10303 count = start + disk_io_size - iocb->ki_pos; 10304 encoded->len = count; 10305 encoded->unencoded_len = count; 10306 disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize); 10307 } 10308 free_extent_map(em); 10309 em = NULL; 10310 10311 if (disk_bytenr == EXTENT_MAP_HOLE) { 10312 unlock_extent(io_tree, start, lockend, &cached_state); 10313 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10314 unlocked = true; 10315 ret = iov_iter_zero(count, iter); 10316 if (ret != count) 10317 ret = -EFAULT; 10318 } else { 10319 ret = btrfs_encoded_read_regular(iocb, iter, start, lockend, 10320 &cached_state, disk_bytenr, 10321 disk_io_size, count, 10322 encoded->compression, 10323 &unlocked); 10324 } 10325 10326 out: 10327 if (ret >= 0) 10328 iocb->ki_pos += encoded->len; 10329 out_em: 10330 free_extent_map(em); 10331 out_unlock_extent: 10332 if (!unlocked) 10333 unlock_extent(io_tree, start, lockend, &cached_state); 10334 out_unlock_inode: 10335 if (!unlocked) 10336 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10337 return ret; 10338 } 10339 10340 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 10341 const struct btrfs_ioctl_encoded_io_args *encoded) 10342 { 10343 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10344 struct btrfs_root *root = inode->root; 10345 struct btrfs_fs_info *fs_info = root->fs_info; 10346 struct extent_io_tree *io_tree = &inode->io_tree; 10347 struct extent_changeset *data_reserved = NULL; 10348 struct extent_state *cached_state = NULL; 10349 struct btrfs_ordered_extent *ordered; 10350 int compression; 10351 size_t orig_count; 10352 u64 start, end; 10353 u64 num_bytes, ram_bytes, disk_num_bytes; 10354 unsigned long nr_pages, i; 10355 struct page **pages; 10356 struct btrfs_key ins; 10357 bool extent_reserved = false; 10358 struct extent_map *em; 10359 ssize_t ret; 10360 10361 switch (encoded->compression) { 10362 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB: 10363 compression = BTRFS_COMPRESS_ZLIB; 10364 break; 10365 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD: 10366 compression = BTRFS_COMPRESS_ZSTD; 10367 break; 10368 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K: 10369 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K: 10370 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K: 10371 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K: 10372 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K: 10373 /* The sector size must match for LZO. */ 10374 if (encoded->compression - 10375 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 != 10376 fs_info->sectorsize_bits) 10377 return -EINVAL; 10378 compression = BTRFS_COMPRESS_LZO; 10379 break; 10380 default: 10381 return -EINVAL; 10382 } 10383 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 10384 return -EINVAL; 10385 10386 orig_count = iov_iter_count(from); 10387 10388 /* The extent size must be sane. */ 10389 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || 10390 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0) 10391 return -EINVAL; 10392 10393 /* 10394 * The compressed data must be smaller than the decompressed data. 10395 * 10396 * It's of course possible for data to compress to larger or the same 10397 * size, but the buffered I/O path falls back to no compression for such 10398 * data, and we don't want to break any assumptions by creating these 10399 * extents. 
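	 *
	 * (For illustration: 64 KiB of data that "compresses" to 64 KiB or
	 * more must be sent as a regular uncompressed write instead; 65535
	 * bytes is the largest compressed size this interface accepts for a
	 * 64 KiB unencoded extent.)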
10400 * 10401 * Note that this is less strict than the current check we have that the 10402 * compressed data must be at least one sector smaller than the 10403 * decompressed data. We only want to enforce the weaker requirement 10404 * from old kernels that it is at least one byte smaller. 10405 */ 10406 if (orig_count >= encoded->unencoded_len) 10407 return -EINVAL; 10408 10409 /* The extent must start on a sector boundary. */ 10410 start = iocb->ki_pos; 10411 if (!IS_ALIGNED(start, fs_info->sectorsize)) 10412 return -EINVAL; 10413 10414 /* 10415 * The extent must end on a sector boundary. However, we allow a write 10416 * which ends at or extends i_size to have an unaligned length; we round 10417 * up the extent size and set i_size to the unaligned end. 10418 */ 10419 if (start + encoded->len < inode->vfs_inode.i_size && 10420 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) 10421 return -EINVAL; 10422 10423 /* Finally, the offset in the unencoded data must be sector-aligned. */ 10424 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) 10425 return -EINVAL; 10426 10427 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); 10428 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); 10429 end = start + num_bytes - 1; 10430 10431 /* 10432 * If the extent cannot be inline, the compressed data on disk must be 10433 * sector-aligned. For convenience, we extend it with zeroes if it 10434 * isn't. 10435 */ 10436 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); 10437 nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE); 10438 pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT); 10439 if (!pages) 10440 return -ENOMEM; 10441 for (i = 0; i < nr_pages; i++) { 10442 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from)); 10443 char *kaddr; 10444 10445 pages[i] = alloc_page(GFP_KERNEL_ACCOUNT); 10446 if (!pages[i]) { 10447 ret = -ENOMEM; 10448 goto out_pages; 10449 } 10450 kaddr = kmap_local_page(pages[i]); 10451 if (copy_from_iter(kaddr, bytes, from) != bytes) { 10452 kunmap_local(kaddr); 10453 ret = -EFAULT; 10454 goto out_pages; 10455 } 10456 if (bytes < PAGE_SIZE) 10457 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); 10458 kunmap_local(kaddr); 10459 } 10460 10461 for (;;) { 10462 struct btrfs_ordered_extent *ordered; 10463 10464 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes); 10465 if (ret) 10466 goto out_pages; 10467 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, 10468 start >> PAGE_SHIFT, 10469 end >> PAGE_SHIFT); 10470 if (ret) 10471 goto out_pages; 10472 lock_extent(io_tree, start, end, &cached_state); 10473 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); 10474 if (!ordered && 10475 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) 10476 break; 10477 if (ordered) 10478 btrfs_put_ordered_extent(ordered); 10479 unlock_extent(io_tree, start, end, &cached_state); 10480 cond_resched(); 10481 } 10482 10483 /* 10484 * We don't use the higher-level delalloc space functions because our 10485 * num_bytes and disk_num_bytes are different. 10486 */ 10487 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes); 10488 if (ret) 10489 goto out_unlock; 10490 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); 10491 if (ret) 10492 goto out_free_data_space; 10493 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, 10494 false); 10495 if (ret) 10496 goto out_qgroup_free_data; 10497 10498 /* Try an inline extent first. 
*/ 10499 if (start == 0 && encoded->unencoded_len == encoded->len && 10500 encoded->unencoded_offset == 0) { 10501 ret = cow_file_range_inline(inode, encoded->len, orig_count, 10502 compression, pages, true); 10503 if (ret <= 0) { 10504 if (ret == 0) 10505 ret = orig_count; 10506 goto out_delalloc_release; 10507 } 10508 } 10509 10510 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes, 10511 disk_num_bytes, 0, 0, &ins, 1, 1); 10512 if (ret) 10513 goto out_delalloc_release; 10514 extent_reserved = true; 10515 10516 em = create_io_em(inode, start, num_bytes, 10517 start - encoded->unencoded_offset, ins.objectid, 10518 ins.offset, ins.offset, ram_bytes, compression, 10519 BTRFS_ORDERED_COMPRESSED); 10520 if (IS_ERR(em)) { 10521 ret = PTR_ERR(em); 10522 goto out_free_reserved; 10523 } 10524 free_extent_map(em); 10525 10526 ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes, 10527 ins.objectid, ins.offset, 10528 encoded->unencoded_offset, 10529 (1 << BTRFS_ORDERED_ENCODED) | 10530 (1 << BTRFS_ORDERED_COMPRESSED), 10531 compression); 10532 if (IS_ERR(ordered)) { 10533 btrfs_drop_extent_map_range(inode, start, end, false); 10534 ret = PTR_ERR(ordered); 10535 goto out_free_reserved; 10536 } 10537 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10538 10539 if (start + encoded->len > inode->vfs_inode.i_size) 10540 i_size_write(&inode->vfs_inode, start + encoded->len); 10541 10542 unlock_extent(io_tree, start, end, &cached_state); 10543 10544 btrfs_delalloc_release_extents(inode, num_bytes); 10545 10546 btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false); 10547 ret = orig_count; 10548 goto out; 10549 10550 out_free_reserved: 10551 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10552 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 10553 out_delalloc_release: 10554 btrfs_delalloc_release_extents(inode, num_bytes); 10555 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); 10556 out_qgroup_free_data: 10557 if (ret < 0) 10558 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes); 10559 out_free_data_space: 10560 /* 10561 * If btrfs_reserve_extent() succeeded, then we already decremented 10562 * bytes_may_use. 10563 */ 10564 if (!extent_reserved) 10565 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes); 10566 out_unlock: 10567 unlock_extent(io_tree, start, end, &cached_state); 10568 out_pages: 10569 for (i = 0; i < nr_pages; i++) { 10570 if (pages[i]) 10571 __free_page(pages[i]); 10572 } 10573 kvfree(pages); 10574 out: 10575 if (ret >= 0) 10576 iocb->ki_pos += encoded->len; 10577 return ret; 10578 } 10579 10580 #ifdef CONFIG_SWAP 10581 /* 10582 * Add an entry indicating a block group or device which is pinned by a 10583 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 10584 * negative errno on failure. 
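 *
 * Typical caller pattern (a sketch based on the activation code below):
 *
 *	ret = btrfs_add_swapfile_pin(inode, bg, true);
 *	if (ret == 1)
 *		ret = 0;	(already pinned; only bg_extent_count was bumped)
 *	else if (ret)
 *		goto out;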
10585 */ 10586 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 10587 bool is_block_group) 10588 { 10589 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10590 struct btrfs_swapfile_pin *sp, *entry; 10591 struct rb_node **p; 10592 struct rb_node *parent = NULL; 10593 10594 sp = kmalloc(sizeof(*sp), GFP_NOFS); 10595 if (!sp) 10596 return -ENOMEM; 10597 sp->ptr = ptr; 10598 sp->inode = inode; 10599 sp->is_block_group = is_block_group; 10600 sp->bg_extent_count = 1; 10601 10602 spin_lock(&fs_info->swapfile_pins_lock); 10603 p = &fs_info->swapfile_pins.rb_node; 10604 while (*p) { 10605 parent = *p; 10606 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 10607 if (sp->ptr < entry->ptr || 10608 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 10609 p = &(*p)->rb_left; 10610 } else if (sp->ptr > entry->ptr || 10611 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 10612 p = &(*p)->rb_right; 10613 } else { 10614 if (is_block_group) 10615 entry->bg_extent_count++; 10616 spin_unlock(&fs_info->swapfile_pins_lock); 10617 kfree(sp); 10618 return 1; 10619 } 10620 } 10621 rb_link_node(&sp->node, parent, p); 10622 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 10623 spin_unlock(&fs_info->swapfile_pins_lock); 10624 return 0; 10625 } 10626 10627 /* Free all of the entries pinned by this swapfile. */ 10628 static void btrfs_free_swapfile_pins(struct inode *inode) 10629 { 10630 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10631 struct btrfs_swapfile_pin *sp; 10632 struct rb_node *node, *next; 10633 10634 spin_lock(&fs_info->swapfile_pins_lock); 10635 node = rb_first(&fs_info->swapfile_pins); 10636 while (node) { 10637 next = rb_next(node); 10638 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 10639 if (sp->inode == inode) { 10640 rb_erase(&sp->node, &fs_info->swapfile_pins); 10641 if (sp->is_block_group) { 10642 btrfs_dec_block_group_swap_extents(sp->ptr, 10643 sp->bg_extent_count); 10644 btrfs_put_block_group(sp->ptr); 10645 } 10646 kfree(sp); 10647 } 10648 node = next; 10649 } 10650 spin_unlock(&fs_info->swapfile_pins_lock); 10651 } 10652 10653 struct btrfs_swap_info { 10654 u64 start; 10655 u64 block_start; 10656 u64 block_len; 10657 u64 lowest_ppage; 10658 u64 highest_ppage; 10659 unsigned long nr_pages; 10660 int nr_extents; 10661 }; 10662 10663 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 10664 struct btrfs_swap_info *bsi) 10665 { 10666 unsigned long nr_pages; 10667 unsigned long max_pages; 10668 u64 first_ppage, first_ppage_reported, next_ppage; 10669 int ret; 10670 10671 /* 10672 * Our swapfile may have had its size extended after the swap header was 10673 * written. In that case activating the swapfile should not go beyond 10674 * the max size set in the swap header. 
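 *
 * (Worked example of the rounding below, illustrative: with 4K pages,
 * block_start == 6K and block_len == 12K give first_ppage == 2 and
 * next_ppage == 4, so only the two fully covered pages are added.)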
10675 */ 10676 if (bsi->nr_pages >= sis->max) 10677 return 0; 10678 10679 max_pages = sis->max - bsi->nr_pages; 10680 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT; 10681 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; 10682 10683 if (first_ppage >= next_ppage) 10684 return 0; 10685 nr_pages = next_ppage - first_ppage; 10686 nr_pages = min(nr_pages, max_pages); 10687 10688 first_ppage_reported = first_ppage; 10689 if (bsi->start == 0) 10690 first_ppage_reported++; 10691 if (bsi->lowest_ppage > first_ppage_reported) 10692 bsi->lowest_ppage = first_ppage_reported; 10693 if (bsi->highest_ppage < (next_ppage - 1)) 10694 bsi->highest_ppage = next_ppage - 1; 10695 10696 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); 10697 if (ret < 0) 10698 return ret; 10699 bsi->nr_extents += ret; 10700 bsi->nr_pages += nr_pages; 10701 return 0; 10702 } 10703 10704 static void btrfs_swap_deactivate(struct file *file) 10705 { 10706 struct inode *inode = file_inode(file); 10707 10708 btrfs_free_swapfile_pins(inode); 10709 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); 10710 } 10711 10712 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10713 sector_t *span) 10714 { 10715 struct inode *inode = file_inode(file); 10716 struct btrfs_root *root = BTRFS_I(inode)->root; 10717 struct btrfs_fs_info *fs_info = root->fs_info; 10718 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 10719 struct extent_state *cached_state = NULL; 10720 struct extent_map *em = NULL; 10721 struct btrfs_device *device = NULL; 10722 struct btrfs_swap_info bsi = { 10723 .lowest_ppage = (sector_t)-1ULL, 10724 }; 10725 int ret = 0; 10726 u64 isize; 10727 u64 start; 10728 10729 /* 10730 * If the swap file was just created, make sure delalloc is done. If the 10731 * file changes again after this, the user is doing something stupid and 10732 * we don't really care. 10733 */ 10734 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); 10735 if (ret) 10736 return ret; 10737 10738 /* 10739 * The inode is locked, so these flags won't change after we check them. 10740 */ 10741 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { 10742 btrfs_warn(fs_info, "swapfile must not be compressed"); 10743 return -EINVAL; 10744 } 10745 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { 10746 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); 10747 return -EINVAL; 10748 } 10749 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 10750 btrfs_warn(fs_info, "swapfile must not be checksummed"); 10751 return -EINVAL; 10752 } 10753 10754 /* 10755 * Balance or device remove/replace/resize can move stuff around from 10756 * under us. The exclop protection makes sure they aren't running/won't 10757 * run concurrently while we are mapping the swap extents, and 10758 * fs_info->swapfile_pins prevents them from running while the swap 10759 * file is active and moving the extents. Note that this also prevents 10760 * a concurrent device add which isn't actually necessary, but it's not 10761 * really worth the trouble to allow it. 10762 */ 10763 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) { 10764 btrfs_warn(fs_info, 10765 "cannot activate swapfile while exclusive operation is running"); 10766 return -EBUSY; 10767 } 10768 10769 /* 10770 * Prevent snapshot creation while we are activating the swap file. 10771 * We do not want to race with snapshot creation. 
If snapshot creation
	 * already started before we bumped nr_swapfiles from 0 to 1 and
	 * completes before the first write into the swap file after it is
	 * activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
			   "cannot activate swapfile while snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that a subvolume is marked for deletion but has not
	 * been removed yet. To prevent this race, we check the root status
	 * before activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
			   "cannot activate swapfile because subvolume %llu is being deleted",
			   root->root_key.objectid);
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
			   "cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file. If
	 * snapshot creation already started before we bumped nr_swapfiles from
	 * 0 to 1 and completes before the first write into the swap file after
	 * it is activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
			   "cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that the subvolume is marked for deletion but not yet
	 * removed. To prevent this race, we check the root status before
	 * activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
			   "cannot activate swapfile because subvolume %llu is being deleted",
			   root->root_key.objectid);
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but
			 * in case something changes in the future, let's catch
			 * it here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = em->block_start + (start - em->start);
		len = min(len, em->len - (start - em->start));
		free_extent_map(em);
		em = NULL;

		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true);
		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
		} else {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}

		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = em->map_lookup->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != em->map_lookup->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (em->map_lookup->stripes[0].physical +
					(logical_block_start - em->start));
		len = min(len, em->len - (logical_block_start - em->start));
		free_extent_map(em);
		em = NULL;
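		/*
		 * Illustrative numbers for the translation above (made up, not
		 * derived from this code): if this chunk starts at logical
		 * em->start == 1G and its single stripe sits at physical 3G on
		 * the device, then a swapfile block at logical_block_start ==
		 * 1G + 64K lands at physical 3G + 64K. The min() clamps len so
		 * that the run never extends past the end of the chunk.
		 */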
		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
				   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
				   "block group for swapfile at %llu is read-only%s",
				   bg->start,
				   atomic_read(&fs_info->scrubs_running) ?
					   " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}

		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = start;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		start += len;
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);

out:
	if (!IS_ERR_OR_NULL(em))
		free_extent_map(em);

	unlock_extent(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

	if (ret)
		return ret;

	if (device)
		sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
	sis->pages = bsi.nr_pages - 1;
	sis->highest_bit = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
#else
static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}
#endif

/*
 * Update the number of bytes used in the VFS' inode. When we replace extents
 * in a range (clone, dedupe, fallocate's zero range), we must update the
 * number of bytes used by the inode in an atomic manner, so that concurrent
 * stat(2) calls always get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}
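/*
 * A minimal usage sketch with hypothetical values (not a call site in this
 * file): if a clone operation replaces a 1M extent in a range with a 512K
 * one, the caller would do
 *
 *	btrfs_update_inode_bytes(inode, SZ_512K, SZ_1M);
 *
 * and a concurrent stat(2) then observes either the old byte count or the new
 * one, never an intermediate value, since both the subtraction and the
 * addition happen under inode->lock.
 */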
/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode:   The target inode.
 * @start:   Start offset of the file range, should be sector size aligned.
 * @end:     End offset (inclusive) of the file range, its value +1 should be
 *           sector size aligned.
 *
 * This should typically be used for cases where we have locked the inode's
 * VFS lock in exclusive mode, we have also locked the inode's i_mmap_lock in
 * exclusive mode, we have flushed all delalloc in the range, we have waited
 * for all ordered extents in the range to complete and finally we have locked
 * the file range in the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), root->root_key.objectid,
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}
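/*
 * A rough sketch of the caller pattern described above (pseudocode, assuming
 * the usual reflink/fallocate style preparation; not a literal call site):
 *
 *	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
 *	(flush delalloc and wait for ordered extents in [start, end])
 *	lock_extent(&inode->io_tree, start, end, &cached_state);
 *	btrfs_assert_inode_range_clean(inode, start, end);
 */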
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles use bmap to make
 * a mapping of extents in the file. They assume these extents won't change
 * over the life of the file and they use the bmap result to do IO directly to
 * the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't suitable for
 * IO, and they would also change frequently as COW operations happen. So,
 * swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.invalidate_folio = btrfs_invalidate_folio,
	.release_folio	= btrfs_release_folio,
	.migrate_folio	= btrfs_migrate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_page = generic_error_remove_page,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};